Push version 1.2.7 to trunk.

Improved debugger and profiler support.

Reduced compilation time by improving the handling of deferred code.

Optimized interceptor accesses where the property is on the object on which the interceptor is attached.

Fixed compilation problem on GCC 4.4 by changing the stack alignment to 16 bytes.

Fixed handle creation to follow strict aliasing rules.

Fixed compilation on FreeBSD.

Introduced API for forcing the deletion of a property ignoring interceptors and attributes.


git-svn-id: http://v8.googlecode.com/svn/trunk@2121 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/AUTHORS b/AUTHORS
index ba1d8db..9b198d0 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -15,3 +15,4 @@
 Paolo Giarrusso <p.giarrusso@gmail.com>
 Rafal Krypa <rafal@krypa.net>
 Rene Rebe <rene@exactcode.de>
+Ryan Dahl <coldredlemur@gmail.com>
diff --git a/ChangeLog b/ChangeLog
index 686684d..3df6885 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+2009-06-08: Version 1.2.7
+
+        Improved debugger and profiler support.
+
+        Reduced compilation time by improving the handling of deferred
+        code.
+
+        Optimized interceptor accesses where the property is on the object
+        on which the interceptor is attached.
+
+        Fixed compilation problem on GCC 4.4 by changing the stack
+        alignment to 16 bytes.
+
+        Fixed handle creation to follow strict aliasing rules.
+
+        Fixed compilation on FreeBSD.
+
+        Introduced API for forcing the deletion of a property ignoring
+        interceptors and attributes.
+
+
 2009-05-29: Version 1.2.6
 
         Added a histogram recording hit rates at different levels of the
diff --git a/SConstruct b/SConstruct
index 5ebdc54..3b14eea 100644
--- a/SConstruct
+++ b/SConstruct
@@ -44,10 +44,10 @@
   ANDROID_TOP=""
 
 # TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
-# on linux we need these compiler flags to avoid a mksnapshot segfault, avoid
-# crashes in the v8 test suite and avoid dtoa.c strict aliasing issues
+# on linux we need these compiler flags to avoid crashes in the v8 test suite
+# and avoid dtoa.c strict aliasing issues
 if os.environ.get('GCC_VERSION') == '44':
-    GCC_EXTRA_CCFLAGS = ['-fno-tree-vectorize', '-fno-tree-vrp']
+    GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
     GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing']
 else:
     GCC_EXTRA_CCFLAGS = []
@@ -128,6 +128,8 @@
       'CCFLAGS':      ['-ansi'],
     },
     'os:freebsd': {
+      'CPPPATH' : ['/usr/local/include'],
+      'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
     },
     'os:win32': {
@@ -270,7 +272,7 @@
       'LIBS': ['pthread'],
     },
     'os:freebsd': {
-      'LIBS': ['pthread'],
+      'LIBS': ['execinfo', 'pthread']
     },
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
@@ -382,6 +384,7 @@
       'LIBS':         ['pthread'],
     },
     'os:freebsd': {
+      'LIBPATH' : ['/usr/local/lib'],
       'LIBS':         ['execinfo', 'pthread']
     },
     'os:win32': {
diff --git a/include/v8.h b/include/v8.h
index 3fd6fc6..87ce2a2 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1056,7 +1056,7 @@
            Handle<Value> value,
            PropertyAttribute attribs = None);
 
-  // Sets a local property on this object, bypassing interceptors and
+  // Sets a local property on this object bypassing interceptors and
   // overriding accessors or read-only properties.
   //
   // Note that if the object has an interceptor the property will be set
@@ -1067,13 +1067,21 @@
   bool ForceSet(Handle<Value> key,
                 Handle<Value> value,
                 PropertyAttribute attribs = None);
+
   Local<Value> Get(Handle<Value> key);
 
   // TODO(1245389): Replace the type-specific versions of these
   // functions with generic ones that accept a Handle<Value> key.
   bool Has(Handle<String> key);
+
   bool Delete(Handle<String> key);
+
+  // Delete a property on this object bypassing interceptors and
+  // ignoring dont-delete attributes.
+  bool ForceDelete(Handle<Value> key);
+
   bool Has(uint32_t index);
+
   bool Delete(uint32_t index);
 
   /**
diff --git a/src/accessors.cc b/src/accessors.cc
index ee73ac0..ac6cdf9 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -289,6 +289,24 @@
 
 
 //
+// Accessors::ScriptCompilationType
+//
+
+
+Object* Accessors::ScriptGetCompilationType(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->compilation_type();
+}
+
+
+const AccessorDescriptor Accessors::ScriptCompilationType = {
+  ScriptGetCompilationType,
+  IllegalSetter,
+  0
+};
+
+
+//
 // Accessors::ScriptGetLineEnds
 //
 
@@ -314,9 +332,8 @@
 
 
 Object* Accessors::ScriptGetContextData(Object* object, void*) {
-  HandleScope scope;
-  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
-  return script->context_data();
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->context_data();
 }
 
 
@@ -328,6 +345,54 @@
 
 
 //
+// Accessors::ScriptGetEvalFromFunction
+//
+
+
+Object* Accessors::ScriptGetEvalFromFunction(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->eval_from_function();
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromFunction = {
+  ScriptGetEvalFromFunction,
+  IllegalSetter,
+  0
+};
+
+
+//
+// Accessors::ScriptGetEvalFromPosition
+//
+
+
+Object* Accessors::ScriptGetEvalFromPosition(Object* object, void*) {
+  HandleScope scope;
+  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+
+  // If this is not a script compiled through eval there is no eval position.
+  int compilation_type = Smi::cast(script->compilation_type())->value();
+  if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
+    return Heap::undefined_value();
+  }
+
+  // Get the function from where eval was called and find the source position
+  // from the instruction offset.
+  Handle<Code> code(JSFunction::cast(script->eval_from_function())->code());
+  return Smi::FromInt(code->SourcePosition(code->instruction_start() +
+                      script->eval_from_instructions_offset()->value()));
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromPosition = {
+  ScriptGetEvalFromPosition,
+  IllegalSetter,
+  0
+};
+
+
+//
 // Accessors::FunctionPrototype
 //
 
diff --git a/src/accessors.h b/src/accessors.h
index 9e877d1..51d322e 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -34,22 +34,25 @@
 // The list of accessor descriptors. This is a second-order macro
 // taking a macro to be applied to all accessor descriptor names.
 #define ACCESSOR_DESCRIPTOR_LIST(V) \
-  V(FunctionPrototype)   \
-  V(FunctionLength)      \
-  V(FunctionName)        \
-  V(FunctionArguments)   \
-  V(FunctionCaller)      \
-  V(ArrayLength)         \
-  V(StringLength)        \
-  V(ScriptSource)        \
-  V(ScriptName)          \
-  V(ScriptId)            \
-  V(ScriptLineOffset)    \
-  V(ScriptColumnOffset)  \
-  V(ScriptData)          \
-  V(ScriptType)          \
-  V(ScriptLineEnds)      \
-  V(ScriptContextData)   \
+  V(FunctionPrototype)              \
+  V(FunctionLength)                 \
+  V(FunctionName)                   \
+  V(FunctionArguments)              \
+  V(FunctionCaller)                 \
+  V(ArrayLength)                    \
+  V(StringLength)                   \
+  V(ScriptSource)                   \
+  V(ScriptName)                     \
+  V(ScriptId)                       \
+  V(ScriptLineOffset)               \
+  V(ScriptColumnOffset)             \
+  V(ScriptData)                     \
+  V(ScriptType)                     \
+  V(ScriptCompilationType)          \
+  V(ScriptLineEnds)                 \
+  V(ScriptContextData)              \
+  V(ScriptEvalFromFunction)         \
+  V(ScriptEvalFromPosition)         \
   V(ObjectPrototype)
 
 // Accessors contains all predefined proxy accessors.
@@ -89,8 +92,11 @@
   static Object* ScriptGetColumnOffset(Object* object, void*);
   static Object* ScriptGetData(Object* object, void*);
   static Object* ScriptGetType(Object* object, void*);
+  static Object* ScriptGetCompilationType(Object* object, void*);
   static Object* ScriptGetLineEnds(Object* object, void*);
   static Object* ScriptGetContextData(Object* object, void*);
+  static Object* ScriptGetEvalFromFunction(Object* object, void*);
+  static Object* ScriptGetEvalFromPosition(Object* object, void*);
   static Object* ObjectGetPrototype(Object* receiver, void*);
   static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
 
diff --git a/src/api.cc b/src/api.cc
index 48a9d1a..7b7f290 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -416,7 +416,8 @@
 
 
 void** v8::HandleScope::CreateHandle(void* value) {
-  return i::HandleScope::CreateHandle(value);
+  return reinterpret_cast<void**>(
+      i::HandleScope::CreateHandle(reinterpret_cast<i::Object*>(value)));
 }
 
 
@@ -1891,6 +1892,19 @@
 }
 
 
+bool v8::Object::ForceDelete(v8::Handle<Value> key) {
+  ON_BAILOUT("v8::Object::ForceDelete()", return false);
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return obj->IsTrue();
+}
+
+
 Local<Value> v8::Object::Get(v8::Handle<Value> key) {
   ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
   ENTER_V8;
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
new file mode 100644
index 0000000..544331a
--- /dev/null
+++ b/src/arm/codegen-arm-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_ARM_CODEGEN_ARM_INL_H_
+#define V8_ARM_CODEGEN_ARM_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index fe6d945..7428d3b 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -41,6 +41,34 @@
 
 #define __ ACCESS_MASM(masm_)
 
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ push(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+    }
+  }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in reverse order due to the stack.
+  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ pop(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore) {
+      action &= ~kSyncedFlag;
+      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+    }
+  }
+}
+
 
 // -------------------------------------------------------------------------
 // CodeGenState implementation.
@@ -776,23 +804,14 @@
 };
 
 
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
 void DeferredInlineSmiOperation::Generate() {
-  MacroAssembler* masm = cgen()->masm();
-  enter()->Bind();
-  VirtualFrame::SpilledScope spilled_scope;
-
   switch (op_) {
     case Token::ADD: {
+      // Revert optimistic add.
       if (reversed_) {
-        // revert optimistic add
         __ sub(r0, r0, Operand(Smi::FromInt(value_)));
         __ mov(r1, Operand(Smi::FromInt(value_)));
       } else {
-        // revert optimistic add
         __ sub(r1, r0, Operand(Smi::FromInt(value_)));
         __ mov(r0, Operand(Smi::FromInt(value_)));
       }
@@ -800,8 +819,8 @@
     }
 
     case Token::SUB: {
+      // Revert optimistic sub.
       if (reversed_) {
-        // revert optimistic sub
         __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
         __ mov(r1, Operand(Smi::FromInt(value_)));
       } else {
@@ -830,31 +849,22 @@
         __ mov(r1, Operand(r0));
         __ mov(r0, Operand(Smi::FromInt(value_)));
       } else {
-        UNREACHABLE();  // should have been handled in SmiOperation
+        UNREACHABLE();  // Should have been handled in SmiOperation.
       }
       break;
     }
 
     default:
-      // other cases should have been handled before this point.
+      // Other cases should have been handled before this point.
       UNREACHABLE();
       break;
   }
 
-  GenericBinaryOpStub igostub(op_, overwrite_mode_);
-  Result arg0 = cgen()->allocator()->Allocate(r1);
-  ASSERT(arg0.is_valid());
-  Result arg1 = cgen()->allocator()->Allocate(r0);
-  ASSERT(arg1.is_valid());
-  cgen()->frame()->CallStub(&igostub, &arg0, &arg1);
-  exit_.Jump();
+  GenericBinaryOpStub stub(op_, overwrite_mode_);
+  __ CallStub(&stub);
 }
 
 
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-
 void CodeGenerator::SmiOperation(Token::Value op,
                                  Handle<Object> value,
                                  bool reversed,
@@ -877,28 +887,28 @@
   switch (op) {
     case Token::ADD: {
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
 
       __ add(r0, r0, Operand(value), SetCC);
-      deferred->enter()->Branch(vs);
+      deferred->Branch(vs);
       __ tst(r0, Operand(kSmiTagMask));
-      deferred->enter()->Branch(ne);
+      deferred->Branch(ne);
       deferred->BindExit();
       break;
     }
 
     case Token::SUB: {
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
 
-      if (!reversed) {
-        __ sub(r0, r0, Operand(value), SetCC);
-      } else {
+      if (reversed) {
         __ rsb(r0, r0, Operand(value), SetCC);
+      } else {
+        __ sub(r0, r0, Operand(value), SetCC);
       }
-      deferred->enter()->Branch(vs);
+      deferred->Branch(vs);
       __ tst(r0, Operand(kSmiTagMask));
-      deferred->enter()->Branch(ne);
+      deferred->Branch(ne);
       deferred->BindExit();
       break;
     }
@@ -909,7 +919,7 @@
       DeferredCode* deferred =
         new DeferredInlineSmiOperation(op, int_value, reversed, mode);
       __ tst(r0, Operand(kSmiTagMask));
-      deferred->enter()->Branch(ne);
+      deferred->Branch(ne);
       switch (op) {
         case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
         case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
@@ -934,14 +944,14 @@
         DeferredCode* deferred =
           new DeferredInlineSmiOperation(op, shift_value, false, mode);
         __ tst(r0, Operand(kSmiTagMask));
-        deferred->enter()->Branch(ne);
+        deferred->Branch(ne);
         __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
         switch (op) {
           case Token::SHL: {
             __ mov(r2, Operand(r2, LSL, shift_value));
             // check that the *unsigned* result fits in a smi
             __ add(r3, r2, Operand(0x40000000), SetCC);
-            deferred->enter()->Branch(mi);
+            deferred->Branch(mi);
             break;
           }
           case Token::SHR: {
@@ -956,7 +966,7 @@
             // smi tagging these two cases can only happen with shifts
             // by 0 or 1 when handed a valid smi
             __ and_(r3, r2, Operand(0xc0000000), SetCC);
-            deferred->enter()->Branch(ne);
+            deferred->Branch(ne);
             break;
           }
           case Token::SAR: {
@@ -2670,40 +2680,25 @@
 };
 
 
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
 void DeferredObjectLiteral::Generate() {
-  MacroAssembler* masm = cgen()->masm();
   // Argument is passed in r1.
-  enter()->Bind();
-  VirtualFrame::SpilledScope spilled_scope;
 
   // If the entry is undefined we call the runtime system to compute
   // the literal.
-
-  VirtualFrame* frame = cgen()->frame();
   // Literal array (0).
-  frame->EmitPush(r1);
+  __ push(r1);
   // Literal index (1).
   __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
-  frame->EmitPush(r0);
+  __ push(r0);
   // Constant properties (2).
   __ mov(r0, Operand(node_->constant_properties()));
-  frame->EmitPush(r0);
-  Result boilerplate =
-      frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ mov(r2, Operand(boilerplate.reg()));
+  __ push(r0);
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ mov(r2, Operand(r0));
   // Result is returned in r2.
-  exit_.Jump();
 }
 
 
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-
 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
@@ -2729,7 +2724,7 @@
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code.
   __ cmp(r2, Operand(Factory::undefined_value()));
-  deferred->enter()->Branch(eq);
+  deferred->Branch(eq);
   deferred->BindExit();
 
   // Push the object literal boilerplate.
@@ -2807,40 +2802,25 @@
 };
 
 
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
 void DeferredArrayLiteral::Generate() {
-  MacroAssembler* masm = cgen()->masm();
   // Argument is passed in r1.
-  enter()->Bind();
-  VirtualFrame::SpilledScope spilled_scope;
 
   // If the entry is undefined we call the runtime system to computed
   // the literal.
-
-  VirtualFrame* frame = cgen()->frame();
   // Literal array (0).
-  frame->EmitPush(r1);
+  __ push(r1);
   // Literal index (1).
   __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
-  frame->EmitPush(r0);
+  __ push(r0);
   // Constant properties (2).
   __ mov(r0, Operand(node_->literals()));
-  frame->EmitPush(r0);
-  Result boilerplate =
-      frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-  __ mov(r2, Operand(boilerplate.reg()));
+  __ push(r0);
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  __ mov(r2, Operand(r0));
   // Result is returned in r2.
-  exit_.Jump();
 }
 
 
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-
 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
@@ -2866,7 +2846,7 @@
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code.
   __ cmp(r2, Operand(Factory::undefined_value()));
-  deferred->enter()->Branch(eq);
+  deferred->Branch(eq);
   deferred->BindExit();
 
   // Push the object literal boilerplate.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 3eb8269..c09f9e3 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -246,6 +246,7 @@
 void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
                                            JSObject* object,
                                            JSObject* holder,
+                                           Smi* lookup_hint,
                                            Register receiver,
                                            Register name,
                                            Register scratch1,
@@ -263,11 +264,13 @@
   __ push(receiver);  // receiver
   __ push(reg);  // holder
   __ push(name);  // name
+  __ mov(scratch1, Operand(lookup_hint));
+  __ push(scratch1);
 
   // Do tail-call to the runtime system.
   ExternalReference load_ic_property =
       ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
-  __ TailCallRuntime(load_ic_property, 3);
+  __ TailCallRuntime(load_ic_property, 4);
 }
 
 
@@ -909,7 +912,15 @@
 
   __ ldr(r0, MemOperand(sp, 0));
 
-  GenerateLoadInterceptor(masm(), object, holder, r0, r2, r3, r1, &miss);
+  GenerateLoadInterceptor(masm(),
+                          object,
+                          holder,
+                          holder->InterceptorPropertyLookupHint(name),
+                          r0,
+                          r2,
+                          r3,
+                          r1,
+                          &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1015,7 +1026,15 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadInterceptor(masm(), receiver, holder, r0, r2, r3, r1, &miss);
+  GenerateLoadInterceptor(masm(),
+                          receiver,
+                          holder,
+                          Smi::FromInt(JSObject::kLookupInHolder),
+                          r0,
+                          r2,
+                          r3,
+                          r1,
+                          &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index fc202e2..ebebd53 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -525,6 +525,8 @@
 
   bool Equals(VirtualFrame* other);
 
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
   friend class JumpTarget;
 };
 
diff --git a/src/assembler.cc b/src/assembler.cc
index f7b50d9..5dba75d 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -91,13 +91,13 @@
 //                     bits, the lowest 7 bits written first.
 //
 // data-jump + pos:    00 1110 11,
-//                     signed int, lowest byte written first
+//                     signed intptr_t, lowest byte written first
 //
 // data-jump + st.pos: 01 1110 11,
-//                     signed int, lowest byte written first
+//                     signed intptr_t, lowest byte written first
 //
 // data-jump + comm.:  10 1110 11,
-//                     signed int, lowest byte written first
+//                     signed intptr_t, lowest byte written first
 //
 const int kMaxRelocModes = 14;
 
@@ -159,7 +159,7 @@
 }
 
 
-void RelocInfoWriter::WriteTaggedData(int32_t data_delta, int tag) {
+void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
   *--pos_ = data_delta << kPositionTypeTagBits | tag;
 }
 
@@ -179,11 +179,12 @@
 }
 
 
-void RelocInfoWriter::WriteExtraTaggedData(int32_t data_delta, int top_tag) {
+void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
   WriteExtraTag(kDataJumpTag, top_tag);
-  for (int i = 0; i < kIntSize; i++) {
+  for (int i = 0; i < kIntptrSize; i++) {
     *--pos_ = data_delta;
-    data_delta = ArithmeticShiftRight(data_delta, kBitsPerByte);
+  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    data_delta = data_delta >> kBitsPerByte;
   }
 }
 
@@ -206,11 +207,13 @@
     WriteTaggedPC(pc_delta, kCodeTargetTag);
   } else if (RelocInfo::IsPosition(rmode)) {
     // Use signed delta-encoding for data.
-    int32_t data_delta = rinfo->data() - last_data_;
+    intptr_t data_delta = rinfo->data() - last_data_;
     int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
                                                     : kStatementPositionTag;
     // Check if data is small enough to fit in a tagged byte.
-    if (is_intn(data_delta, kSmallDataBits)) {
+    // We cannot use is_intn because data_delta is not an int32_t.
+    if (data_delta >= -(1 << (kSmallDataBits-1)) &&
+        data_delta < 1 << (kSmallDataBits-1)) {
       WriteTaggedPC(pc_delta, kPositionTag);
       WriteTaggedData(data_delta, pos_type_tag);
       last_data_ = rinfo->data();
@@ -264,9 +267,9 @@
 
 
 void RelocIterator::AdvanceReadData() {
-  int32_t x = 0;
-  for (int i = 0; i < kIntSize; i++) {
-    x |= *--pos_ << i * kBitsPerByte;
+  intptr_t x = 0;
+  for (int i = 0; i < kIntptrSize; i++) {
+    x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
   }
   rinfo_.data_ += x;
 }
@@ -295,7 +298,8 @@
 
 inline void RelocIterator::ReadTaggedData() {
   int8_t signed_b = *pos_;
-  rinfo_.data_ += ArithmeticShiftRight(signed_b, kPositionTypeTagBits);
+  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+  rinfo_.data_ += signed_b >> kPositionTypeTagBits;
 }
 
 
diff --git a/src/assembler.h b/src/assembler.h
index 3449063..66f952a 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -272,8 +272,8 @@
   inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
   inline void WriteTaggedPC(uint32_t pc_delta, int tag);
   inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
-  inline void WriteExtraTaggedData(int32_t data_delta, int top_tag);
-  inline void WriteTaggedData(int32_t data_delta, int tag);
+  inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
+  inline void WriteTaggedData(intptr_t data_delta, int tag);
   inline void WriteExtraTag(int extra_tag, int top_tag);
 
   byte* pos_;
@@ -424,8 +424,6 @@
 // -----------------------------------------------------------------------------
 // Utility functions
 
-// Move these into inline file?
-
 static inline bool is_intn(int x, int n)  {
   return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
@@ -437,9 +435,11 @@
   return (x & -(1 << n)) == 0;
 }
 
+static inline bool is_uint2(int x)  { return is_uintn(x, 2); }
 static inline bool is_uint3(int x)  { return is_uintn(x, 3); }
 static inline bool is_uint4(int x)  { return is_uintn(x, 4); }
 static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
+static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
 static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
 static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
 static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 546490d..89c92b0 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -47,7 +47,7 @@
 // generate an index for each native JS file.
 class SourceCodeCache BASE_EMBEDDED {
  public:
-  explicit SourceCodeCache(ScriptType type): type_(type) { }
+  explicit SourceCodeCache(Script::Type type): type_(type) { }
 
   void Initialize(bool create_heap_objects) {
     if (create_heap_objects) {
@@ -89,13 +89,13 @@
   }
 
  private:
-  ScriptType type_;
+  Script::Type type_;
   FixedArray* cache_;
   DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
 };
 
-static SourceCodeCache natives_cache(SCRIPT_TYPE_NATIVE);
-static SourceCodeCache extensions_cache(SCRIPT_TYPE_EXTENSION);
+static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
+static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
 
 
 Handle<String> Bootstrapper::NativesSourceLookup(int index) {
@@ -522,7 +522,7 @@
     empty_function->set_code(*code);
     Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
     Handle<Script> script = Factory::NewScript(source);
-    script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+    script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
     empty_function->shared()->set_script(*script);
     empty_function->shared()->set_start_position(0);
     empty_function->shared()->set_end_position(source->length());
@@ -1062,6 +1062,14 @@
             Factory::LookupAsciiSymbol("type"),
             proxy_type,
             common_attributes);
+    Handle<Proxy> proxy_compilation_type =
+        Factory::NewProxy(&Accessors::ScriptCompilationType);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("compilation_type"),
+            proxy_compilation_type,
+            common_attributes);
     Handle<Proxy> proxy_line_ends =
         Factory::NewProxy(&Accessors::ScriptLineEnds);
     script_descriptors =
@@ -1078,13 +1086,29 @@
             Factory::LookupAsciiSymbol("context_data"),
             proxy_context_data,
             common_attributes);
+    Handle<Proxy> proxy_eval_from_function =
+        Factory::NewProxy(&Accessors::ScriptEvalFromFunction);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("eval_from_function"),
+            proxy_eval_from_function,
+            common_attributes);
+    Handle<Proxy> proxy_eval_from_position =
+        Factory::NewProxy(&Accessors::ScriptEvalFromPosition);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("eval_from_position"),
+            proxy_eval_from_position,
+            common_attributes);
 
     Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
     script_map->set_instance_descriptors(*script_descriptors);
 
     // Allocate the empty script.
     Handle<Script> script = Factory::NewScript(Factory::empty_string());
-    script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+    script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
     global_context()->set_empty_script(*script);
   }
 
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index 12582a9..bee237d 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -32,27 +32,21 @@
 #include "codegen.h"
 #include "register-allocator-inl.h"
 
+#if V8_TARGET_ARCH_IA32
+#include "ia32/codegen-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/codegen-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/codegen-arm-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+
 namespace v8 {
 namespace internal {
 
-
-void DeferredCode::SetEntryFrame(Result* arg) {
-  ASSERT(cgen()->has_valid_frame());
-  cgen()->frame()->Push(arg);
-  enter()->set_entry_frame(new VirtualFrame(cgen()->frame()));
-  *arg = cgen()->frame()->Pop();
-}
-
-
-void DeferredCode::SetEntryFrame(Result* arg0, Result* arg1) {
-  ASSERT(cgen()->has_valid_frame());
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  enter()->set_entry_frame(new VirtualFrame(cgen()->frame()));
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-}
-
+#define __ ACCESS_MASM(masm_)
 
 // -----------------------------------------------------------------------------
 // Support for "structured" code comments.
@@ -64,15 +58,12 @@
 
 class Comment BASE_EMBEDDED {
  public:
-  Comment(MacroAssembler* masm, const char* msg)
-    : masm_(masm),
-      msg_(msg) {
-    masm_->RecordComment(msg);
+  Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
+    __ RecordComment(msg);
   }
 
   ~Comment() {
-    if (msg_[0] == '[')
-      masm_->RecordComment("]");
+    if (msg_[0] == '[') __ RecordComment("]");
   }
 
  private:
@@ -89,6 +80,8 @@
 
 #endif  // DEBUG
 
+#undef __
+
 
 } }  // namespace v8::internal
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 3b288d4..f46269f 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -45,33 +45,54 @@
 CodeGenerator* CodeGeneratorScope::top_ = NULL;
 
 
-DeferredCode::DeferredCode() : exit_(JumpTarget::BIDIRECTIONAL) {
-  MacroAssembler* masm = cgen()->masm();
-  statement_position_ = masm->current_statement_position();
-  position_ = masm->current_position();
+DeferredCode::DeferredCode()
+    : masm_(CodeGeneratorScope::Current()->masm()),
+      statement_position_(masm_->current_statement_position()),
+      position_(masm_->current_position()) {
   ASSERT(statement_position_ != RelocInfo::kNoPosition);
   ASSERT(position_ != RelocInfo::kNoPosition);
 
-  cgen()->AddDeferred(this);
+  CodeGeneratorScope::Current()->AddDeferred(this);
 #ifdef DEBUG
   comment_ = "";
 #endif
+
+  // Copy the register locations from the code generator's frame.
+  // These are the registers that will be spilled on entry to the
+  // deferred code and restored on exit.
+  VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
+  int sp_offset = frame->fp_relative(frame->stack_pointer_);
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int loc = frame->register_location(i);
+    if (loc == VirtualFrame::kIllegalIndex) {
+      registers_[i] = kIgnore;
+    } else if (frame->elements_[loc].is_synced()) {
+      // Needs to be restored on exit but not saved on entry.
+      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
+    } else {
+      int offset = frame->fp_relative(loc);
+      registers_[i] = (offset < sp_offset) ? kPush : offset;
+    }
+  }
 }
 
 
 void CodeGenerator::ProcessDeferred() {
   while (!deferred_.is_empty()) {
     DeferredCode* code = deferred_.RemoveLast();
-    MacroAssembler* masm = code->cgen()->masm();
+    ASSERT(masm_ == code->masm());
     // Record position of deferred code stub.
-    masm->RecordStatementPosition(code->statement_position());
+    masm_->RecordStatementPosition(code->statement_position());
     if (code->position() != RelocInfo::kNoPosition) {
-      masm->RecordPosition(code->position());
+      masm_->RecordPosition(code->position());
     }
     // Generate the code.
-    Comment cmnt(masm, code->comment());
+    Comment cmnt(masm_, code->comment());
+    masm_->bind(code->entry_label());
+    code->SaveRegisters();
     code->Generate();
-    ASSERT(code->enter()->is_bound());
+    code->RestoreRegisters();
+    masm_->jmp(code->exit_label());
   }
 }
 
diff --git a/src/codegen.h b/src/codegen.h
index 9df2b49..e1758e1 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -125,29 +125,14 @@
 
   virtual void Generate() = 0;
 
-  CodeGenerator* cgen() const { return CodeGeneratorScope::Current(); }
-
-  // Set the virtual frame for entry to the deferred code as a
-  // snapshot of the code generator's current frame (plus additional
-  // results).  This is optional, but should be done before branching
-  // or jumping to the deferred code.
-  inline void SetEntryFrame(Result* arg);
-  inline void SetEntryFrame(Result* arg0, Result* arg1);
-
-  JumpTarget* enter() { return &enter_; }
-
-  void BindExit() { exit_.Bind(0); }
-  void BindExit(Result* result) { exit_.Bind(result, 1); }
-  void BindExit(Result* result0, Result* result1) {
-    exit_.Bind(result0, result1, 2);
-  }
-  void BindExit(Result* result0, Result* result1, Result* result2) {
-    exit_.Bind(result0, result1, result2, 3);
-  }
+  MacroAssembler* masm() { return masm_; }
 
   int statement_position() const { return statement_position_; }
   int position() const { return position_; }
 
+  Label* entry_label() { return &entry_label_; }
+  Label* exit_label() { return &exit_label_; }
+
 #ifdef DEBUG
   void set_comment(const char* comment) { comment_ = comment; }
   const char* comment() const { return comment_; }
@@ -156,13 +141,35 @@
   const char* comment() const { return ""; }
 #endif
 
+  inline void Jump();
+  inline void Branch(Condition cc);
+  void BindExit() { masm_->bind(&exit_label_); }
+
+  void SaveRegisters();
+  void RestoreRegisters();
+
  protected:
-  JumpTarget enter_;
-  JumpTarget exit_;
+  MacroAssembler* masm_;
 
  private:
+  // Constants indicating special actions.  They should not be multiples
+  // of kPointerSize so they will not collide with valid offsets from
+  // the frame pointer.
+  static const int kIgnore = -1;
+  static const int kPush = 1;
+
+  // This flag is ored with a valid offset from the frame pointer, so
+  // it should fit in the low zero bits of a valid offset.
+  static const int kSyncedFlag = 2;
+
   int statement_position_;
   int position_;
+
+  Label entry_label_;
+  Label exit_label_;
+
+  int registers_[RegisterAllocator::kNumRegisters];
+
 #ifdef DEBUG
   const char* comment_;
 #endif
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 083f49b..421b676 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -36,7 +36,7 @@
   // The number of script generations tell how many GCs a script can
   // survive in the compilation cache, before it will be flushed if it
   // hasn't been used.
-  NUMBER_OF_SCRIPT_GENERATIONS = 8,
+  NUMBER_OF_SCRIPT_GENERATIONS = 5,
 
   // The compilation cache consists of tables - one for each entry
   // kind plus extras for the script generations.
diff --git a/src/compiler.cc b/src/compiler.cc
index 5632ff7..ea7c134 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -110,7 +110,22 @@
 
   ASSERT(!i::Top::global_context().is_null());
   script->set_context_data((*i::Top::global_context())->data());
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
+  if (is_eval || is_json) {
+    script->set_compilation_type(
+        is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
+                               Smi::FromInt(Script::COMPILATION_TYPE_EVAL));
+    // For eval scripts add information on the function from which eval was
+    // called.
+    if (is_eval) {
+      JavaScriptFrameIterator it;
+      script->set_eval_from_function(it.frame()->function());
+      int offset = it.frame()->pc() - it.frame()->code()->instruction_start();
+      script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+    }
+  }
+
   // Notify debugger
   Debugger::OnBeforeCompile(script);
 #endif
diff --git a/src/d8.cc b/src/d8.cc
index f8cfff6..ee845ee 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -451,7 +451,7 @@
   i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
   i::Handle<i::Script> script_object =
       i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
-  script_object->set_type(i::Smi::FromInt(i::SCRIPT_TYPE_NATIVE));
+  script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
 
   // Create the evaluation context
   evaluation_context_ = Context::New(NULL, global_template);
diff --git a/src/d8.js b/src/d8.js
index 4c2f152..a8db9e1 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -93,6 +93,13 @@
                      Normal: 2 };
 
 
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+                                Eval: 1,
+                                JSON: 2 };
+
+
 // Current debug state.
 const kNoFrame = -1;
 Debug.State = {
@@ -963,7 +970,18 @@
           if (body[i].name) {
             result += body[i].name;
           } else {
-            result += '[unnamed] ';
+            if (body[i].compilationType == Debug.ScriptCompilationType.Eval) {
+              result += 'eval from ';
+              var script_value = response.lookup(body[i].evalFromScript.ref);
+              result += ' ' + script_value.field('name');
+              result += ':' + (body[i].evalFromLocation.line + 1);
+              result += ':' + body[i].evalFromLocation.column;
+            } else if (body[i].compilationType ==
+                       Debug.ScriptCompilationType.JSON) {
+              result += 'JSON ';
+            } else {  // body[i].compilationType == Debug.ScriptCompilationType.Host
+              result += '[unnamed] ';
+            }
           }
           result += ' (lines: ';
           result += body[i].lineCount;
@@ -1126,6 +1144,15 @@
 
 
 /**
+ * Get a metadata field from a protocol value.
+ * @return {Object} the metadata field value
+ */
+ProtocolValue.prototype.field = function(name) {
+  return this.value_[name];
+}
+
+
+/**
  * Check is the value is a primitive value.
  * @return {boolean} true if the value is primitive
  */
diff --git a/src/debug-delay.js b/src/debug-delay.js
index ea566a9..0b0501f 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -62,6 +62,12 @@
                      Extension: 1,
                      Normal: 2 };
 
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+                                Eval: 1,
+                                JSON: 2 };
+
 // The different script break point types.
 Debug.ScriptBreakPointType = { ScriptId: 0,
                                ScriptName: 1 };
diff --git a/src/debug.cc b/src/debug.cc
index 5a3722f..0daf564 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -652,7 +652,7 @@
 
   // Mark this script as native and return successfully.
   Handle<Script> script(Script::cast(function->shared()->script()));
-  script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+  script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
   return true;
 }
 
diff --git a/src/factory.cc b/src/factory.cc
index 8b20407..fad3e9c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -177,9 +177,12 @@
   script->set_column_offset(Smi::FromInt(0));
   script->set_data(Heap::undefined_value());
   script->set_context_data(Heap::undefined_value());
-  script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
+  script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
+  script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
   script->set_wrapper(*wrapper);
   script->set_line_ends(Heap::undefined_value());
+  script->set_eval_from_function(Heap::undefined_value());
+  script->set_eval_from_instructions_offset(Smi::FromInt(0));
 
   return script;
 }
diff --git a/src/frames.h b/src/frames.h
index a75befb..e250609 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -443,7 +443,8 @@
   // the sentinel as its context, it is an arguments adaptor frame. It
   // must be tagged as a small integer to avoid GC issues. Crud.
   enum {
-    SENTINEL = (1 << kSmiTagSize) | kSmiTag
+    SENTINEL = (1 << kSmiTagSize) | kSmiTag,
+    NON_SENTINEL = ~SENTINEL
   };
 
   virtual Type type() const { return ARGUMENTS_ADAPTOR; }
diff --git a/src/globals.h b/src/globals.h
index 6ac59b6..2b0fe15 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -111,11 +111,12 @@
 
 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
 
-const int kCharSize     = sizeof(char);    // NOLINT
-const int kShortSize    = sizeof(short);   // NOLINT
-const int kIntSize      = sizeof(int);     // NOLINT
-const int kDoubleSize   = sizeof(double);  // NOLINT
-const int kPointerSize  = sizeof(void*);   // NOLINT
+const int kCharSize     = sizeof(char);      // NOLINT
+const int kShortSize    = sizeof(short);     // NOLINT
+const int kIntSize      = sizeof(int);       // NOLINT
+const int kDoubleSize   = sizeof(double);    // NOLINT
+const int kPointerSize  = sizeof(void*);     // NOLINT
+const int kIntptrSize   = sizeof(intptr_t);  // NOLINT
 
 #if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
diff --git a/src/handles.cc b/src/handles.cc
index 0b9fc0f..44ca602 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -222,6 +222,12 @@
 }
 
 
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
+                                   Handle<Object> key) {
+  CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
+}
+
+
 Handle<Object> IgnoreAttributesAndSetLocalProperty(
     Handle<JSObject> object,
     Handle<String> key,
@@ -231,6 +237,7 @@
       IgnoreAttributesAndSetLocalProperty(*key, *value, attributes), Object);
 }
 
+
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
@@ -308,13 +315,15 @@
 
 Handle<Object> DeleteElement(Handle<JSObject> obj,
                              uint32_t index) {
-  CALL_HEAP_FUNCTION(obj->DeleteElement(index), Object);
+  CALL_HEAP_FUNCTION(obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+                     Object);
 }
 
 
 Handle<Object> DeleteProperty(Handle<JSObject> obj,
                               Handle<String> prop) {
-  CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop), Object);
+  CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+                     Object);
 }
 
 
diff --git a/src/handles.h b/src/handles.h
index 306b016..af638b8 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -119,15 +119,15 @@
   static int NumberOfHandles();
 
   // Creates a new handle with the given value.
-  static inline void** CreateHandle(void* value) {
+  static inline Object** CreateHandle(Object* value) {
     void** result = current_.next;
     if (result == current_.limit) result = Extend();
     // Update the current next field, set the value in the created
     // handle, and return the result.
     ASSERT(result < current_.limit);
     current_.next = result + 1;
-    *result = value;
-    return result;
+    *reinterpret_cast<Object**>(result) = value;
+    return reinterpret_cast<Object**>(result);
   }
 
  private:
@@ -202,6 +202,9 @@
                                 Handle<Object> value,
                                 PropertyAttributes attributes);
 
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
+                                   Handle<Object> key);
+
 Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
                                                    Handle<String> key,
                                                    Handle<Object> value,
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 3628975..434bf07 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -44,29 +44,6 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Implementation of Register
-
-Register eax = { 0 };
-Register ecx = { 1 };
-Register edx = { 2 };
-Register ebx = { 3 };
-Register esp = { 4 };
-Register ebp = { 5 };
-Register esi = { 6 };
-Register edi = { 7 };
-Register no_reg = { -1 };
-
-XMMRegister xmm0 = { 0 };
-XMMRegister xmm1 = { 1 };
-XMMRegister xmm2 = { 2 };
-XMMRegister xmm3 = { 3 };
-XMMRegister xmm4 = { 4 };
-XMMRegister xmm5 = { 5 };
-XMMRegister xmm6 = { 6 };
-XMMRegister xmm7 = { 7 };
-
-
-// -----------------------------------------------------------------------------
 // Implementation of CpuFeatures
 
 // Safe default is no features.
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index e09038a..79f239d 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -79,15 +79,15 @@
   int code_;
 };
 
-extern Register eax;
-extern Register ecx;
-extern Register edx;
-extern Register ebx;
-extern Register esp;
-extern Register ebp;
-extern Register esi;
-extern Register edi;
-extern Register no_reg;
+const Register eax = { 0 };
+const Register ecx = { 1 };
+const Register edx = { 2 };
+const Register ebx = { 3 };
+const Register esp = { 4 };
+const Register ebp = { 5 };
+const Register esi = { 6 };
+const Register edi = { 7 };
+const Register no_reg = { -1 };
 
 
 struct XMMRegister {
@@ -100,14 +100,14 @@
   int code_;
 };
 
-extern XMMRegister xmm0;
-extern XMMRegister xmm1;
-extern XMMRegister xmm2;
-extern XMMRegister xmm3;
-extern XMMRegister xmm4;
-extern XMMRegister xmm5;
-extern XMMRegister xmm6;
-extern XMMRegister xmm7;
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
 
 enum Condition {
   // any value < 0 is considered no_condition
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
new file mode 100644
index 0000000..49c706d
--- /dev/null
+++ b/src/ia32/codegen-ia32-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_IA32_CODEGEN_IA32_INL_H_
+#define V8_IA32_CODEGEN_IA32_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index c72c126..e9e4061 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -42,6 +42,35 @@
 #define __ ACCESS_MASM(masm_)
 
 // -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ push(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+      __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
+    }
+  }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in reverse order due to the stack.
+  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+    int action = registers_[i];
+    if (action == kPush) {
+      __ pop(RegisterAllocator::ToRegister(i));
+    } else if (action != kIgnore) {
+      action &= ~kSyncedFlag;
+      __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
+    }
+  }
+}
+
+
+// -------------------------------------------------------------------------
 // CodeGenState implementation.
 
 CodeGenState::CodeGenState(CodeGenerator* owner)
@@ -73,7 +102,8 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
-CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+CodeGenerator::CodeGenerator(int buffer_size,
+                             Handle<Script> script,
                              bool is_eval)
     : is_eval_(is_eval),
       script_(script),
@@ -776,38 +806,35 @@
 }
 
 
-// A deferred code class implementing binary operations on likely smis.
-// This class generates both inline code and deferred code.
-// The fastest path is implemented inline.  Deferred code calls
-// the GenericBinaryOpStub stub for slow cases.
+// Call the specialized stub for a binary operation.
 class DeferredInlineBinaryOperation: public DeferredCode {
  public:
   DeferredInlineBinaryOperation(Token::Value op,
-                                OverwriteMode mode,
-                                GenericBinaryFlags flags)
-      : stub_(op, mode, flags), op_(op) {
+                                Register dst,
+                                Register left,
+                                Register right,
+                                OverwriteMode mode)
+      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
     set_comment("[ DeferredInlineBinaryOperation");
   }
 
-  // Consumes its arguments, left and right, leaving them invalid.
-  Result GenerateInlineCode(Result* left, Result* right);
-
   virtual void Generate();
 
  private:
-  GenericBinaryOpStub stub_;
   Token::Value op_;
+  Register dst_;
+  Register left_;
+  Register right_;
+  OverwriteMode mode_;
 };
 
 
 void DeferredInlineBinaryOperation::Generate() {
-  Result left;
-  Result right;
-  enter()->Bind(&left, &right);
-  cgen()->frame()->Push(&left);
-  cgen()->frame()->Push(&right);
-  Result answer = cgen()->frame()->CallStub(&stub_, 2);
-  exit_.Jump(&answer);
+  __ push(left_);
+  __ push(right_);
+  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
@@ -1003,28 +1030,342 @@
 }
 
 
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
                                              Result* left,
                                              Result* right,
                                              OverwriteMode overwrite_mode) {
-  // Implements a binary operation using a deferred code object
-  // and some inline code to operate on smis quickly.
+  // Special handling of div and mod because they use fixed registers.
+  if (op == Token::DIV || op == Token::MOD) {
+    // We need eax as the quotient register, edx as the remainder
+    // register, neither left nor right in eax or edx, and left copied
+    // to eax.
+    Result quotient;
+    Result remainder;
+    bool left_is_in_eax = false;
+    // Step 1: get eax for quotient.
+    if ((left->is_register() && left->reg().is(eax)) ||
+        (right->is_register() && right->reg().is(eax))) {
+      // One or both is in eax.  Use a fresh non-edx register for
+      // them.
+      Result fresh = allocator_->Allocate();
+      ASSERT(fresh.is_valid());
+      if (fresh.reg().is(edx)) {
+        remainder = fresh;
+        fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+      }
+      if (left->is_register() && left->reg().is(eax)) {
+        quotient = *left;
+        *left = fresh;
+        left_is_in_eax = true;
+      }
+      if (right->is_register() && right->reg().is(eax)) {
+        quotient = *right;
+        *right = fresh;
+      }
+      __ mov(fresh.reg(), eax);
+    } else {
+      // Neither left nor right is in eax.
+      quotient = allocator_->Allocate(eax);
+    }
+    ASSERT(quotient.is_register() && quotient.reg().is(eax));
+    ASSERT(!(left->is_register() && left->reg().is(eax)));
+    ASSERT(!(right->is_register() && right->reg().is(eax)));
+
+    // Step 2: get edx for remainder if necessary.
+    if (!remainder.is_valid()) {
+      if ((left->is_register() && left->reg().is(edx)) ||
+          (right->is_register() && right->reg().is(edx))) {
+        Result fresh = allocator_->Allocate();
+        ASSERT(fresh.is_valid());
+        if (left->is_register() && left->reg().is(edx)) {
+          remainder = *left;
+          *left = fresh;
+        }
+        if (right->is_register() && right->reg().is(edx)) {
+          remainder = *right;
+          *right = fresh;
+        }
+        __ mov(fresh.reg(), edx);
+      } else {
+        // Neither left nor right is in edx.
+        remainder = allocator_->Allocate(edx);
+      }
+    }
+    ASSERT(remainder.is_register() && remainder.reg().is(edx));
+    ASSERT(!(left->is_register() && left->reg().is(edx)));
+    ASSERT(!(right->is_register() && right->reg().is(edx)));
+
+    left->ToRegister();
+    right->ToRegister();
+    frame_->Spill(eax);
+    frame_->Spill(edx);
+
+    // Check that left and right are smi tagged.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          (op == Token::DIV) ? eax : edx,
+                                          left->reg(),
+                                          right->reg(),
+                                          overwrite_mode);
+    if (left->reg().is(right->reg())) {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+    } else {
+      // Use the quotient register as a scratch for the tag check.
+      if (!left_is_in_eax) __ mov(eax, left->reg());
+      left_is_in_eax = false;  // About to destroy the value in eax.
+      __ or_(eax, Operand(right->reg()));
+      ASSERT(kSmiTag == 0);  // Adjust test if not the case.
+      __ test(eax, Immediate(kSmiTagMask));
+    }
+    deferred->Branch(not_zero);
+
+    if (!left_is_in_eax) __ mov(eax, left->reg());
+    // Sign extend eax into edx:eax.
+    __ cdq();
+    // Check for 0 divisor.
+    __ test(right->reg(), Operand(right->reg()));
+    deferred->Branch(zero);
+    // Divide edx:eax by the right operand.
+    __ idiv(right->reg());
+
+    // Complete the operation.
+    if (op == Token::DIV) {
+      // Check for negative zero result.  If result is zero, and divisor
+      // is negative, return a floating point negative zero.  The
+      // virtual frame is unchanged in this block, so local control flow
+      // can use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(left->reg(), Operand(left->reg()));
+      __ j(not_zero, &non_zero_result);
+      __ test(right->reg(), Operand(right->reg()));
+      deferred->Branch(negative);
+      __ bind(&non_zero_result);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by
+      // idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      deferred->Branch(equal);
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      deferred->Branch(not_zero);
+      // Tag the result and store it in the quotient register.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&quotient);
+    } else {
+      ASSERT(op == Token::MOD);
+      // Check for a negative zero result.  If the result is zero, and
+      // the dividend is negative, return a floating point negative
+      // zero.  The frame is unchanged in this block, so local control
+      // flow can use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(edx, Operand(edx));
+      __ j(not_zero, &non_zero_result, taken);
+      __ test(left->reg(), Operand(left->reg()));
+      deferred->Branch(negative);
+      __ bind(&non_zero_result);
+      deferred->BindExit();
+      left->Unuse();
+      right->Unuse();
+      frame_->Push(&remainder);
+    }
+    return;
+  }
+
+  // Special handling of shift operations because they use fixed
+  // registers.
+  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+    // Move left out of ecx if necessary.
+    if (left->is_register() && left->reg().is(ecx)) {
+      *left = allocator_->Allocate();
+      ASSERT(left->is_valid());
+      __ mov(left->reg(), ecx);
+    }
+    right->ToRegister(ecx);
+    left->ToRegister();
+    ASSERT(left->is_register() && !left->reg().is(ecx));
+    ASSERT(right->is_register() && right->reg().is(ecx));
+
+    // We will modify right, it must be spilled.
+    frame_->Spill(ecx);
+
+    // Use a fresh answer register to avoid spilling the left operand.
+    Result answer = allocator_->Allocate();
+    ASSERT(answer.is_valid());
+    // Check that both operands are smis using the answer register as a
+    // temporary.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(op,
+                                          answer.reg(),
+                                          left->reg(),
+                                          ecx,
+                                          overwrite_mode);
+    __ mov(answer.reg(), left->reg());
+    __ or_(answer.reg(), Operand(ecx));
+    __ test(answer.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(not_zero);
+
+    // Untag both operands.
+    __ mov(answer.reg(), left->reg());
+    __ sar(answer.reg(), kSmiTagSize);
+    __ sar(ecx, kSmiTagSize);
+    // Perform the operation.
+    switch (op) {
+      case Token::SAR:
+        __ sar(answer.reg());
+        // No checks of result necessary
+        break;
+      case Token::SHR: {
+        Label result_ok;
+        __ shr(answer.reg());
+        // Check that the *unsigned* result fits in a smi.  Neither of
+        // the two high-order bits can be set:
+        //  * 0x80000000: high bit would be lost when smi tagging.
+        //  * 0x40000000: this number would convert to negative when smi
+        //    tagging.
+        // These two cases can only happen with shifts by 0 or 1 when
+        // handed a valid smi.  If the answer cannot be represented by a
+        // smi, restore the left and right arguments, and jump to slow
+        // case.  The low bit of the left argument may be lost, but only
+        // in a case where it is dropped anyway.
+        __ test(answer.reg(), Immediate(0xc0000000));
+        __ j(zero, &result_ok);
+        ASSERT(kSmiTag == 0);
+        __ shl(ecx, kSmiTagSize);
+        deferred->Jump();
+        __ bind(&result_ok);
+        break;
+      }
+      case Token::SHL: {
+        Label result_ok;
+        __ shl(answer.reg());
+        // Check that the *signed* result fits in a smi.
+        __ cmp(answer.reg(), 0xc0000000);
+        __ j(positive, &result_ok);
+        ASSERT(kSmiTag == 0);
+        __ shl(ecx, kSmiTagSize);
+        deferred->Jump();
+        __ bind(&result_ok);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    // Smi-tag the result in answer.
+    ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
+    __ lea(answer.reg(),
+           Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+    deferred->BindExit();
+    left->Unuse();
+    right->Unuse();
+    frame_->Push(&answer);
+    return;
+  }
+
+  // Handle the other binary operations.
+  left->ToRegister();
+  right->ToRegister();
+  // A newly allocated register answer is used to hold the answer.  The
+  // registers containing left and right are not modified so they don't
+  // need to be spilled in the fast case.
+  Result answer = allocator_->Allocate();
+  ASSERT(answer.is_valid());
+
+  // Perform the smi tag check.
   DeferredInlineBinaryOperation* deferred =
-      new DeferredInlineBinaryOperation(op, overwrite_mode, SMI_CODE_INLINED);
-  // Generate the inline code that handles some smi operations,
-  // and jumps to the deferred code for everything else.
-  Result answer = deferred->GenerateInlineCode(left, right);
-  deferred->BindExit(&answer);
+      new DeferredInlineBinaryOperation(op,
+                                        answer.reg(),
+                                        left->reg(),
+                                        right->reg(),
+                                        overwrite_mode);
+  if (left->reg().is(right->reg())) {
+    __ test(left->reg(), Immediate(kSmiTagMask));
+  } else {
+    __ mov(answer.reg(), left->reg());
+    __ or_(answer.reg(), Operand(right->reg()));
+    ASSERT(kSmiTag == 0);  // Adjust test if not the case.
+    __ test(answer.reg(), Immediate(kSmiTagMask));
+  }
+  deferred->Branch(not_zero);
+  __ mov(answer.reg(), left->reg());
+  switch (op) {
+    case Token::ADD:
+      __ add(answer.reg(), Operand(right->reg()));  // Add optimistically.
+      deferred->Branch(overflow);
+      break;
+
+    case Token::SUB:
+      __ sub(answer.reg(), Operand(right->reg()));  // Subtract optimistically.
+      deferred->Branch(overflow);
+      break;
+
+    case Token::MUL: {
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      // Remove smi tag from the left operand (but keep sign).
+      // Left-hand operand has been copied into answer.
+      __ sar(answer.reg(), kSmiTagSize);
+      // Do multiplication of smis, leaving result in answer.
+      __ imul(answer.reg(), Operand(right->reg()));
+      // Go slow on overflows.
+      deferred->Branch(overflow);
+      // Check for negative zero result.  If product is zero, and one
+      // argument is negative, go to slow case.  The frame is unchanged
+      // in this block, so local control flow can use a Label rather
+      // than a JumpTarget.
+      Label non_zero_result;
+      __ test(answer.reg(), Operand(answer.reg()));
+      __ j(not_zero, &non_zero_result, taken);
+      __ mov(answer.reg(), left->reg());
+      __ or_(answer.reg(), Operand(right->reg()));
+      deferred->Branch(negative);
+      __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
+      __ bind(&non_zero_result);
+      break;
+    }
+
+    case Token::BIT_OR:
+      __ or_(answer.reg(), Operand(right->reg()));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(answer.reg(), Operand(right->reg()));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(answer.reg(), Operand(right->reg()));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+  deferred->BindExit();
+  left->Unuse();
+  right->Unuse();
   frame_->Push(&answer);
 }
 
 
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
 class DeferredInlineSmiOperation: public DeferredCode {
  public:
   DeferredInlineSmiOperation(Token::Value op,
+                             Register dst,
+                             Register src,
                              Smi* value,
                              OverwriteMode overwrite_mode)
       : op_(op),
+        dst_(dst),
+        src_(src),
         value_(value),
         overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlineSmiOperation");
@@ -1034,29 +1375,35 @@
 
  private:
   Token::Value op_;
+  Register dst_;
+  Register src_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
 void DeferredInlineSmiOperation::Generate() {
-  Result left;
-  enter()->Bind(&left);
-  cgen()->frame()->Push(&left);
-  cgen()->frame()->Push(value_);
-  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
-  Result answer = cgen()->frame()->CallStub(&igostub, 2);
-  exit_.Jump(&answer);
+  __ push(src_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&stub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
 class DeferredInlineSmiOperationReversed: public DeferredCode {
  public:
   DeferredInlineSmiOperationReversed(Token::Value op,
+                                     Register dst,
                                      Smi* value,
+                                     Register src,
                                      OverwriteMode overwrite_mode)
       : op_(op),
+        dst_(dst),
         value_(value),
+        src_(src),
         overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlineSmiOperationReversed");
   }
@@ -1065,152 +1412,116 @@
 
  private:
   Token::Value op_;
+  Register dst_;
   Smi* value_;
+  Register src_;
   OverwriteMode overwrite_mode_;
 };
 
 
 void DeferredInlineSmiOperationReversed::Generate() {
-  Result right;
-  enter()->Bind(&right);
-  cgen()->frame()->Push(value_);
-  cgen()->frame()->Push(&right);
+  __ push(Immediate(value_));
+  __ push(src_);
   GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
-  Result answer = cgen()->frame()->CallStub(&igostub, 2);
-  exit_.Jump(&answer);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
+// The result of src + value is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
 class DeferredInlineSmiAdd: public DeferredCode {
  public:
-  DeferredInlineSmiAdd(Smi* value,
+  DeferredInlineSmiAdd(Register dst,
+                       Smi* value,
                        OverwriteMode overwrite_mode)
-      : value_(value),
-        overwrite_mode_(overwrite_mode) {
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlineSmiAdd");
   }
 
   virtual void Generate();
 
  private:
+  Register dst_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
+void DeferredInlineSmiAdd::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  __ sub(Operand(dst_), Immediate(value_));
+  __ push(dst_);
+  __ push(Immediate(value_));
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of value + src is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative addition and call the appropriate
+// specialized stub for add.  The result is left in dst.
 class DeferredInlineSmiAddReversed: public DeferredCode {
  public:
-  DeferredInlineSmiAddReversed(Smi* value,
+  DeferredInlineSmiAddReversed(Register dst,
+                               Smi* value,
                                OverwriteMode overwrite_mode)
-      : value_(value),
-        overwrite_mode_(overwrite_mode) {
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlineSmiAddReversed");
   }
 
   virtual void Generate();
 
  private:
+  Register dst_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
+void DeferredInlineSmiAddReversed::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  __ sub(Operand(dst_), Immediate(value_));
+  __ push(Immediate(value_));
+  __ push(dst_);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+}
+
+
+// The result of src - value is in dst.  It either overflowed or was not
+// smi tagged.  Undo the speculative subtraction and call the
+// appropriate specialized stub for subtract.  The result is left in
+// dst.
 class DeferredInlineSmiSub: public DeferredCode {
  public:
-  DeferredInlineSmiSub(Smi* value,
+  DeferredInlineSmiSub(Register dst,
+                       Smi* value,
                        OverwriteMode overwrite_mode)
-      : value_(value),
-        overwrite_mode_(overwrite_mode) {
+      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlineSmiSub");
   }
 
   virtual void Generate();
 
  private:
+  Register dst_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
-#undef __
-#define __ ACCESS_MASM(cgen()->masm())
-
-
-void DeferredInlineSmiAdd::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  Result left;  // Initially left + value_.
-  enter()->Bind(&left);
-  left.ToRegister();
-  cgen()->frame()->Spill(left.reg());
-  __ sub(Operand(left.reg()), Immediate(value_));
-  cgen()->frame()->Push(&left);
-  cgen()->frame()->Push(value_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  Result answer = cgen()->frame()->CallStub(&igostub, 2);
-  exit_.Jump(&answer);
-}
-
-
-void DeferredInlineSmiAddReversed::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  Result right;  // Initially value_ + right.
-  enter()->Bind(&right);
-  right.ToRegister();
-  cgen()->frame()->Spill(right.reg());
-  __ sub(Operand(right.reg()), Immediate(value_));
-  cgen()->frame()->Push(value_);
-  cgen()->frame()->Push(&right);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  Result answer = cgen()->frame()->CallStub(&igostub, 2);
-  exit_.Jump(&answer);
-}
-
-
 void DeferredInlineSmiSub::Generate() {
   // Undo the optimistic sub operation and call the shared stub.
-  Result left;  // Initially left - value_.
-  enter()->Bind(&left);
-  left.ToRegister();
-  cgen()->frame()->Spill(left.reg());
-  __ add(Operand(left.reg()), Immediate(value_));
-  cgen()->frame()->Push(&left);
-  cgen()->frame()->Push(value_);
+  __ add(Operand(dst_), Immediate(value_));
+  __ push(dst_);
+  __ push(Immediate(value_));
   GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  Result answer = cgen()->frame()->CallStub(&igostub, 2);
-  exit_.Jump(&answer);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-
-class DeferredInlineSmiSubReversed: public DeferredCode {
- public:
-  DeferredInlineSmiSubReversed(Smi* value,
-                               OverwriteMode overwrite_mode)
-      : value_(value),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiSubReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSubReversed::Generate() {
-  // Call the shared stub.
-  Result right;
-  enter()->Bind(&right);
-  cgen()->frame()->Push(value_);
-  cgen()->frame()->Push(&right);
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  Result answer = cgen()->frame()->CallStub(&igostub, 2);
-  exit_.Jump(&answer);
+  __ CallStub(&igostub);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
@@ -1248,19 +1559,24 @@
     case Token::ADD: {
       operand->ToRegister();
       frame_->Spill(operand->reg());
-      __ add(Operand(operand->reg()), Immediate(value));
 
+      // Optimistically add.  Call the specialized add stub if the
+      // result is not a smi or overflows.
       DeferredCode* deferred = NULL;
       if (reversed) {
-        deferred = new DeferredInlineSmiAddReversed(smi_value, overwrite_mode);
+        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+                                                    smi_value,
+                                                    overwrite_mode);
       } else {
-        deferred = new DeferredInlineSmiAdd(smi_value, overwrite_mode);
+        deferred = new DeferredInlineSmiAdd(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
       }
-      deferred->SetEntryFrame(operand);
-      deferred->enter()->Branch(overflow, operand, not_taken);
+      __ add(Operand(operand->reg()), Immediate(value));
+      deferred->Branch(overflow);
       __ test(operand->reg(), Immediate(kSmiTagMask));
-      deferred->enter()->Branch(not_zero, operand, not_taken);
-      deferred->BindExit(operand);
+      deferred->Branch(not_zero);
+      deferred->BindExit();
       frame_->Push(operand);
       break;
     }
@@ -1269,31 +1585,37 @@
       DeferredCode* deferred = NULL;
       Result answer;  // Only allocate a new register if reversed.
       if (reversed) {
+        // The reversed case is only hit when the right operand is not a
+        // constant.
+        ASSERT(operand->is_register());
         answer = allocator()->Allocate();
         ASSERT(answer.is_valid());
-        deferred = new DeferredInlineSmiSubReversed(smi_value, overwrite_mode);
         __ Set(answer.reg(), Immediate(value));
-        // We are in the reversed case so they can't both be Smi constants.
-        ASSERT(operand->is_register());
+        deferred = new DeferredInlineSmiOperationReversed(op,
+                                                          answer.reg(),
+                                                          smi_value,
+                                                          operand->reg(),
+                                                          overwrite_mode);
         __ sub(answer.reg(), Operand(operand->reg()));
       } else {
         operand->ToRegister();
         frame_->Spill(operand->reg());
-        deferred = new DeferredInlineSmiSub(smi_value, overwrite_mode);
-        __ sub(Operand(operand->reg()), Immediate(value));
         answer = *operand;
+        deferred = new DeferredInlineSmiSub(operand->reg(),
+                                            smi_value,
+                                            overwrite_mode);
+        __ sub(Operand(operand->reg()), Immediate(value));
       }
-      deferred->SetEntryFrame(operand);
-      deferred->enter()->Branch(overflow, operand, not_taken);
+      deferred->Branch(overflow);
       __ test(answer.reg(), Immediate(kSmiTagMask));
-      deferred->enter()->Branch(not_zero, operand, not_taken);
+      deferred->Branch(not_zero);
+      deferred->BindExit();
       operand->Unuse();
-      deferred->BindExit(&answer);
       frame_->Push(&answer);
       break;
     }
 
-    case Token::SAR: {
+    case Token::SAR:
       if (reversed) {
         Result constant_operand(value);
         LikelySmiBinaryOperation(op, &constant_operand, operand,
@@ -1302,23 +1624,26 @@
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
         int shift_value = int_value & 0x1f;
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
         operand->ToRegister();
+        frame_->Spill(operand->reg());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
         __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->enter()->Branch(not_zero, operand, not_taken);
+        deferred->Branch(not_zero);
         if (shift_value > 0) {
-          frame_->Spill(operand->reg());
           __ sar(operand->reg(), shift_value);
           __ and_(operand->reg(), ~kSmiTagMask);
         }
-        deferred->BindExit(operand);
+        deferred->BindExit();
         frame_->Push(operand);
       }
       break;
-    }
 
-    case Token::SHR: {
+    case Token::SHR:
       if (reversed) {
         Result constant_operand(value);
         LikelySmiBinaryOperation(op, &constant_operand, operand,
@@ -1327,32 +1652,35 @@
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
         int shift_value = int_value & 0x1f;
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
         operand->ToRegister();
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->enter()->Branch(not_zero, operand, not_taken);
         Result answer = allocator()->Allocate();
         ASSERT(answer.is_valid());
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           answer.reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
         __ mov(answer.reg(), operand->reg());
         __ sar(answer.reg(), kSmiTagSize);
         __ shr(answer.reg(), shift_value);
         // A negative Smi shifted right two is in the positive Smi range.
         if (shift_value < 2) {
           __ test(answer.reg(), Immediate(0xc0000000));
-          deferred->enter()->Branch(not_zero, operand, not_taken);
+          deferred->Branch(not_zero);
         }
         operand->Unuse();
         ASSERT(kSmiTagSize == times_2);  // Adjust the code if not true.
         __ lea(answer.reg(),
                Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
-        deferred->BindExit(&answer);
+        deferred->BindExit();
         frame_->Push(&answer);
       }
       break;
-    }
 
-    case Token::SHL: {
+    case Token::SHL:
       if (reversed) {
         Result constant_operand(value);
         LikelySmiBinaryOperation(op, &constant_operand, operand,
@@ -1361,14 +1689,30 @@
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
         int shift_value = int_value & 0x1f;
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
         operand->ToRegister();
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->enter()->Branch(not_zero, operand, not_taken);
-        if (shift_value != 0) {
+        if (shift_value == 0) {
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          deferred->BindExit();
+          frame_->Push(operand);
+        } else {
+          // Use a fresh temporary for nonzero shift values.
           Result answer = allocator()->Allocate();
           ASSERT(answer.is_valid());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             answer.reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
           __ mov(answer.reg(), operand->reg());
           ASSERT(kSmiTag == 0);  // adjust code if not the case
           // We do no shifts, only the Smi conversion, if shift_value is 1.
@@ -1376,35 +1720,37 @@
             __ shl(answer.reg(), shift_value - 1);
           }
           // Convert int result to Smi, checking that it is in int range.
-          ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
           __ add(answer.reg(), Operand(answer.reg()));
-          deferred->enter()->Branch(overflow, operand, not_taken);
+          deferred->Branch(overflow);
+          deferred->BindExit();
           operand->Unuse();
-          deferred->BindExit(&answer);
           frame_->Push(&answer);
-        } else {
-          deferred->BindExit(operand);
-          frame_->Push(operand);
         }
       }
       break;
-    }
 
     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND: {
+      operand->ToRegister();
+      frame_->Spill(operand->reg());
       DeferredCode* deferred = NULL;
       if (reversed) {
-        deferred = new DeferredInlineSmiOperationReversed(op, smi_value,
+        deferred = new DeferredInlineSmiOperationReversed(op,
+                                                          operand->reg(),
+                                                          smi_value,
+                                                          operand->reg(),
                                                           overwrite_mode);
       } else {
-        deferred =  new DeferredInlineSmiOperation(op, smi_value,
+        deferred =  new DeferredInlineSmiOperation(op,
+                                                   operand->reg(),
+                                                   operand->reg(),
+                                                   smi_value,
                                                    overwrite_mode);
       }
-      operand->ToRegister();
       __ test(operand->reg(), Immediate(kSmiTagMask));
-      deferred->enter()->Branch(not_zero, operand, not_taken);
-      frame_->Spill(operand->reg());
+      deferred->Branch(not_zero);
       if (op == Token::BIT_AND) {
         __ and_(Operand(operand->reg()), Immediate(value));
       } else if (op == Token::BIT_XOR) {
@@ -1417,7 +1763,7 @@
           __ or_(Operand(operand->reg()), Immediate(value));
         }
       }
-      deferred->BindExit(operand);
+      deferred->BindExit();
       frame_->Push(operand);
       break;
     }
@@ -1684,7 +2030,7 @@
 
 class DeferredStackCheck: public DeferredCode {
  public:
-  explicit DeferredStackCheck() {
+  DeferredStackCheck() {
     set_comment("[ DeferredStackCheck");
   }
 
@@ -1693,11 +2039,8 @@
 
 
 void DeferredStackCheck::Generate() {
-  enter()->Bind();
   StackCheckStub stub;
-  Result ignored = cgen()->frame()->CallStub(&stub, 0);
-  ignored.Unuse();
-  exit_.Jump();
+  __ CallStub(&stub);
 }
 
 
@@ -1707,7 +2050,7 @@
     ExternalReference stack_guard_limit =
         ExternalReference::address_of_stack_guard_limit();
     __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
-    deferred->enter()->Branch(below, not_taken);
+    deferred->Branch(below);
     deferred->BindExit();
   }
 }
@@ -3559,43 +3902,45 @@
 }
 
 
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function.  Leave the regexp boilerplate in
+// 'boilerplate'.
 class DeferredRegExpLiteral: public DeferredCode {
  public:
-  explicit DeferredRegExpLiteral(RegExpLiteral* node) : node_(node) {
+  DeferredRegExpLiteral(Register boilerplate,
+                        Register literals,
+                        RegExpLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
     set_comment("[ DeferredRegExpLiteral");
   }
 
-  virtual void Generate();
+  void Generate();
 
  private:
+  Register boilerplate_;
+  Register literals_;
   RegExpLiteral* node_;
 };
 
 
 void DeferredRegExpLiteral::Generate() {
-  Result literals;
-  enter()->Bind(&literals);
   // Since the entry is undefined we call the runtime system to
   // compute the literal.
-
-  VirtualFrame* frame = cgen()->frame();
   // Literal array (0).
-  frame->Push(&literals);
+  __ push(literals_);
   // Literal index (1).
-  frame->Push(Smi::FromInt(node_->literal_index()));
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
   // RegExp pattern (2).
-  frame->Push(node_->pattern());
+  __ push(Immediate(node_->pattern()));
   // RegExp flags (3).
-  frame->Push(node_->flags());
-  Result boilerplate =
-      frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  exit_.Jump(&boilerplate);
+  __ push(Immediate(node_->flags()));
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
 }
 
 
 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
   Comment cmnt(masm_, "[ RegExp Literal");
-  DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(node);
 
   // Retrieve the literals array and check the allocated entry.  Begin
   // with a writable copy of the function of this activation in a
@@ -3610,65 +3955,63 @@
          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
 
   // Load the literal at the ast saved index.
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   Result boilerplate = allocator_->Allocate();
   ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
 
   // Check whether we need to materialize the RegExp object.  If so,
   // jump to the deferred code passing the literals array.
+  DeferredRegExpLiteral* deferred =
+      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
   __ cmp(boilerplate.reg(), Factory::undefined_value());
-  deferred->enter()->Branch(equal, &literals, not_taken);
-
+  deferred->Branch(equal);
+  deferred->BindExit();
   literals.Unuse();
-  // The deferred code returns the boilerplate object.
-  deferred->BindExit(&boilerplate);
 
   // Push the boilerplate object.
   frame_->Push(&boilerplate);
 }
 
 
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateObjectLiteral.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function.  Leave the object boilerplate in
+// 'boilerplate'.
 class DeferredObjectLiteral: public DeferredCode {
  public:
-  explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
+  DeferredObjectLiteral(Register boilerplate,
+                        Register literals,
+                        ObjectLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
     set_comment("[ DeferredObjectLiteral");
   }
 
-  virtual void Generate();
+  void Generate();
 
  private:
+  Register boilerplate_;
+  Register literals_;
   ObjectLiteral* node_;
 };
 
 
 void DeferredObjectLiteral::Generate() {
-  Result literals;
-  enter()->Bind(&literals);
   // Since the entry is undefined we call the runtime system to
   // compute the literal.
-
-  VirtualFrame* frame = cgen()->frame();
   // Literal array (0).
-  frame->Push(&literals);
+  __ push(literals_);
   // Literal index (1).
-  frame->Push(Smi::FromInt(node_->literal_index()));
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
   // Constant properties (2).
-  frame->Push(node_->constant_properties());
-  Result boilerplate =
-      frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  exit_.Jump(&boilerplate);
+  __ push(Immediate(node_->constant_properties()));
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
 }
 
 
 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
 
   // Retrieve the literals array and check the allocated entry.  Begin
   // with a writable copy of the function of this activation in a
@@ -3683,20 +4026,20 @@
          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
 
   // Load the literal at the ast saved index.
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   Result boilerplate = allocator_->Allocate();
   ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
 
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code passing the literals array.
+  DeferredObjectLiteral* deferred =
+      new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
   __ cmp(boilerplate.reg(), Factory::undefined_value());
-  deferred->enter()->Branch(equal, &literals, not_taken);
-
+  deferred->Branch(equal);
+  deferred->BindExit();
   literals.Unuse();
-  // The deferred code returns the boilerplate object.
-  deferred->BindExit(&boilerplate);
 
   // Push the boilerplate object.
   frame_->Push(&boilerplate);
@@ -3766,45 +4109,42 @@
 }
 
 
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateArrayLiteralBoilerplate.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function.  Leave the array boilerplate in 'boilerplate'.
 class DeferredArrayLiteral: public DeferredCode {
  public:
-  explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
+  DeferredArrayLiteral(Register boilerplate,
+                       Register literals,
+                       ArrayLiteral* node)
+      : boilerplate_(boilerplate), literals_(literals), node_(node) {
     set_comment("[ DeferredArrayLiteral");
   }
 
-  virtual void Generate();
+  void Generate();
 
  private:
+  Register boilerplate_;
+  Register literals_;
   ArrayLiteral* node_;
 };
 
 
 void DeferredArrayLiteral::Generate() {
-  Result literals;
-  enter()->Bind(&literals);
   // Since the entry is undefined we call the runtime system to
   // compute the literal.
-
-  VirtualFrame* frame = cgen()->frame();
   // Literal array (0).
-  frame->Push(&literals);
+  __ push(literals_);
   // Literal index (1).
-  frame->Push(Smi::FromInt(node_->literal_index()));
+  __ push(Immediate(Smi::FromInt(node_->literal_index())));
   // Constant properties (2).
-  frame->Push(node_->literals());
-  Result boilerplate =
-      frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-  exit_.Jump(&boilerplate);
+  __ push(Immediate(node_->literals()));
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
 }
 
 
 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
   Comment cmnt(masm_, "[ ArrayLiteral");
-  DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
 
   // Retrieve the literals array and check the allocated entry.  Begin
   // with a writable copy of the function of this activation in a
@@ -3819,24 +4159,23 @@
          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
 
   // Load the literal at the ast saved index.
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   Result boilerplate = allocator_->Allocate();
   ASSERT(boilerplate.is_valid());
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
 
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code passing the literals array.
+  DeferredArrayLiteral* deferred =
+      new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
   __ cmp(boilerplate.reg(), Factory::undefined_value());
-  deferred->enter()->Branch(equal, &literals, not_taken);
-
+  deferred->Branch(equal);
+  deferred->BindExit();
   literals.Unuse();
-  // The deferred code returns the boilerplate object.
-  deferred->BindExit(&boilerplate);
 
-  // Push the resulting array literal on the stack.
+  // Push the resulting array literal boilerplate on the stack.
   frame_->Push(&boilerplate);
-
   // Clone the boilerplate object.
   Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
   if (node->depth() == 1) {
@@ -4757,63 +5096,90 @@
 }
 
 
-class DeferredCountOperation: public DeferredCode {
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation, call
+// into the runtime to convert the argument to a number, and call the
+// specialized add or subtract stub.  The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
  public:
-  DeferredCountOperation(bool is_postfix,
-                         bool is_increment,
-                         int target_size)
-      : is_postfix_(is_postfix),
-        is_increment_(is_increment),
-        target_size_(target_size) {
+  DeferredPrefixCountOperation(Register dst, bool is_increment)
+      : dst_(dst), is_increment_(is_increment) {
     set_comment("[ DeferredCountOperation");
   }
 
   virtual void Generate();
 
  private:
-  bool is_postfix_;
+  Register dst_;
   bool is_increment_;
-  int target_size_;
 };
 
 
-#undef __
-#define __ ACCESS_MASM(cgen()->masm())
-
-
-void DeferredCountOperation::Generate() {
-  Result value;
-  enter()->Bind(&value);
-  VirtualFrame* frame = cgen()->frame();
+void DeferredPrefixCountOperation::Generate() {
   // Undo the optimistic smi operation.
-  value.ToRegister();
-  frame->Spill(value.reg());
   if (is_increment_) {
-    __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
   } else {
-    __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
   }
-  frame->Push(&value);
-  value = frame->InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION, 1);
-  frame->Push(&value);
-  if (is_postfix_) {  // Fix up copy of old value with ToNumber(value).
-    // This is only safe because VisitCountOperation makes this frame slot
-    // beneath the reference a register, which is spilled at the above call.
-    // We cannot safely write to constants or copies below the water line.
-    frame->StoreToElementAt(target_size_ + 1);
-  }
-  frame->Push(Smi::FromInt(1));
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(1)));
   if (is_increment_) {
-    value = frame->CallRuntime(Runtime::kNumberAdd, 2);
+    __ CallRuntime(Runtime::kNumberAdd, 2);
   } else {
-    value = frame->CallRuntime(Runtime::kNumberSub, 2);
+    __ CallRuntime(Runtime::kNumberSub, 2);
   }
-  exit_.Jump(&value);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
-#undef __
-#define __ ACCESS_MASM(masm_)
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation and call
+// into the runtime to convert the argument to a number.  Update the
+// original value in old.  Call the specialized add or subtract stub.
+// The result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+      : dst_(dst), old_(old), is_increment_(is_increment) {
+    set_comment("[ DeferredCountOperation");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_;
+  Register old_;
+  bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
+  // Undo the optimistic smi operation.
+  if (is_increment_) {
+    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
+  } else {
+    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
+  }
+  __ push(dst_);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+  // Save the result of ToNumber to use as the old value.
+  __ push(eax);
+
+  // Call the runtime for the addition or subtraction.
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(1)));
+  if (is_increment_) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(old_);
+}
 
 
 void CodeGenerator::VisitCountOperation(CountOperation* node) {
@@ -4825,96 +5191,93 @@
   Variable* var = node->expression()->AsVariableProxy()->AsVariable();
   bool is_const = (var != NULL && var->mode() == Variable::CONST);
 
-  // Postfix operators need a stack slot under the reference to hold
-  // the old value while the new one is being stored.
-  if (is_postfix) {
-    frame_->Push(Smi::FromInt(0));
-  }
+  // Postfix operations need a stack slot under the reference to hold
+  // the old value while the new value is being stored.  This is so that
+  // in the case that storing the new value requires a call, the old
+  // value will be in the frame to be spilled.
+  if (is_postfix) frame_->Push(Smi::FromInt(0));
 
   { Reference target(this, node->expression());
     if (target.is_illegal()) {
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
-      if (!is_postfix) {
-        frame_->Push(Smi::FromInt(0));
-      }
+      if (!is_postfix) frame_->Push(Smi::FromInt(0));
       return;
     }
     target.TakeValue(NOT_INSIDE_TYPEOF);
 
-    DeferredCountOperation* deferred =
-        new DeferredCountOperation(is_postfix, is_increment, target.size());
+    Result new_value = frame_->Pop();
+    new_value.ToRegister();
 
-    Result value = frame_->Pop();
-    value.ToRegister();
-
-    // Postfix: Store the old value as the result.
+    Result old_value;  // Only allocated in the postfix case.
     if (is_postfix) {
-      // Explicitly back the slot for the old value with a new register.
-      // This improves performance in some cases.
-      Result old_value = allocator_->Allocate();
+      // Allocate a temporary to preserve the old value.
+      old_value = allocator_->Allocate();
       ASSERT(old_value.is_valid());
-      __ mov(old_value.reg(), value.reg());
-      // SetElement must not create a constant element or a copy in this slot,
-      // since we will write to it, below the waterline, in deferred code.
-      frame_->SetElementAt(target.size(), &old_value);
+      __ mov(old_value.reg(), new_value.reg());
     }
+    // Ensure the new value is writable.
+    frame_->Spill(new_value.reg());
 
-    // Perform optimistic increment/decrement.  Ensure the value is
-    // writable.
-    frame_->Spill(value.reg());
-    ASSERT(allocator_->count(value.reg()) == 1);
-
-    // In order to combine the overflow and the smi check, we need to
-    // be able to allocate a byte register.  We attempt to do so
-    // without spilling.  If we fail, we will generate separate
-    // overflow and smi checks.
+    // In order to combine the overflow and the smi tag check, we need
+    // to be able to allocate a byte register.  We attempt to do so
+    // without spilling.  If we fail, we will generate separate overflow
+    // and smi tag checks.
     //
-    // We need to allocate and clear the temporary byte register
-    // before performing the count operation since clearing the
-    // register using xor will clear the overflow flag.
+    // We allocate and clear the temporary byte register before
+    // performing the count operation since clearing the register using
+    // xor will clear the overflow flag.
     Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
     if (tmp.is_valid()) {
       __ Set(tmp.reg(), Immediate(0));
     }
 
-    if (is_increment) {
-      __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+    DeferredCode* deferred = NULL;
+    if (is_postfix) {
+      deferred = new DeferredPostfixCountOperation(new_value.reg(),
+                                                   old_value.reg(),
+                                                   is_increment);
     } else {
-      __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
+      deferred = new DeferredPrefixCountOperation(new_value.reg(),
+                                                  is_increment);
     }
 
-    // If the count operation didn't overflow and the result is a
-    // valid smi, we're done. Otherwise, we jump to the deferred
-    // slow-case code.
-    //
-    // We combine the overflow and the smi check if we could
-    // successfully allocate a temporary byte register.
+    if (is_increment) {
+      __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
+    } else {
+      __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
+    }
+
+    // If the count operation didn't overflow and the result is a valid
+    // smi, we're done. Otherwise, we jump to the deferred slow-case
+    // code.
     if (tmp.is_valid()) {
+      // We combine the overflow and the smi tag check if we could
+      // successfully allocate a temporary byte register.
       __ setcc(overflow, tmp.reg());
-      __ or_(Operand(tmp.reg()), value.reg());
+      __ or_(Operand(tmp.reg()), new_value.reg());
       __ test(tmp.reg(), Immediate(kSmiTagMask));
       tmp.Unuse();
-      deferred->enter()->Branch(not_zero, &value, not_taken);
-    } else {  // Otherwise we test separately for overflow and smi check.
-      deferred->SetEntryFrame(&value);
-      deferred->enter()->Branch(overflow, &value, not_taken);
-      __ test(value.reg(), Immediate(kSmiTagMask));
-      deferred->enter()->Branch(not_zero, &value, not_taken);
+      deferred->Branch(not_zero);
+    } else {
+      // Otherwise we test separately for overflow and smi tag.
+      deferred->Branch(overflow);
+      __ test(new_value.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
     }
+    deferred->BindExit();
 
-    // Store the new value in the target if not const.
-    deferred->BindExit(&value);
-    frame_->Push(&value);
-    if (!is_const) {
-      target.SetValue(NOT_CONST_INIT);
-    }
+    // Postfix: store the old value in the allocated slot under the
+    // reference.
+    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+
+    frame_->Push(&new_value);
+    // Non-constant: update the reference.
+    if (!is_const) target.SetValue(NOT_CONST_INIT);
   }
 
-  // Postfix: Discard the new value and use the old.
-  if (is_postfix) {
-    frame_->Drop();
-  }
+  // Postfix: drop the new value and use the old.
+  if (is_postfix) frame_->Drop();
 }
 
 
@@ -5265,9 +5628,14 @@
 #endif
 
 
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst.  The receiver register is restored after the call.
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
-  explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
+  DeferredReferenceGetNamedValue(Register dst,
+                                 Register receiver,
+                                 Handle<String> name)
+      : dst_(dst), receiver_(receiver),  name_(name) {
     set_comment("[ DeferredReferenceGetNamedValue");
   }
 
@@ -5277,14 +5645,41 @@
 
  private:
   Label patch_site_;
+  Register dst_;
+  Register receiver_;
   Handle<String> name_;
 };
 
 
+void DeferredReferenceGetNamedValue::Generate() {
+  __ push(receiver_);
+  __ Set(ecx, Immediate(name_));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // The call must be followed by a test eax instruction to indicate
+  // that the inobject property case was inlined.
+  //
+  // Store the delta to the map check instruction here in the test
+  // instruction.  Use masm_-> instead of the __ macro since the
+  // latter can't return a value.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(receiver_);
+}
+
+
 class DeferredReferenceGetKeyedValue: public DeferredCode {
  public:
-  explicit DeferredReferenceGetKeyedValue(bool is_global)
-      : is_global_(is_global) {
+  explicit DeferredReferenceGetKeyedValue(Register dst,
+                                          Register receiver,
+                                          Register key,
+                                          bool is_global)
+      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
@@ -5294,45 +5689,16 @@
 
  private:
   Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Register key_;
   bool is_global_;
 };
 
 
-#undef __
-#define __ ACCESS_MASM(cgen()->masm())
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  Result receiver;
-  enter()->Bind(&receiver);
-
-  cgen()->frame()->Push(&receiver);
-  cgen()->frame()->Push(name_);
-  Result answer = cgen()->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
-  // The call must be followed by a test eax instruction to indicate
-  // that the inobject property case was inlined.
-  ASSERT(answer.is_register() && answer.reg().is(eax));
-  // Store the delta to the map check instruction here in the test
-  // instruction.  Use cgen()->masm()-> instead of the __ macro since
-  // the latter can't return a value.
-  int delta_to_patch_site =
-      cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use cgen()->masm()-> instead of the __ macro because this
-  // is the instruction that gets patched and coverage code gets in the
-  // way.
-  cgen()->masm()->test(answer.reg(), Immediate(-delta_to_patch_site));
-  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
-  receiver = cgen()->frame()->Pop();
-  exit_.Jump(&receiver, &answer);
-}
-
-
 void DeferredReferenceGetKeyedValue::Generate() {
-  Result receiver;
-  Result key;
-  enter()->Bind(&receiver, &key);
-  cgen()->frame()->Push(&receiver);  // First IC argument.
-  cgen()->frame()->Push(&key);       // Second IC argument.
+  __ push(receiver_);  // First IC argument.
+  __ push(key_);       // Second IC argument.
 
   // Calculate the delta from the IC call instruction to the map check
   // cmp instruction in the inlined version.  This delta is stored in
@@ -5340,34 +5706,25 @@
   // it in the IC initialization code and patch the cmp instruction.
   // This means that we cannot allow test instructions after calls to
   // KeyedLoadIC stubs in other places.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   RelocInfo::Mode mode = is_global_
                          ? RelocInfo::CODE_TARGET_CONTEXT
                          : RelocInfo::CODE_TARGET;
-  Result value = cgen()->frame()->CallKeyedLoadIC(mode);
-  // The result needs to be specifically the eax register because the
-  // offset to the patch site will be expected in a test eax
-  // instruction.
-  ASSERT(value.is_register() && value.reg().is(eax));
-  // The delta from the start of the map-compare instruction to the test
-  // instruction.  We use cgen()->masm() directly here instead of the __
-  // macro because the macro sometimes uses macro expansion to turn into
-  // something that can't return a value.  This is encountered when
-  // doing generated code coverage tests.
-  int delta_to_patch_site =
-      cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use cgen()->masm()-> instead of the __ macro because this
-  // is the instruction that gets patched and coverage code gets in the
-  // way.
-  cgen()->masm()->test(value.reg(), Immediate(-delta_to_patch_site));
+  __ call(ic, mode);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the __
+  // macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
   __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
 
-  // The receiver and key were spilled by the call, so their state as
-  // constants or copies has been changed.  Thus, they need to be
-  // "mergable" in the block at the exit label and are therefore
-  // passed as return results here.
-  key = cgen()->frame()->Pop();
-  receiver = cgen()->frame()->Pop();
-  exit_.Jump(&receiver, &key, &value);
+  if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ pop(key_);
+  __ pop(receiver_);
 }
 
 
@@ -5438,29 +5795,19 @@
       } else {
         // Inline the inobject property case.
         Comment cmnt(masm, "[ Inlined named property load");
-        DeferredReferenceGetNamedValue* deferred =
-            new DeferredReferenceGetNamedValue(GetName());
         Result receiver = cgen_->frame()->Pop();
         receiver.ToRegister();
 
-        // Try to preallocate the value register so that all frames
-        // reaching the deferred code are identical.
-        Result value = cgen_->allocator()->AllocateWithoutSpilling();
-        if (value.is_valid()) {
-          deferred->SetEntryFrame(&receiver);
-        }
+        Result value = cgen_->allocator()->Allocate();
+        ASSERT(value.is_valid());
+        DeferredReferenceGetNamedValue* deferred =
+            new DeferredReferenceGetNamedValue(value.reg(),
+                                               receiver.reg(),
+                                               GetName());
 
         // Check that the receiver is a heap object.
         __ test(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->enter()->Branch(zero, &receiver, not_taken);
-
-        // Do not allocate the value register after binding the patch
-        // site label.  The distance from the patch site to the offset
-        // must be constant.
-        if (!value.is_valid()) {
-          value = cgen_->allocator()->Allocate();
-          ASSERT(value.is_valid());
-        }
+        deferred->Branch(zero);
 
         __ bind(deferred->patch_site());
         // This is the map check instruction that will be patched (so we can't
@@ -5470,7 +5817,7 @@
                   Immediate(Factory::null_value()));
         // This branch is always a forwards branch so it's always a fixed
         // size which allows the assert below to succeed and patching to work.
-        deferred->enter()->Branch(not_equal, &receiver, not_taken);
+        deferred->Branch(not_equal);
 
         // The delta from the patch label to the load offset must be
         // statically known.
@@ -5483,7 +5830,7 @@
         masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
 
         __ IncrementCounter(&Counters::named_load_inline, 1);
-        deferred->BindExit(&receiver, &value);
+        deferred->BindExit();
         cgen_->frame()->Push(&receiver);
         cgen_->frame()->Push(&value);
       }
@@ -5503,28 +5850,34 @@
       // patch the map check if appropriate.
       if (cgen_->loop_nesting() > 0) {
         Comment cmnt(masm, "[ Inlined array index load");
-        DeferredReferenceGetKeyedValue* deferred =
-            new DeferredReferenceGetKeyedValue(is_global);
 
         Result key = cgen_->frame()->Pop();
         Result receiver = cgen_->frame()->Pop();
         key.ToRegister();
         receiver.ToRegister();
 
-        // Try to preallocate the elements and index scratch registers
-        // so that all frames reaching the deferred code are identical.
-        Result elements = cgen_->allocator()->AllocateWithoutSpilling();
-        Result index = cgen_->allocator()->AllocateWithoutSpilling();
-        if (elements.is_valid() && index.is_valid()) {
-          deferred->SetEntryFrame(&receiver, &key);
-        }
+        // Use a fresh temporary to load the elements without destroying
+        // the receiver which is needed for the deferred slow case.
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+
+        // Use a fresh temporary for the index and later the loaded
+        // value.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+
+        DeferredReferenceGetKeyedValue* deferred =
+            new DeferredReferenceGetKeyedValue(index.reg(),
+                                               receiver.reg(),
+                                               key.reg(),
+                                               is_global);
 
         // Check that the receiver is not a smi (only needed if this
         // is not a load from the global context) and that it has the
         // expected map.
         if (!is_global) {
           __ test(receiver.reg(), Immediate(kSmiTagMask));
-          deferred->enter()->Branch(zero, &receiver, &key, not_taken);
+          deferred->Branch(zero);
         }
 
         // Initially, use an invalid map. The map is patched in the IC
@@ -5533,36 +5886,28 @@
         // Use masm-> here instead of the double underscore macro since extra
         // coverage code can interfere with the patching.
         masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-               Immediate(Factory::null_value()));
-        deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);
+                  Immediate(Factory::null_value()));
+        deferred->Branch(not_equal);
 
         // Check that the key is a smi.
         __ test(key.reg(), Immediate(kSmiTagMask));
-        deferred->enter()->Branch(not_zero, &receiver, &key, not_taken);
+        deferred->Branch(not_zero);
 
         // Get the elements array from the receiver and check that it
         // is not a dictionary.
-        if (!elements.is_valid()) {
-          elements = cgen_->allocator()->Allocate();
-          ASSERT(elements.is_valid());
-        }
         __ mov(elements.reg(),
                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
         __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
                Immediate(Factory::hash_table_map()));
-        deferred->enter()->Branch(equal, &receiver, &key, not_taken);
+        deferred->Branch(equal);
 
         // Shift the key to get the actual index value and check that
         // it is within bounds.
-        if (!index.is_valid()) {
-          index = cgen_->allocator()->Allocate();
-          ASSERT(index.is_valid());
-        }
         __ mov(index.reg(), key.reg());
         __ sar(index.reg(), kSmiTagSize);
         __ cmp(index.reg(),
                FieldOperand(elements.reg(), Array::kLengthOffset));
-        deferred->enter()->Branch(above_equal, &receiver, &key, not_taken);
+        deferred->Branch(above_equal);
 
         // Load and check that the result is not the hole.  We could
         // reuse the index or elements register for the value.
@@ -5579,12 +5924,12 @@
         elements.Unuse();
         index.Unuse();
         __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
-        deferred->enter()->Branch(equal, &receiver, &key, not_taken);
+        deferred->Branch(equal);
         __ IncrementCounter(&Counters::keyed_load_inline, 1);
 
+        deferred->BindExit();
         // Restore the receiver and key to the frame and push the
         // result on top of it.
-        deferred->BindExit(&receiver, &key, &value);
         cgen_->frame()->Push(&receiver);
         cgen_->frame()->Push(&key);
         cgen_->frame()->Push(&value);
@@ -5732,350 +6077,6 @@
 }
 
 
-Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
-                                                         Result* right) {
-  MacroAssembler* masm = cgen()->masm();
-  // Perform fast-case smi code for the operation (left <op> right) and
-  // returns the result in a Result.
-  // If any fast-case tests fail, it jumps to the slow-case deferred code,
-  // which calls the binary operation stub, with the arguments (in registers)
-  // on top of the frame.
-  // Consumes its arguments (sets left and right to invalid and frees their
-  // registers).
-
-  left->ToRegister();
-  right->ToRegister();
-  // A newly allocated register answer is used to hold the answer.
-  // The registers containing left and right are not modified in
-  // most cases, so they usually don't need to be spilled in the fast case.
-  Result answer = cgen()->allocator()->Allocate();
-
-  ASSERT(answer.is_valid());
-  // Perform the smi check.
-  if (left->reg().is(right->reg())) {
-    __ test(left->reg(), Immediate(kSmiTagMask));
-  } else {
-    __ mov(answer.reg(), left->reg());
-    __ or_(answer.reg(), Operand(right->reg()));
-    ASSERT(kSmiTag == 0);  // adjust zero check if not the case
-    __ test(answer.reg(), Immediate(kSmiTagMask));
-  }
-  switch (op_) {
-    case Token::ADD:
-      SetEntryFrame(left, right);
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      __ add(answer.reg(), Operand(right->reg()));  // add optimistically
-      enter()->Branch(overflow, left, right, not_taken);
-      break;
-
-    case Token::SUB:
-      SetEntryFrame(left, right);
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      __ sub(answer.reg(), Operand(right->reg()));  // subtract optimistically
-      enter()->Branch(overflow, left, right, not_taken);
-      break;
-
-    case Token::MUL: {
-      SetEntryFrame(left, right);
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // adjust code below if not the case
-      // Remove tag from the left operand (but keep sign).
-      // Left hand operand has been copied into answer.
-      __ sar(answer.reg(), kSmiTagSize);
-      // Do multiplication of smis, leaving result in answer.
-      __ imul(answer.reg(), Operand(right->reg()));
-      // Go slow on overflows.
-      enter()->Branch(overflow, left, right, not_taken);
-      // Check for negative zero result.  If product is zero,
-      // and one argument is negative, go to slow case.
-      // The frame is unchanged in this block, so local control flow can
-      // use a Label rather than a JumpTarget.
-      Label non_zero_result;
-      __ test(answer.reg(), Operand(answer.reg()));
-      __ j(not_zero, &non_zero_result, taken);
-      __ mov(answer.reg(), left->reg());
-      __ or_(answer.reg(), Operand(right->reg()));
-      enter()->Branch(negative, left, right, not_taken);
-      __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
-      __ bind(&non_zero_result);
-      break;
-    }
-
-    case Token::DIV:  // Fall through.
-    case Token::MOD: {
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      // Div and mod use the registers eax and edx.  Left and right must
-      // be preserved, because the original operands are needed if we switch
-      // to the slow case.  Move them if either is in eax or edx.
-      // The Result answer should be changed into an alias for eax.
-      // Precondition:
-      // The Results left and right are valid.  They may be the same register,
-      // and may be unspilled.  The Result answer is valid and is distinct
-      // from left and right, and is spilled.
-      // The value in left is copied to answer.
-
-      Result reg_eax = cgen()->allocator()->Allocate(eax);
-      Result reg_edx = cgen()->allocator()->Allocate(edx);
-      // These allocations may have failed, if one of left, right, or answer
-      // is in register eax or edx.
-      bool left_copied_to_eax = false;  // We will make sure this becomes true.
-
-      // Part 1: Get eax
-      if (answer.reg().is(eax)) {
-        reg_eax = answer;
-        left_copied_to_eax = true;
-      } else if (right->reg().is(eax) || left->reg().is(eax)) {
-        // We need a non-edx register to move one or both of left and right to.
-        // We use answer if it is not edx, otherwise we allocate one.
-        if (answer.reg().is(edx)) {
-          reg_edx = answer;
-          answer = cgen()->allocator()->Allocate();
-          ASSERT(answer.is_valid());
-        }
-
-        if (left->reg().is(eax)) {
-          reg_eax = *left;
-          left_copied_to_eax = true;
-          *left = answer;
-        }
-        if (right->reg().is(eax)) {
-          reg_eax = *right;
-          *right = answer;
-        }
-        __ mov(answer.reg(), eax);
-      }
-      // End of Part 1.
-      // reg_eax is valid, and neither left nor right is in eax.
-      ASSERT(reg_eax.is_valid());
-      ASSERT(!left->reg().is(eax));
-      ASSERT(!right->reg().is(eax));
-
-      // Part 2: Get edx
-      // reg_edx is invalid if and only if either left, right,
-      // or answer is in edx.  If edx is valid, then either edx
-      // was free, or it was answer, but answer was reallocated.
-      if (answer.reg().is(edx)) {
-        reg_edx = answer;
-      } else if (right->reg().is(edx) || left->reg().is(edx)) {
-        // Is answer used?
-        if (answer.reg().is(eax) || answer.reg().is(left->reg()) ||
-            answer.reg().is(right->reg())) {
-          answer = cgen()->allocator()->Allocate();
-          ASSERT(answer.is_valid());  // We cannot hit both Allocate() calls.
-        }
-        if (left->reg().is(edx)) {
-          reg_edx = *left;
-          *left = answer;
-        }
-        if (right->reg().is(edx)) {
-          reg_edx = *right;
-          *right = answer;
-        }
-        __ mov(answer.reg(), edx);
-      }
-      // End of Part 2
-      ASSERT(reg_edx.is_valid());
-      ASSERT(!left->reg().is(eax));
-      ASSERT(!right->reg().is(eax));
-
-      answer = reg_eax;  // May free answer, if it was never used.
-      cgen()->frame()->Spill(eax);
-      if (!left_copied_to_eax) {
-        __ mov(eax, left->reg());
-        left_copied_to_eax = true;
-      }
-      cgen()->frame()->Spill(edx);
-
-      // Postcondition:
-      // reg_eax, reg_edx are valid, correct, and spilled.
-      // reg_eax contains the value originally in left
-      // left and right are not eax or edx.  They may or may not be
-      // spilled or distinct.
-      // answer is an alias for reg_eax.
-
-      // Sign extend eax into edx:eax.
-      __ cdq();
-      // Check for 0 divisor.
-      __ test(right->reg(), Operand(right->reg()));
-      enter()->Branch(zero, left, right, not_taken);
-      // Divide edx:eax by the right operand.
-      __ idiv(right->reg());
-      if (op_ == Token::DIV) {
-        // Check for negative zero result.  If result is zero, and divisor
-        // is negative, return a floating point negative zero.
-        // The frame is unchanged in this block, so local control flow can
-        // use a Label rather than a JumpTarget.
-        Label non_zero_result;
-        __ test(left->reg(), Operand(left->reg()));
-        __ j(not_zero, &non_zero_result, taken);
-        __ test(right->reg(), Operand(right->reg()));
-        enter()->Branch(negative, left, right, not_taken);
-        __ bind(&non_zero_result);
-        // Check for the corner case of dividing the most negative smi
-        // by -1. We cannot use the overflow flag, since it is not set
-        // by idiv instruction.
-        ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-        __ cmp(eax, 0x40000000);
-        enter()->Branch(equal, left, right, not_taken);
-        // Check that the remainder is zero.
-        __ test(edx, Operand(edx));
-        enter()->Branch(not_zero, left, right, not_taken);
-        // Tag the result and store it in register temp.
-        ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-        __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag));
-      } else {
-        ASSERT(op_ == Token::MOD);
-        // Check for a negative zero result.  If the result is zero, and the
-        // dividend is negative, return a floating point negative zero.
-        // The frame is unchanged in this block, so local control flow can
-        // use a Label rather than a JumpTarget.
-        Label non_zero_result;
-        __ test(edx, Operand(edx));
-        __ j(not_zero, &non_zero_result, taken);
-        __ test(left->reg(), Operand(left->reg()));
-        enter()->Branch(negative, left, right, not_taken);
-        __ bind(&non_zero_result);
-        // The answer is in edx.
-        answer = reg_edx;
-      }
-      break;
-    }
-    case Token::BIT_OR:
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      __ or_(answer.reg(), Operand(right->reg()));
-      break;
-
-    case Token::BIT_AND:
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      __ and_(answer.reg(), Operand(right->reg()));
-      break;
-
-    case Token::BIT_XOR:
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      __ xor_(answer.reg(), Operand(right->reg()));
-      break;
-
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      enter()->Branch(not_zero, left, right, not_taken);
-      __ mov(answer.reg(), left->reg());
-      // Move right into ecx.
-      // Left is in two registers already, so even if left or answer is ecx,
-      // we can move right to it, and use the other one.
-      // Right operand must be in register cl because x86 likes it that way.
-      if (right->reg().is(ecx)) {
-        // Right is already in the right place.  Left may be in the
-        // same register, which causes problems. Always use answer
-        // instead of left, even if left is not ecx, since this avoids
-        // spilling left.
-        *left = answer;
-      } else if (left->reg().is(ecx)) {
-        cgen()->frame()->Spill(left->reg());
-        __ mov(left->reg(), right->reg());
-        *right = *left;
-        *left = answer;  // Use copy of left in answer as left.
-      } else if (answer.reg().is(ecx)) {
-        __ mov(answer.reg(), right->reg());
-        *right = answer;
-      } else {
-        Result reg_ecx = cgen()->allocator()->Allocate(ecx);
-        ASSERT(reg_ecx.is_valid());
-        __ mov(ecx, right->reg());
-        *right = reg_ecx;
-        // Answer and left both contain the left operand.  Use answer, so
-        // left is not spilled.
-        *left = answer;
-      }
-      ASSERT(left->reg().is_valid());
-      ASSERT(!left->reg().is(ecx));
-      ASSERT(right->reg().is(ecx));
-      answer.Unuse();  // Answer may now be being used for left or right.
-      // We will modify left and right, which we do not do in any other
-      // binary operation.  The exits to slow code need to restore the
-      // original values of left and right, or at least values that give
-      // the same answer.
-
-      // We are modifying left and right.  They must be spilled!
-      cgen()->frame()->Spill(left->reg());
-      cgen()->frame()->Spill(right->reg());
-
-      // Remove tags from operands (but keep sign).
-      __ sar(left->reg(), kSmiTagSize);
-      __ sar(ecx, kSmiTagSize);
-      // Perform the operation.
-      switch (op_) {
-        case Token::SAR:
-          __ sar(left->reg());
-          // No checks of result necessary
-          break;
-        case Token::SHR: {
-          __ shr(left->reg());
-          // Check that the *unsigned* result fits in a smi.
-          // Neither of the two high-order bits can be set:
-          // - 0x80000000: high bit would be lost when smi tagging.
-          // - 0x40000000: this number would convert to negative when
-          // Smi tagging these two cases can only happen with shifts
-          // by 0 or 1 when handed a valid smi.
-          // If the answer cannot be represented by a SMI, restore
-          // the left and right arguments, and jump to slow case.
-          // The low bit of the left argument may be lost, but only
-          // in a case where it is dropped anyway.
-          JumpTarget result_ok;
-          __ test(left->reg(), Immediate(0xc0000000));
-          result_ok.Branch(zero, left, taken);
-          __ shl(left->reg());
-          ASSERT(kSmiTag == 0);
-          __ shl(left->reg(), kSmiTagSize);
-          __ shl(right->reg(), kSmiTagSize);
-          enter()->Jump(left, right);
-          result_ok.Bind(left);
-          break;
-        }
-        case Token::SHL: {
-          __ shl(left->reg());
-          // Check that the *signed* result fits in a smi.
-          JumpTarget result_ok;
-          __ cmp(left->reg(), 0xc0000000);
-          result_ok.Branch(positive, left, taken);
-
-          __ shr(left->reg());
-          ASSERT(kSmiTag == 0);
-          __ shl(left->reg(), kSmiTagSize);
-          __ shl(right->reg(), kSmiTagSize);
-          enter()->Jump(left, right);
-          result_ok.Bind(left);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
-      // Smi-tag the result, in left, and make answer an alias for left->
-      answer = *left;
-      answer.ToRegister();
-      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-      __ lea(answer.reg(),
-             Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-  left->Unuse();
-  right->Unuse();
-  return answer;
-}
-
-
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (eax <op> ebx) and
   // leave result in register eax.
@@ -7159,6 +7160,9 @@
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  Label not_outermost_js, not_outermost_js_2;
+#endif
 
   // Setup frame.
   __ push(ebp);
@@ -7177,6 +7181,15 @@
   ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
   __ push(Operand::StaticVariable(c_entry_fp));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ j(NegateCondition(equal), &not_outermost_js);
+  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+  __ bind(&not_outermost_js);
+#endif
+
   // Call a faked try-block that does the invoke.
   __ call(&invoke);
 
@@ -7220,6 +7233,15 @@
   // Pop next_sp.
   __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If current EBP value is the same as js_entry_sp value, it means that
+  // the current function is the outermost.
+  __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+  __ j(NegateCondition(equal), &not_outermost_js_2);
+  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ bind(&not_outermost_js_2);
+#endif
+
   // Restore the top frame descriptor from the stack.
   __ bind(&exit);
   __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 4161370..7636c4e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -561,8 +561,8 @@
 
 
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                          Register scratch,
-                                          Label* miss) {
+                                            Register scratch,
+                                            Label* miss) {
   Label same_contexts;
 
   ASSERT(!holder_reg.is(scratch));
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 6c675f9..b31f706 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -322,6 +322,7 @@
 void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
                                            JSObject* object,
                                            JSObject* holder,
+                                           Smi* lookup_hint,
                                            Register receiver,
                                            Register name,
                                            Register scratch1,
@@ -340,12 +341,15 @@
   __ push(receiver);  // receiver
   __ push(reg);  // holder
   __ push(name);  // name
+  // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
+  // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
+  __ push(Immediate(lookup_hint));
   __ push(scratch2);  // restore return address
 
   // Do tail-call to the runtime system.
   ExternalReference load_ic_property =
       ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
-  __ TailCallRuntime(load_ic_property, 3);
+  __ TailCallRuntime(load_ic_property, 4);
 }
 
 
@@ -670,11 +674,12 @@
   __ push(edx);  // receiver
   __ push(reg);  // holder
   __ push(Operand(ebp, (argc + 3) * kPointerSize));  // name
+  __ push(Immediate(holder->InterceptorPropertyLookupHint(name)));
 
   // Perform call.
   ExternalReference load_interceptor =
       ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
-  __ mov(eax, Immediate(3));
+  __ mov(eax, Immediate(4));
   __ mov(ebx, Immediate(load_interceptor));
 
   CEntryStub stub;
@@ -974,7 +979,18 @@
   Label miss;
 
   __ mov(eax, (Operand(esp, kPointerSize)));
-  GenerateLoadInterceptor(masm(), receiver, holder, eax, ecx, edx, ebx, &miss);
+  // TODO(368): Compile in the whole chain: all the interceptors in
+  // prototypes and ultimate answer.
+  GenerateLoadInterceptor(masm(),
+                          receiver,
+                          holder,
+                          holder->InterceptorPropertyLookupHint(name),
+                          eax,
+                          ecx,
+                          edx,
+                          ebx,
+                          &miss);
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1089,7 +1105,15 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadInterceptor(masm(), receiver, holder, ecx, eax, edx, ebx, &miss);
+  GenerateLoadInterceptor(masm(),
+                          receiver,
+                          holder,
+                          Smi::FromInt(JSObject::kLookupInHolder),
+                          ecx,
+                          eax,
+                          edx,
+                          ebx,
+                          &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 54cfd9d..6e6ebd5 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -545,6 +545,8 @@
 
   bool Equals(VirtualFrame* other);
 
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
   friend class JumpTarget;
 };
 
diff --git a/src/log.cc b/src/log.cc
index c66a422..c1edf4d 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -146,11 +146,18 @@
     return;
   }
 
+  const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
+  if (js_entry_sp == 0) {
+    // Not executing JS now.
+    sample->frames_count = 0;
+    return;
+  }
+
   SafeStackTraceFrameIterator it(
       reinterpret_cast<Address>(sample->fp),
       reinterpret_cast<Address>(sample->sp),
       reinterpret_cast<Address>(sample->sp),
-      reinterpret_cast<Address>(low_stack_bound_));
+      js_entry_sp);
   int i = 0;
   while (!it.done() && i < TickSample::kMaxFramesCount) {
     sample->stack[i++] = it.frame()->pc();
@@ -166,14 +173,13 @@
 //
 class Ticker: public Sampler {
  public:
-  explicit Ticker(int interval, uintptr_t low_stack_bound):
-      Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL),
-      stack_tracer_(low_stack_bound) {}
+  explicit Ticker(int interval):
+      Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL) {}
 
   ~Ticker() { if (IsActive()) Stop(); }
 
   void Tick(TickSample* sample) {
-    if (IsProfiling()) stack_tracer_.Trace(sample);
+    if (IsProfiling()) StackTracer::Trace(sample);
     if (profiler_) profiler_->Insert(sample);
     if (window_) window_->AddState(sample->state);
   }
@@ -201,7 +207,6 @@
  private:
   SlidingStateWindow* window_;
   Profiler* profiler_;
-  StackTracer stack_tracer_;
 };
 
 
@@ -1002,11 +1007,7 @@
 
   current_state_ = &bottom_state_;
 
-  // as log is initialized early with V8, we can assume that JS execution
-  // frames can never reach this point on stack
-  int stack_var;
-  ticker_ = new Ticker(
-      kSamplingIntervalMs, reinterpret_cast<uintptr_t>(&stack_var));
+  ticker_ = new Ticker(kSamplingIntervalMs);
 
   if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
     sliding_state_window_ = new SlidingStateWindow();
diff --git a/src/log.h b/src/log.h
index 10d8833..2f8f81c 100644
--- a/src/log.h
+++ b/src/log.h
@@ -277,14 +277,9 @@
 
 
 // Class that extracts stack trace, used for profiling.
-class StackTracer BASE_EMBEDDED {
+class StackTracer : public AllStatic {
  public:
-  explicit StackTracer(uintptr_t low_stack_bound)
-      : low_stack_bound_(low_stack_bound) { }
-  void Trace(TickSample* sample);
- private:
-
-  uintptr_t low_stack_bound_;
+  static void Trace(TickSample* sample);
 };
 
 
diff --git a/src/memory.h b/src/memory.h
index 1e36bf5..c64699e 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -36,6 +36,10 @@
 
 class Memory {
  public:
+  static uint16_t& uint16_at(Address addr)  {
+    return *reinterpret_cast<uint16_t*>(addr);
+  }
+
   static uint32_t& uint32_at(Address addr)  {
     return *reinterpret_cast<uint32_t*>(addr);
   }
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index 371bc05..f5a12c7 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -1617,6 +1617,11 @@
 };
 
 
+ScriptMirror.prototype.compilationType = function() {
+  return this.script_.compilation_type;
+};
+
+
 ScriptMirror.prototype.lineCount = function() {
   return this.script_.lineCount();
 };
@@ -1638,6 +1643,20 @@
 };
 
 
+ScriptMirror.prototype.evalFromFunction = function() {
+  return MakeMirror(this.script_.eval_from_function);
+};
+
+
+ScriptMirror.prototype.evalFromLocation = function() {
+  var eval_from_function = this.evalFromFunction();
+  if (!eval_from_function.isUndefined()) {
+    var position = this.script_.eval_from_position;
+    return eval_from_function.script().locationFromPosition(position, true);
+  }
+};
+
+
 ScriptMirror.prototype.toText = function() {
   var result = '';
   result += this.name();
@@ -1901,6 +1920,14 @@
       }
       content.sourceLength = mirror.source().length;
       content.scriptType = mirror.scriptType();
+      content.compilationType = mirror.compilationType();
+      if (mirror.compilationType() == 1) {  // Compilation type eval.
+        content.evalFromScript =
+            this.serializeReference(mirror.evalFromFunction().script());
+        var evalFromLocation = mirror.evalFromLocation()
+        content.evalFromLocation = { line: evalFromLocation.line,
+                                     column: evalFromLocation.column}
+      }
       if (mirror.context()) {
         content.context = this.serializeReference(mirror.context());
       }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 475b57b..d34e465 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2112,7 +2112,11 @@
 ACCESSORS(Script, context_data, Object, kContextOffset)
 ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
 ACCESSORS(Script, type, Smi, kTypeOffset)
+ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
+ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset)
+ACCESSORS(Script, eval_from_instructions_offset, Smi,
+          kEvalFrominstructionsOffsetOffset)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -2554,6 +2558,24 @@
 }
 
 
+Smi* JSObject::InterceptorPropertyLookupHint(String* name) {
+  // TODO(antonm): Do we want to do any shortcuts for global object?
+  if (HasFastProperties()) {
+    LookupResult lookup;
+    LocalLookupRealNamedProperty(name, &lookup);
+    if (lookup.IsValid()) {
+      if (lookup.type() == FIELD && lookup.IsCacheable()) {
+        return Smi::FromInt(lookup.GetFieldIndex());
+      }
+    } else {
+      return Smi::FromInt(kLookupInPrototype);
+    }
+  }
+
+  return Smi::FromInt(kLookupInHolder);
+}
+
+
 bool AccessorInfo::all_can_read() {
   return BooleanBit::get(flag(), kAllCanReadBit);
 }
diff --git a/src/objects.cc b/src/objects.cc
index a3526eb..0546578 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -385,7 +385,9 @@
 }
 
 
-Object* JSObject::DeleteLazyProperty(LookupResult* result, String* name) {
+Object* JSObject::DeleteLazyProperty(LookupResult* result,
+                                     String* name,
+                                     DeleteMode mode) {
   HandleScope scope;
   Handle<JSObject> this_handle(this);
   Handle<String> name_handle(name);
@@ -393,7 +395,7 @@
   LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
            &pending_exception);
   if (pending_exception) return Failure::Exception();
-  return this_handle->DeleteProperty(*name_handle);
+  return this_handle->DeleteProperty(*name_handle, mode);
 }
 
 
@@ -2120,7 +2122,7 @@
 }
 
 
-Object* JSObject::DeletePropertyPostInterceptor(String* name) {
+Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) {
   // Check local property, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
@@ -2134,7 +2136,7 @@
   // Attempt to remove the property from the property dictionary.
   Dictionary* dictionary = property_dictionary();
   int entry = dictionary->FindStringEntry(name);
-  if (entry != -1) return dictionary->DeleteProperty(entry);
+  if (entry != -1) return dictionary->DeleteProperty(entry, mode);
   return Heap::true_value();
 }
 
@@ -2164,13 +2166,15 @@
       return *v8::Utils::OpenHandle(*result);
     }
   }
-  Object* raw_result = this_handle->DeletePropertyPostInterceptor(*name_handle);
+  Object* raw_result =
+      this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
   RETURN_IF_SCHEDULED_EXCEPTION();
   return raw_result;
 }
 
 
-Object* JSObject::DeleteElementPostInterceptor(uint32_t index) {
+Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
+                                               DeleteMode mode) {
   if (HasFastElements()) {
     uint32_t length = IsJSArray() ?
       static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -2183,7 +2187,7 @@
   ASSERT(!HasFastElements());
   Dictionary* dictionary = element_dictionary();
   int entry = dictionary->FindNumberEntry(index);
-  if (entry != -1) return dictionary->DeleteProperty(entry);
+  if (entry != -1) return dictionary->DeleteProperty(entry, mode);
   return Heap::true_value();
 }
 
@@ -2214,13 +2218,14 @@
     ASSERT(result->IsBoolean());
     return *v8::Utils::OpenHandle(*result);
   }
-  Object* raw_result = this_handle->DeleteElementPostInterceptor(index);
+  Object* raw_result =
+      this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
   RETURN_IF_SCHEDULED_EXCEPTION();
   return raw_result;
 }
 
 
-Object* JSObject::DeleteElement(uint32_t index) {
+Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
       !Top::MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
@@ -2232,10 +2237,14 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return Heap::false_value();
     ASSERT(proto->IsJSGlobalObject());
-    return JSGlobalObject::cast(proto)->DeleteElement(index);
+    return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
   }
 
   if (HasIndexedInterceptor()) {
+    // Skip interceptor if forcing deletion.
+    if (mode == FORCE_DELETION) {
+      return DeleteElementPostInterceptor(index, mode);
+    }
     return DeleteElementWithInterceptor(index);
   }
 
@@ -2250,13 +2259,13 @@
   } else {
     Dictionary* dictionary = element_dictionary();
     int entry = dictionary->FindNumberEntry(index);
-    if (entry != -1) return dictionary->DeleteProperty(entry);
+    if (entry != -1) return dictionary->DeleteProperty(entry, mode);
   }
   return Heap::true_value();
 }
 
 
-Object* JSObject::DeleteProperty(String* name) {
+Object* JSObject::DeleteProperty(String* name, DeleteMode mode) {
   // ECMA-262, 3rd, 8.6.2.5
   ASSERT(name->IsString());
 
@@ -2271,23 +2280,32 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return Heap::false_value();
     ASSERT(proto->IsJSGlobalObject());
-    return JSGlobalObject::cast(proto)->DeleteProperty(name);
+    return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
   }
 
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
-    return DeleteElement(index);
+    return DeleteElement(index, mode);
   } else {
     LookupResult result;
     LocalLookup(name, &result);
     if (!result.IsValid()) return Heap::true_value();
-    if (result.IsDontDelete()) return Heap::false_value();
+    // Ignore attributes if forcing a deletion.
+    if (result.IsDontDelete() && mode != FORCE_DELETION) {
+      return Heap::false_value();
+    }
     // Check for interceptor.
     if (result.type() == INTERCEPTOR) {
+      // Skip interceptor if forcing a deletion.
+      if (mode == FORCE_DELETION) {
+        return DeletePropertyPostInterceptor(name, mode);
+      }
       return DeletePropertyWithInterceptor(name);
     }
     if (!result.IsLoaded()) {
-      return JSObject::cast(this)->DeleteLazyProperty(&result, name);
+      return JSObject::cast(this)->DeleteLazyProperty(&result,
+                                                      name,
+                                                      mode);
     }
     // Normalize object if needed.
     Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
@@ -2295,7 +2313,7 @@
     // Make sure the properties are normalized before removing the entry.
     Dictionary* dictionary = property_dictionary();
     int entry = dictionary->FindStringEntry(name);
-    if (entry != -1) return dictionary->DeleteProperty(entry);
+    if (entry != -1) return dictionary->DeleteProperty(entry, mode);
     return Heap::true_value();
   }
 }
@@ -5617,9 +5635,11 @@
 }
 
 
-Object* JSObject::GetPropertyWithInterceptor(JSObject* receiver,
-                                             String* name,
-                                             PropertyAttributes* attributes) {
+bool JSObject::GetPropertyWithInterceptorProper(
+    JSObject* receiver,
+    String* name,
+    PropertyAttributes* attributes,
+    Object** result_object) {
   HandleScope scope;
   Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
   Handle<JSObject> receiver_handle(receiver);
@@ -5640,19 +5660,93 @@
       VMState state(EXTERNAL);
       result = getter(v8::Utils::ToLocal(name_handle), info);
     }
-    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (Top::has_scheduled_exception()) {
+      return false;
+    }
     if (!result.IsEmpty()) {
       *attributes = NONE;
-      return *v8::Utils::OpenHandle(*result);
+      *result_object = *v8::Utils::OpenHandle(*result);
+      return true;
     }
   }
 
-  Object* raw_result = holder_handle->GetPropertyPostInterceptor(
+  return false;
+}
+
+
+Object* JSObject::GetInterceptorPropertyWithLookupHint(
+    JSObject* receiver,
+    Smi* lookup_hint,
+    String* name,
+    PropertyAttributes* attributes) {
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  Handle<String> name_handle(name);
+
+  Object* result = NULL;
+  if (GetPropertyWithInterceptorProper(receiver, name, attributes, &result)) {
+    return result;
+  } else {
+    RETURN_IF_SCHEDULED_EXCEPTION();
+  }
+
+  int property_index = lookup_hint->value();
+  if (property_index >= 0) {
+    result = holder_handle->FastPropertyAt(property_index);
+  } else {
+    switch (property_index) {
+      case kLookupInPrototype: {
+          Object* pt = holder_handle->GetPrototype();
+          *attributes = ABSENT;
+          if (pt == Heap::null_value()) return Heap::undefined_value();
+          result = pt->GetPropertyWithReceiver(
+              *receiver_handle,
+              *name_handle,
+              attributes);
+          RETURN_IF_SCHEDULED_EXCEPTION();
+        }
+        break;
+
+      case kLookupInHolder:
+        result = holder_handle->GetPropertyPostInterceptor(
+            *receiver_handle,
+            *name_handle,
+            attributes);
+        RETURN_IF_SCHEDULED_EXCEPTION();
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  return result;
+}
+
+
+Object* JSObject::GetPropertyWithInterceptor(
+    JSObject* receiver,
+    String* name,
+    PropertyAttributes* attributes) {
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<JSObject> holder_handle(this);
+  Handle<String> name_handle(name);
+
+  Object* result = NULL;
+  if (GetPropertyWithInterceptorProper(receiver, name, attributes, &result)) {
+    return result;
+  } else {
+    RETURN_IF_SCHEDULED_EXCEPTION();
+  }
+
+  result = holder_handle->GetPropertyPostInterceptor(
       *receiver_handle,
       *name_handle,
       attributes);
   RETURN_IF_SCHEDULED_EXCEPTION();
-  return raw_result;
+  return result;
 }
 
 
@@ -6867,9 +6961,12 @@
 }
 
 
-Object* Dictionary::DeleteProperty(int entry) {
+Object* Dictionary::DeleteProperty(int entry, JSObject::DeleteMode mode) {
   PropertyDetails details = DetailsAt(entry);
-  if (details.IsDontDelete()) return Heap::false_value();
+  // Ignore attributes if forcing a deletion.
+  if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) {
+    return Heap::false_value();
+  }
   SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
   ElementRemoved();
   return Heap::true_value();
diff --git a/src/objects.h b/src/objects.h
index 6bdddeb..493d22b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1275,9 +1275,12 @@
     return GetLocalPropertyAttribute(name) != ABSENT;
   }
 
-  Object* DeleteProperty(String* name);
-  Object* DeleteElement(uint32_t index);
-  Object* DeleteLazyProperty(LookupResult* result, String* name);
+  enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
+  Object* DeleteProperty(String* name, DeleteMode mode);
+  Object* DeleteElement(uint32_t index, DeleteMode mode);
+  Object* DeleteLazyProperty(LookupResult* result,
+                             String* name,
+                             DeleteMode mode);
 
   // Tests for the fast common case for property enumeration.
   bool IsSimpleEnum();
@@ -1349,6 +1352,14 @@
   Object* LookupCallbackSetterInPrototypes(uint32_t index);
   void LookupCallback(String* name, LookupResult* result);
 
+  inline Smi* InterceptorPropertyLookupHint(String* name);
+  Object* GetInterceptorPropertyWithLookupHint(JSObject* receiver,
+                                               Smi* lookup_hint,
+                                               String* name,
+                                               PropertyAttributes* attributes);
+  static const int kLookupInHolder = -1;
+  static const int kLookupInPrototype = -2;
+
   // Returns the number of properties on this object filtering out properties
   // with the specified attributes (ignoring interceptors).
   int NumberOfLocalProperties(PropertyAttributes filter);
@@ -1511,10 +1522,10 @@
 
   Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
 
-  Object* DeletePropertyPostInterceptor(String* name);
+  Object* DeletePropertyPostInterceptor(String* name, DeleteMode mode);
   Object* DeletePropertyWithInterceptor(String* name);
 
-  Object* DeleteElementPostInterceptor(uint32_t index);
+  Object* DeleteElementPostInterceptor(uint32_t index, DeleteMode mode);
   Object* DeleteElementWithInterceptor(uint32_t index);
 
   PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
@@ -1540,6 +1551,14 @@
 
   void LookupInDescriptor(String* name, LookupResult* result);
 
+  // Attempts to get property with a named interceptor getter.  Returns
+  // |true| and stores result into |result| if successful, otherwise
+  // returns |false|.
+  bool GetPropertyWithInterceptorProper(JSObject* receiver,
+                                        String* name,
+                                        PropertyAttributes* attributes,
+                                        Object** result);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
 
@@ -2041,7 +2060,7 @@
   int FindNumberEntry(uint32_t index);
 
   // Delete a property from the dictionary.
-  Object* DeleteProperty(int entry);
+  Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
 
   // Type specific at put (default NONE attributes is used when adding).
   Object* AtStringPut(String* key, Object* value);
@@ -2612,7 +2631,7 @@
   static const int kHasInstanceCallHandler = 6;
   static const int kIsAccessCheckNeeded = 7;
 
-  // Bit positions for but field 2
+  // Bit positions for bit field 2
   static const int kNeedsLoading = 0;
 
  private:
@@ -2630,17 +2649,23 @@
 };
 
 
-// Script types.
-enum ScriptType {
-  SCRIPT_TYPE_NATIVE,
-  SCRIPT_TYPE_EXTENSION,
-  SCRIPT_TYPE_NORMAL
-};
-
-
 // Script describes a script which has been added to the VM.
 class Script: public Struct {
  public:
+  // Script types.
+  enum Type {
+    TYPE_NATIVE,
+    TYPE_EXTENSION,
+    TYPE_NORMAL
+  };
+
+  // Script compilation types.
+  enum CompilationType {
+    COMPILATION_TYPE_HOST,
+    COMPILATION_TYPE_EVAL,
+    COMPILATION_TYPE_JSON
+  };
+
   // [source]: the script source.
   DECL_ACCESSORS(source, Object)
 
@@ -2669,9 +2694,20 @@
   // [type]: the script type.
   DECL_ACCESSORS(type, Smi)
 
-  // [line_ends]: array of line ends positions
+  // [compilation_type]: how the script was compiled.
+  DECL_ACCESSORS(compilation_type, Smi)
+
+  // [line_ends]: array of line ends positions.
   DECL_ACCESSORS(line_ends, Object)
 
+  // [eval_from_function]: for eval scripts the function from which eval was
+  // called.
+  DECL_ACCESSORS(eval_from_function, Object)
+
+  // [eval_from_instructions_offset]: the instruction offset, within the code
+  // of the function from which eval was called, of the call to eval.
+  DECL_ACCESSORS(eval_from_instructions_offset, Smi)
+
   static inline Script* cast(Object* obj);
 
 #ifdef DEBUG
@@ -2687,9 +2723,13 @@
   static const int kContextOffset = kDataOffset + kPointerSize;
   static const int kWrapperOffset = kContextOffset + kPointerSize;
   static const int kTypeOffset = kWrapperOffset + kPointerSize;
-  static const int kLineEndsOffset = kTypeOffset + kPointerSize;
+  static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
+  static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
   static const int kIdOffset = kLineEndsOffset + kPointerSize;
-  static const int kSize = kIdOffset + kPointerSize;
+  static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize;
+  static const int kEvalFrominstructionsOffsetOffset =
+      kEvalFromFunctionOffset + kPointerSize;
+  static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 1b07f4d..79ffe81 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -88,10 +88,15 @@
 
 
 int OS::ActivationFrameAlignment() {
-  // Floating point code runs faster if the stack is 8-byte aligned.
+#ifdef V8_TARGET_ARCH_ARM
   // On EABI ARM targets this is required for fp correctness in the
   // runtime system.
   return 8;
+#else
+  // With gcc 4.4 the tree vectorization optimiser can generate code
+  // that requires 16 byte alignment such as movdqa on x86.
+  return 16;
+#endif
 }
 
 
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 641f754..79f1883 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -693,10 +693,10 @@
       Print(" ");
     }
     PrintLabels(labels);
-    Print("\n");
   } else if (info != NULL) {
     PrintIndented(info);
   }
+  Print("\n");
 }
 
 
@@ -918,9 +918,8 @@
 
 void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
   IndentedScope indent("REGEXP LITERAL");
-  PrintLiteral(node->pattern(), false);
-  Print(",");
-  PrintLiteral(node->flags(), false);
+  PrintLiteralIndented("PATTERN", node->pattern(), false);
+  PrintLiteralIndented("FLAGS", node->flags(), false);
 }
 
 
diff --git a/src/runtime.cc b/src/runtime.cc
index 2fcdff1..78be512 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2780,6 +2780,42 @@
 }
 
 
+Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
+                                           Handle<Object> key) {
+  HandleScope scope;
+
+  // Check if the given key is an array index.
+  uint32_t index;
+  if (Array::IndexFromObject(*key, &index)) {
+    // In Firefox/SpiderMonkey, Safari and Opera you can access the
+    // characters of a string using [] notation.  In the case of a
+    // String object we just need to redirect the deletion to the
+    // underlying string if the index is in range.  Since the
+    // underlying string does nothing with the deletion, we can ignore
+    // such deletions.
+    if (js_object->IsStringObjectWithCharacterAt(index)) {
+      return Heap::true_value();
+    }
+
+    return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
+  }
+
+  Handle<String> key_string;
+  if (key->IsString()) {
+    key_string = Handle<String>::cast(key);
+  } else {
+    // Call-back into JavaScript to convert the key to a string.
+    bool has_pending_exception = false;
+    Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    key_string = Handle<String>::cast(converted);
+  }
+
+  key_string->TryFlattenIfNotFlat();
+  return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
+}
+
+
 static Object* Runtime_SetProperty(Arguments args) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
@@ -2831,7 +2867,7 @@
 
   CONVERT_CHECKED(JSObject, object, args[0]);
   CONVERT_CHECKED(String, key, args[1]);
-  return object->DeleteProperty(key);
+  return object->DeleteProperty(key, JSObject::NORMAL_DELETION);
 }
 
 
diff --git a/src/runtime.h b/src/runtime.h
index 474f1b3..30bb7c5 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -387,6 +387,9 @@
                                         Handle<Object> value,
                                         PropertyAttributes attr);
 
+  static Object* ForceDeleteObjectProperty(Handle<JSObject> object,
+                                           Handle<Object> key);
+
   static Object* GetObjectProperty(Handle<Object> object, Handle<Object> key);
 
   // This function is used in FunctionNameUsing* tests.
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index ca14d1a..f7e5456 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -719,10 +719,12 @@
   JSObject* recv = JSObject::cast(args[0]);
   JSObject* holder = JSObject::cast(args[1]);
   String* name = String::cast(args[2]);
+  Smi* lookup_hint = Smi::cast(args[3]);
   ASSERT(holder->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
-  Object* result = holder->GetPropertyWithInterceptor(recv, name, &attr);
 
+  Object* result = holder->GetInterceptorPropertyWithLookupHint(
+      recv, lookup_hint, name, &attr);
   if (result->IsFailure()) return result;
 
   // If the property is present, return it.
diff --git a/src/stub-cache.h b/src/stub-cache.h
index ed513a0..b79841a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -351,6 +351,7 @@
   static void GenerateLoadInterceptor(MacroAssembler* masm,
                                       JSObject* object,
                                       JSObject* holder,
+                                      Smi* lookup_hint,
                                       Register receiver,
                                       Register name,
                                       Register scratch1,
diff --git a/src/top.cc b/src/top.cc
index b2583db..42a2b7e 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -45,6 +45,7 @@
 Address top_addresses[] = {
 #define C(name) reinterpret_cast<Address>(Top::name()),
     TOP_ADDRESS_LIST(C)
+    TOP_ADDRESS_LIST_PROF(C)
 #undef C
     NULL
 };
@@ -91,6 +92,9 @@
 void Top::InitializeThreadLocal() {
   thread_local_.c_entry_fp_ = 0;
   thread_local_.handler_ = 0;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  thread_local_.js_entry_sp_ = 0;
+#endif
   thread_local_.stack_is_cooked_ = false;
   thread_local_.try_catch_handler_ = NULL;
   thread_local_.context_ = NULL;
diff --git a/src/top.h b/src/top.h
index 8e928ed..53d67e5 100644
--- a/src/top.h
+++ b/src/top.h
@@ -65,6 +65,9 @@
   // Stack.
   Address c_entry_fp_;  // the frame pointer of the top c entry frame
   Address handler_;   // try-blocks are chained through the stack
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
+#endif
   bool stack_is_cooked_;
   inline bool stack_is_cooked() { return stack_is_cooked_; }
   inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
@@ -83,11 +86,20 @@
   C(pending_exception_address)         \
   C(external_caught_exception_address)
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define TOP_ADDRESS_LIST_PROF(C)       \
+  C(js_entry_sp_address)
+#else
+#define TOP_ADDRESS_LIST_PROF(C)
+#endif
+
+
 class Top {
  public:
   enum AddressId {
 #define C(name) k_##name,
     TOP_ADDRESS_LIST(C)
+    TOP_ADDRESS_LIST_PROF(C)
 #undef C
     k_top_address_count
   };
@@ -179,6 +191,16 @@
   }
   static inline Address* handler_address() { return &thread_local_.handler_; }
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // Bottom JS entry (see StackTracer::Trace in log.cc).
+  static Address js_entry_sp(ThreadLocalTop* thread) {
+    return thread->js_entry_sp_;
+  }
+  static inline Address* js_entry_sp_address() {
+    return &thread_local_.js_entry_sp_;
+  }
+#endif
+
   // Generated code scratch locations.
   static void* formal_count_address() { return &thread_local_.formal_count_; }
 
diff --git a/src/v8natives.js b/src/v8natives.js
index 2edb191..fe46351 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -115,11 +115,15 @@
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
-  if (this !== global && this !== %GlobalReceiver(global)) {
-    throw new $EvalError('The "this" object passed to eval must ' + 
+  var global_receiver = %GlobalReceiver(global);
+  var this_is_global_receiver = (this === global_receiver);
+  var global_is_detached = (global === global_receiver);
+
+  if (!this_is_global_receiver || global_is_detached) {
+    throw new $EvalError('The "this" object passed to eval must ' +
                          'be the global object from which eval originated');
   }
-  
+
   var f = %CompileString(x, false);
   if (!IS_FUNCTION(f)) return f;
 
diff --git a/src/version.cc b/src/version.cc
index f48a475..d613e94 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      6
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      7
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 6d3ed15..1822568 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -37,6 +37,11 @@
   return static_cast<Condition>(cc ^ 1);
 }
 
+// -----------------------------------------------------------------------------
+
+Immediate::Immediate(Smi* value) {
+  value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+}
 
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
@@ -51,66 +56,90 @@
 
 void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
   Memory::uint64_at(pc_) = x;
-  RecordRelocInfo(rmode, x);
+  if (rmode != RelocInfo::NONE) {
+    RecordRelocInfo(rmode, x);
+  }
+  pc_ += sizeof(uint64_t);
 }
 
 
-// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
-// REX.W is set.  REX.X is cleared.
+void Assembler::emitw(uint16_t x) {
+  Memory::uint16_at(pc_) = x;
+  pc_ += sizeof(uint16_t);
+}
+
+
 void Assembler::emit_rex_64(Register reg, Register rm_reg) {
   emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
 }
 
 
-// The high bit of reg is used for REX.R, the high bit of op's base
-// register is used for REX.B, and the high bit of op's index register
-// is used for REX.X.  REX.W is set.
 void Assembler::emit_rex_64(Register reg, const Operand& op) {
   emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
 }
 
 
-// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
-// REX.W is set.  REX.X is cleared.
+void Assembler::emit_rex_64(Register rm_reg) {
+  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
+  emit(0x48 | (rm_reg.code() >> 3));
+}
+
+
+void Assembler::emit_rex_64(const Operand& op) {
+  emit(0x48 | op.rex_);
+}
+
+
 void Assembler::emit_rex_32(Register reg, Register rm_reg) {
   emit(0x40 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
 }
 
 
-// The high bit of reg is used for REX.R, the high bit of op's base
-// register is used for REX.B, and the high bit of op's index register
-// is used for REX.X.  REX.W is cleared.
 void Assembler::emit_rex_32(Register reg, const Operand& op) {
   emit(0x40 | (reg.code() & 0x8) >> 1 | op.rex_);
 }
 
 
-// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
-// REX.W and REX.X are cleared.  If no REX bits are set, no byte is emitted.
+void Assembler::emit_rex_32(Register rm_reg) {
+  emit(0x40 | (rm_reg.code() & 0x8) >> 3);
+}
+
+
+void Assembler::emit_rex_32(const Operand& op) {
+  emit(0x40 | op.rex_);
+}
+
+
 void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
   byte rex_bits = (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3;
-  if (rex_bits) emit(0x40 | rex_bits);
+  if (rex_bits != 0) emit(0x40 | rex_bits);
 }
 
 
-// The high bit of reg is used for REX.R, the high bit of op's base
-// register is used for REX.B, and the high bit of op's index register
-// is used for REX.X.  REX.W is cleared.  If no REX bits are set, nothing
-// is emitted.
 void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
   byte rex_bits =  (reg.code() & 0x8) >> 1 | op.rex_;
-  if (rex_bits) emit(0x40 | rex_bits);
+  if (rex_bits != 0) emit(0x40 | rex_bits);
 }
 
 
-void Assembler::set_target_address_at(byte* location, byte* value) {
-  UNIMPLEMENTED();
+void Assembler::emit_optional_rex_32(Register rm_reg) {
+  if ((rm_reg.code() & 0x8) != 0) emit(0x41);
 }
 
 
-byte* Assembler::target_address_at(byte* location) {
-  UNIMPLEMENTED();
-  return NULL;
+void Assembler::emit_optional_rex_32(const Operand& op) {
+  if (op.rex_ != 0) emit(0x40 | op.rex_);
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return Memory::Address_at(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  Memory::Address_at(pc) = target;
+  CPU::FlushICache(pc, sizeof(intptr_t));
 }
 
 
@@ -151,6 +180,8 @@
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
 }
+
+
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return *reinterpret_cast<Object**>(pc_);
@@ -222,7 +253,7 @@
   len_ = 1;
   if (base.is(rsp) || base.is(r12)) {
     // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
-    set_sib(times_1, rsp, base);
+    set_sib(kTimes1, rsp, base);
   }
 
   if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
@@ -246,7 +277,7 @@
 
 void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
   ASSERT(len_ == 1);
-  ASSERT((scale & -4) == 0);
+  ASSERT(is_uint2(scale));
   // Use SIB with no index register only for base rsp or r12.
   ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
   buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index acea713..77bbf52 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -77,7 +77,73 @@
 uint64_t CpuFeatures::enabled_ = 0;
 
 void CpuFeatures::Probe()  {
-  // TODO(X64): UNIMPLEMENTED
+  ASSERT(Heap::HasBeenSetup());
+  ASSERT(supported_ == 0);
+  if (Serializer::enabled()) return;  // No features if we might serialize.
+
+  Assembler assm(NULL, 0);
+  Label cpuid, done;
+#define __ assm.
+  // Save old esp, since we are going to modify the stack.
+  __ push(rbp);
+  __ pushfq();
+  __ push(rcx);
+  __ push(rbx);
+  __ movq(rbp, rsp);
+
+  // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+  __ pushfq();
+  __ pop(rax);
+  __ movq(rdx, rax);
+  __ xor_(rax, Immediate(0x200000));  // Flip bit 21.
+  __ push(rax);
+  __ popfq();
+  __ pushfq();
+  __ pop(rax);
+  __ xor_(rax, rdx);  // Different if CPUID is supported.
+  __ j(not_zero, &cpuid);
+
+  // CPUID not supported. Clear the supported features in edx:eax.
+  __ xor_(rax, rax);
+  __ jmp(&done);
+
+  // Invoke CPUID with 1 in eax to get feature information in
+  // ecx:edx. Temporarily enable CPUID support because we know it's
+  // safe here.
+  __ bind(&cpuid);
+  __ movq(rax, Immediate(1));
+  supported_ = (1 << CPUID);
+  { Scope fscope(CPUID);
+    __ cpuid();
+  }
+  supported_ = 0;
+
+  // Move the result from ecx:edx to rax and make sure to mark the
+  // CPUID feature as supported.
+  __ movl(rax, rdx);  // Zero-extended to 64 bits.
+  __ shl(rcx, Immediate(32));
+  __ or_(rax, rcx);
+  __ or_(rax, Immediate(1 << CPUID));
+
+  // Done.
+  __ bind(&done);
+  __ movq(rsp, rbp);
+  __ pop(rbx);
+  __ pop(rcx);
+  __ popfq();
+  __ pop(rbp);
+  __ ret(0);
+#undef __
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code =
+      Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
+  if (!code->IsCode()) return;
+  LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+  typedef uint64_t (*F0)();
+  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  supported_ = probe();
 }
 
 // -----------------------------------------------------------------------------
@@ -271,12 +337,13 @@
 }
 
 
-void Assembler::emit_operand(Register reg, const Operand& adr) {
+void Assembler::emit_operand(int rm, const Operand& adr) {
+  ASSERT_EQ(rm & 0x07, rm);
   const unsigned length = adr.len_;
   ASSERT(length > 0);
 
-  // Emit updated ModRM byte containing the given register.
-  pc_[0] = (adr.buf_[0] & ~0x38) | ((reg.code() && 0x7) << 3);
+  // Emit updated ModR/M byte containing the given register.
+  pc_[0] = (adr.buf_[0] & ~0x38) | (rm << 3);
 
   // Emit the rest of the encoded operand.
   for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
@@ -300,7 +367,7 @@
   last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(opcode);
-  emit(0xC0 | (dst.code() & 0x7) << 3 | (src.code() & 0x7));
+  emit_modrm(dst, src);
 }
 
 void Assembler::immediate_arithmetic_op(byte subcode,
@@ -311,14 +378,14 @@
   emit_rex_64(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
-    emit(0xC0 | (subcode << 3) | (dst.code() & 0x7));
+    emit_modrm(subcode, dst);
     emit(src.value_);
   } else if (dst.is(rax)) {
     emit(0x05 | (subcode << 3));
     emitl(src.value_);
   } else {
     emit(0x81);
-    emit(0xC0 | (subcode << 3) | (dst.code() & 0x7));
+    emit_modrm(subcode, dst);
     emitl(src.value_);
   }
 }
@@ -341,6 +408,52 @@
 }
 
 
+void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
+  if (shift_amount.value_ == 1) {
+    emit_rex_64(dst);
+    emit(0xD1);
+    emit_modrm(subcode, dst);
+  } else {
+    emit_rex_64(dst);
+    emit(0xC1);
+    emit_modrm(subcode, dst);
+    emit(shift_amount.value_);
+  }
+}
+
+
+void Assembler::shift(Register dst, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xD3);
+  emit_modrm(subcode, dst);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xA3);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xAB);
+  emit_operand(src, dst);
+}
+
+
 void Assembler::call(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -362,12 +475,50 @@
 }
 
 
+void Assembler::call(Register adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: FF /2 r64
+  if (adr.code() > 7) {
+    emit_rex_64(adr);
+  }
+  emit(0xFF);
+  emit_modrm(0x2, adr);
+}
+
+void Assembler::cpuid() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x0F);
+  emit(0xA2);
+}
+
+
+void Assembler::call(const Operand& op) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: FF /2 m64
+  emit_rex_64(op);
+  emit(0xFF);
+  emit_operand(2, op);
+}
+
+
+void Assembler::cqo() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64();
+  emit(0x99);
+}
+
+
 void Assembler::dec(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
-  emit(0xC8 | (dst.code() & 0x7));
+  emit_modrm(0x1, dst);
 }
 
 
@@ -380,6 +531,15 @@
 }
 
 
+void Assembler::enter(Immediate size) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xC8);
+  emitw(size.value_);  // 16 bit operand, always.
+  emit(0);
+}
+
+
 void Assembler::hlt() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -387,12 +547,47 @@
 }
 
 
+void Assembler::idiv(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src);
+  emit(0xF7);
+  emit_modrm(0x7, src);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xAF);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, Immediate imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  if (is_int8(imm.value_)) {
+    emit(0x6B);
+    emit_modrm(dst, src);
+    emit(imm.value_);
+  } else {
+    emit(0x69);
+    emit_modrm(dst, src);
+    emitl(imm.value_);
+  }
+}
+
+
 void Assembler::inc(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
-  emit(0xC0 | (dst.code() & 0x7));
+  emit_modrm(0x0, dst);
 }
 
 
@@ -479,6 +674,110 @@
 }
 
 
+void Assembler::jmp(Register target) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode FF/4 r64
+  if (target.code() > 7) {
+    emit_rex_64(target);
+  }
+  emit(0xFF);
+  emit_modrm(0x4, target);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x8D);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x48);  // REX.W
+  emit(0xA1);
+  emitq(reinterpret_cast<uintptr_t>(value), mode);
+}
+
+
+void Assembler::load_rax(ExternalReference ref) {
+  load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::leave() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xC9);
+}
+
+
+void Assembler::movb(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(dst, src);
+  emit(0x8A);
+  emit_operand(dst, src);
+}
+
+void Assembler::movb(Register dst, Immediate imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(dst);
+  emit(0xC6);
+  emit_modrm(0x0, dst);
+  emit(imm.value_);
+}
+
+void Assembler::movb(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(src, dst);
+  emit(0x88);
+  emit_operand(src, dst);
+}
+
+void Assembler::movl(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movl(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x8B);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::movl(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(src, dst);
+  emit(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::movl(Register dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xC7);
+  emit_modrm(0x0, dst);
+  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
 void Assembler::movq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -493,7 +792,7 @@
   last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x8B);
-  emit(0xC0 | (dst.code() & 0x7) << 3 | (src.code() & 0x7));
+  emit_modrm(dst, src);
 }
 
 
@@ -502,26 +801,63 @@
   last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xC7);
-  emit(0xC0 | (dst.code() & 0x7));
+  emit_modrm(0x0, dst);
   emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
 }
 
 
+void Assembler::movq(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x89);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xB8 | (dst.code() & 0x7));
+  emitq(reinterpret_cast<uintptr_t>(value), rmode);
+}
+
+
 void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
-  emit(0xB8 | (dst.code() & 0x7));
+  emit(0xB8 | (dst.code() & 0x7));  // Not a ModR/M byte.
   emitq(value, rmode);
 }
 
 
+void Assembler::movq(Register dst, ExternalReference ref) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xB8 | (dst.code() & 0x7));
+  emitq(reinterpret_cast<uintptr_t>(ref.address()),
+        RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::mul(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src);
+  emit(0xF7);
+  emit_modrm(0x4, src);
+}
+
+
 void Assembler::neg(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
-  emit(0xC0 | (0x3 << 3) | (dst.code() & 0x7));
+  emit_modrm(0x3, dst);
 }
 
 
@@ -546,7 +882,7 @@
   last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
-  emit(0xC0 | (0x2 << 3) | (dst.code() & 0x7));
+  emit_modrm(0x2, dst);
 }
 
 
@@ -559,10 +895,97 @@
 }
 
 
+void Assembler::nop(int n) {
+  // The recommended multi-byte sequences of NOP instructions from the Intel 64
+  // and IA-32 Architectures Software Developer's Manual.
+  //
+  // Length   Assembly                                Byte Sequence
+  // 2 bytes  66 NOP                                  66 90H
+  // 3 bytes  NOP DWORD ptr [EAX]                     0F 1F 00H
+  // 4 bytes  NOP DWORD ptr [EAX + 00H]               0F 1F 40 00H
+  // 5 bytes  NOP DWORD ptr [EAX + EAX*1 + 00H]       0F 1F 44 00 00H
+  // 6 bytes  66 NOP DWORD ptr [EAX + EAX*1 + 00H]    66 0F 1F 44 00 00H
+  // 7 bytes  NOP DWORD ptr [EAX + 00000000H]         0F 1F 80 00 00 00 00H
+  // 8 bytes  NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
+  // 9 bytes  66 NOP DWORD ptr [EAX + EAX*1 +         66 0F 1F 84 00 00 00 00
+  //          00000000H]                              00H
+
+  ASSERT(1 <= n);
+  ASSERT(n <= 9);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  switch (n) {
+  case 1:
+    emit(0x90);
+    return;
+  case 2:
+    emit(0x66);
+    emit(0x90);
+    return;
+  case 3:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x00);
+    return;
+  case 4:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x40);
+    emit(0x00);
+    return;
+  case 5:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x44);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 6:
+    emit(0x66);
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x44);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 7:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x80);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 8:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x84);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 9:
+    emit(0x66);
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x84);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  }
+}
+
+
 void Assembler::pop(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (dst.code() & 0x8) {
+  if (dst.code() > 7) {
     emit_rex_64(dst);
   }
   emit(0x58 | (dst.code() & 0x7));
@@ -578,10 +1001,17 @@
 }
 
 
+void Assembler::popfq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9D);
+}
+
+
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (src.code() & 0x8) {
+  if (src.code() > 7) {
     emit_rex_64(src);
   }
   emit(0x50 | (src.code() & 0x7));
@@ -597,6 +1027,43 @@
 }
 
 
+void Assembler::push(Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (is_int8(value.value_)) {
+    emit(0x6A);
+    emit(value.value_);  // Emit low byte of value.
+  } else {
+    emit(0x68);
+    emitl(value.value_);
+  }
+}
+
+
+void Assembler::pushfq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9C);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint6(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    emit_rex_64(dst);
+    emit(0xD1);
+    emit_modrm(0x2, dst);
+  } else {
+    emit_rex_64(dst);
+    emit(0xC1);
+    emit_modrm(0x2, dst);
+    emit(imm8);
+  }
+}
+
+
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -611,6 +1078,55 @@
 }
 
 
+void Assembler::shld(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xA5);
+  emit_modrm(src, dst);
+}
+
+
+void Assembler::shrd(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0xAD);
+  emit_modrm(src, dst);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.is(rax) || dst.is(rax)) {  // Single-byte encoding
+    Register other = src.is(rax) ? dst : src;
+    emit_rex_64(other);
+    emit(0x90 | (other.code() & 0x7));
+  } else {
+    emit_rex_64(src, dst);
+    emit(0x87);
+    emit_modrm(src, dst);
+  }
+}
+
+
+void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x48);  // REX.W
+  emit(0xA3);
+  emitq(reinterpret_cast<uintptr_t>(dst), mode);
+}
+
+
+void Assembler::store_rax(ExternalReference ref) {
+  store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
 void Assembler::testb(Register reg, Immediate mask) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -618,11 +1134,12 @@
     emit(0xA8);
     emit(mask);
   } else {
-    if (reg.code() & 0x8) {
-      emit_rex_32(rax, reg);
+    if (reg.code() > 3) {
+      // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+      emit_rex_32(reg);
     }
     emit(0xF6);
-    emit(0xC0 | (reg.code() & 0x3));
+    emit_modrm(0x0, reg);
     emit(mask.value_);  // Low byte emitted.
   }
 }
@@ -647,7 +1164,7 @@
   } else {
     emit_optional_rex_32(rax, reg);
     emit(0xF7);
-    emit(0xC0 | (reg.code() & 0x3));
+    emit_modrm(0x0, reg);
     emit(mask);
   }
 }
@@ -663,6 +1180,24 @@
 }
 
 
+void Assembler::testq(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(reg, op);
+  emit(0x85);
+  emit_operand(reg, op);
+}
+
+
+void Assembler::testq(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x85);
+  emit_modrm(dst, src);
+}
+
+
 // Relocation information implementations
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 06a7c40..b488257 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -40,6 +40,19 @@
 namespace v8 {
 namespace internal {
 
+// Utility functions
+
+// Test whether a 64-bit value is in a specific range.
+static inline bool is_uint32(int64_t x) {
+  const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+  return x == (x & kUInt32Mask);
+}
+
+static inline bool is_int32(int64_t x) {
+  const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+  return is_uint32(x - kMinIntValue);
+}
+
 // CPU Registers.
 //
 // 1) We would prefer to use an enum, but enum values are assignment-
@@ -225,10 +238,12 @@
 // Machine instruction Operands
 
 enum ScaleFactor {
-  times_1 = 0,
-  times_2 = 1,
-  times_4 = 2,
-  times_8 = 3
+  kTimes1 = 0,
+  kTimes2 = 1,
+  kTimes4 = 2,
+  kTimes8 = 3,
+  kTimesIntSize = kTimes4,
+  kTimesPointerSize = kTimes8
 };
 
 
@@ -255,7 +270,7 @@
   unsigned int len_;
   RelocInfo::Mode rmode_;
 
-  // Set the ModRM byte without an encoded 'reg' register. The
+  // Set the ModR/M byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
   // set_modrm can be called before or after set_sib and set_disp*.
   inline void set_modrm(int mod, Register rm);
@@ -292,11 +307,11 @@
   static void Probe();
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(Feature f) {
-    return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+    return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
   // Check whether a feature is currently enabled.
   static bool IsEnabled(Feature f) {
-    return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+    return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
   }
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
@@ -305,7 +320,7 @@
     explicit Scope(Feature f) {
       ASSERT(CpuFeatures::IsSupported(f));
       old_enabled_ = CpuFeatures::enabled_;
-      CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+      CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
     }
     ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
    private:
@@ -353,8 +368,9 @@
   void GetCode(CodeDesc* desc);
 
   // Read/Modify the code target in the branch/call instruction at pc.
-  inline static Address target_address_at(Address pc);
-  inline static void set_target_address_at(Address pc, Address target);
+  // On the x64 architecture, the address is absolute, not relative.
+  static inline Address target_address_at(Address pc);
+  static inline void set_target_address_at(Address pc, Address target);
 
   // Distance between the address of the code target in the call instruction
   // and the return address
@@ -385,13 +401,10 @@
   void Align(int m);
 
   // Stack
-  void pushad();
-  void popad();
+  void pushfq();
+  void popfq();
 
-  void pushfd();
-  void popfd();
-
-  void push(const Immediate& x);
+  void push(Immediate value);
   void push(Register src);
   void push(const Operand& src);
   void push(Label* label, RelocInfo::Mode relocation_mode);
@@ -399,19 +412,27 @@
   void pop(Register dst);
   void pop(const Operand& dst);
 
-  void enter(const Immediate& size);
+  void enter(Immediate size);
   void leave();
 
   // Moves
   void movb(Register dst, const Operand& src);
-  void movb(const Operand& dst, int8_t imm8);
+  void movb(Register dst, Immediate imm);
   void movb(const Operand& dst, Register src);
 
+  void movl(Register dst, Register src);
+  void movl(Register dst, const Operand& src);
+  void movl(const Operand& dst, Register src);
+  // Load a 32-bit immediate value, zero-extended to 64 bits.
+  void movl(Register dst, Immediate imm32);
+
   void movq(Register dst, int32_t imm32);
-  void movq(Register dst, Immediate x);
   void movq(Register dst, const Operand& src);
+  // Sign extends immediate 32-bit value to 64 bits.
+  void movq(Register dst, Immediate x);
   void movq(Register dst, Register src);
-  void movq(const Operand& dst, const Immediate& x);
+
+  // Move 64 bit register value to 64-bit memory location.
   void movq(const Operand& dst, Register src);
 
   // New x64 instructions to load a 64-bit immediate into a register.
@@ -419,11 +440,14 @@
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
   void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
   void movq(Register dst, const char* s, RelocInfo::Mode rmode);
-  void movq(Register dst, const ExternalReference& ext, RelocInfo::Mode rmode);
+  // Moves the address of the external reference into the register.
+  void movq(Register dst, ExternalReference ext);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
 
+
   // New x64 instruction to load from an immediate 64-bit pointer into RAX.
   void load_rax(void* ptr, RelocInfo::Mode rmode);
+  void load_rax(ExternalReference ext);
 
   void movsx_b(Register dst, const Operand& src);
 
@@ -513,18 +537,23 @@
   void dec(Register dst);
   void dec(const Operand& dst);
 
-  void cdq();
+  // Sign-extends rax into rdx:rax.
+  void cqo();
 
+  // Divide rdx:rax by src.  Quotient in rax, remainder in rdx.
   void idiv(Register src);
 
+  void imul(Register dst, Register src);
   void imul(Register dst, const Operand& src);
-  void imul(Register dst, Register src, int32_t imm32);
+  // Performs the operation dst = src * imm.
+  void imul(Register dst, Register src, Immediate imm);
 
   void inc(Register dst);
   void inc(const Operand& dst);
 
   void lea(Register dst, const Operand& src);
 
+  // Multiply rax by src, put the result in rdx:rax.
   void mul(Register src);
 
   void neg(Register dst);
@@ -556,21 +585,41 @@
 
   void rcl(Register dst, uint8_t imm8);
 
-  void sar(Register dst, uint8_t imm8);
-  void sar(Register dst);
+  // Shifts dst:src left by cl bits, affecting only dst.
+  void shld(Register dst, Register src);
 
-  void sbb(Register dst, const Operand& src);
+  // Shifts src:dst right by cl bits, affecting only dst.
+  void shrd(Register dst, Register src);
 
-  void shld(Register dst, const Operand& src);
+  // Shifts dst right, duplicating sign bit, by shift_amount bits.
+  // Shifting by 1 is handled efficiently.
+  void sar(Register dst, Immediate shift_amount) {
+    shift(dst, shift_amount, 0x7);
+  }
 
-  void shl(Register dst, uint8_t imm8);
-  void shl(Register dst);
+  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
+  void sar(Register dst) {
+    shift(dst, 0x7);
+  }
 
-  void shrd(Register dst, const Operand& src);
+  void shl(Register dst, Immediate shift_amount) {
+    shift(dst, shift_amount, 0x4);
+  }
 
-  void shr(Register dst, uint8_t imm8);
-  void shr(Register dst);
-  void shr_cl(Register dst);
+  void shl(Register dst) {
+    shift(dst, 0x4);
+  }
+
+  void shr(Register dst, Immediate shift_amount) {
+    shift(dst, shift_amount, 0x5);
+  }
+
+  void shr(Register dst) {
+    shift(dst, 0x5);
+  }
+
+  void store_rax(void* dst, RelocInfo::Mode mode);
+  void store_rax(ExternalReference ref);
 
   void sub(Register dst, Register src) {
     arithmetic_op(0x2B, dst, src);
@@ -596,6 +645,8 @@
   void testb(const Operand& op, Immediate mask);
   void testl(Register reg, Immediate mask);
   void testl(const Operand& op, Immediate mask);
+  void testq(const Operand& op, Register reg);
+  void testq(Register dst, Register src);
 
   void xor_(Register dst, Register src) {
     arithmetic_op(0x33, dst, src);
@@ -626,6 +677,7 @@
   void hlt();
   void int3();
   void nop();
+  void nop(int n);
   void rdtsc();
   void ret(int imm16);
 
@@ -647,16 +699,21 @@
   void bind(Label* L);  // binds an unbound label L to the current code position
 
   // Calls
+  // Call near relative 32-bit displacement, relative to next instruction.
   void call(Label* L);
-  void call(byte* entry, RelocInfo::Mode rmode);
-  void call(const Operand& adr);
-  void call(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Call near absolute indirect, address in register
+  void call(Register adr);
+
+  // Call near indirect
+  void call(const Operand& operand);
 
   // Jumps
+  // Jump short or near relative.
   void jmp(Label* L);  // unconditional jump to L
-  void jmp(byte* entry, RelocInfo::Mode rmode);
-  void jmp(const Operand& adr);
-  void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Jump near absolute indirect (r64)
+  void jmp(Register adr);
 
   // Conditional jumps
   void j(Condition cc, Label* L);
@@ -808,6 +865,7 @@
   inline void emitl(uint32_t x);
   inline void emit(Handle<Object> handle);
   inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+  inline void emitw(uint16_t x);
   void emit(Immediate x) { emitl(x.value_); }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
@@ -815,7 +873,6 @@
   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
   // REX.W is set.
   inline void emit_rex_64(Register reg, Register rm_reg);
-  void emit_rex_64(Register rm_reg) { emit_rex_64(rax, rm_reg); }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
   // the top bit of the destination, index, and base register codes.
@@ -823,10 +880,25 @@
   // register is used for REX.B, and the high bit of op's index register
   // is used for REX.X.  REX.W is set.
   inline void emit_rex_64(Register reg, const Operand& op);
-  void emit_rex_64(const Operand& op) { emit_rex_64(rax, op); }
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of the register code.
+  // The high bit of register is used for REX.B.
+  // REX.W is set and REX.R and REX.X are clear.
+  inline void emit_rex_64(Register rm_reg);
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of the index and base register codes.
+  // The high bit of op's base register is used for REX.B, and the high
+  // bit of op's index register is used for REX.X.
+  // REX.W is set and REX.R clear.
+  inline void emit_rex_64(const Operand& op);
+
+  // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
+  void emit_rex_64() { emit(0x48); }
 
   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
-  // REX.W is set.
+  // REX.W is clear.
   inline void emit_rex_32(Register reg, Register rm_reg);
 
   // The high bit of reg is used for REX.R, the high bit of op's base
@@ -834,6 +906,14 @@
   // is used for REX.X.  REX.W is cleared.
   inline void emit_rex_32(Register reg, const Operand& op);
 
+  // High bit of rm_reg goes to REX.B.
+  // REX.W, REX.R and REX.X are clear.
+  inline void emit_rex_32(Register rm_reg);
+
+  // High bit of base goes to REX.B and high bit of index to REX.X.
+  // REX.W and REX.R are clear.
+  inline void emit_rex_32(const Operand& op);
+
   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
   // REX.W is cleared.  If no REX bits are set, no byte is emitted.
   inline void emit_optional_rex_32(Register reg, Register rm_reg);
@@ -844,13 +924,38 @@
   // is emitted.
   inline void emit_optional_rex_32(Register reg, const Operand& op);
 
-  // Emit the Mod/RM byte, and optionally the SIB byte and
+  // Optionally do as emit_rex_32(Register) if the register number has
+  // the high bit set.
+  inline void emit_optional_rex_32(Register rm_reg);
+
+  // Optionally do as emit_rex_32(const Operand&) if the operand register
+  // numbers have a high bit set.
+  inline void emit_optional_rex_32(const Operand& op);
+
+
+  // Emit the ModR/M byte, and optionally the SIB byte and
   // 1- or 4-byte offset for a memory operand.  Also encodes
   // the second operand of the operation, a register or operation
-  // subcode, into the Mod/RM byte.
-  void emit_operand(Register reg, const Operand& adr);
-  void emit_operand(int op_subcode, const Operand& adr) {
-    emit_operand(Register::toRegister(op_subcode), adr);
+  // subcode, into the reg field of the ModR/M byte.
+  void emit_operand(Register reg, const Operand& adr) {
+    emit_operand(reg.code() & 0x07, adr);
+  }
+
+  // Emit the ModR/M byte, and optionally the SIB byte and
+  // 1- or 4-byte offset for a memory operand.  Also used to encode
+  // a three-bit opcode extension into the ModR/M byte.
+  void emit_operand(int rm, const Operand& adr);
+
+  // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
+  void emit_modrm(Register reg, Register rm_reg) {
+    emit(0xC0 | (reg.code() & 0x7) << 3 | (rm_reg.code() & 0x7));
+  }
+
+  // Emit a ModR/M byte with an operation subcode in the reg field and
+  // a register in the rm_reg field.
+  void emit_modrm(int code, Register rm_reg) {
+    ASSERT((code & ~0x7) == 0);
+    emit(0xC0 | (code & 0x7) << 3 | (rm_reg.code() & 0x7));
   }
 
   // Emit the code-object-relative offset of the label's position
@@ -859,11 +964,15 @@
   // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
   // AND, OR, XOR, or CMP.  The encodings of these operations are all
   // similar, differing just in the opcode or in the reg field of the
-  // Mod/RM byte.
+  // ModR/M byte.
   void arithmetic_op(byte opcode, Register dst, Register src);
   void arithmetic_op(byte opcode, Register reg, const Operand& op);
   void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
   void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+  // Emit machine code for a shift operation.
+  void shift(Register dst, Immediate shift_amount, int subcode);
+  // Shift dst by cl % 64 bits.
+  void shift(Register dst, int subcode);
 
   void emit_farith(int b1, int b2, int i);
 
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
new file mode 100644
index 0000000..0d5b0e2
--- /dev/null
+++ b/src/x64/codegen-x64-inl.h
@@ -0,0 +1,42 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_X64_CODEGEN_X64_INL_H_
+#define V8_X64_CODEGEN_X64_INL_H_
+
+namespace v8 {
+namespace internal {
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { UNIMPLEMENTED(); }
+void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_CODEGEN_X64_INL_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 3df5470..ca58e09 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -34,6 +34,14 @@
 namespace v8 {
 namespace internal {
 
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() { UNIMPLEMENTED(); }
+
+void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
+
+
 CodeGenerator::CodeGenerator(int buffer_size,
                              Handle<Script> script,
                              bool is_eval)
@@ -50,6 +58,9 @@
       in_spilled_code_(false) {
 }
 
+#define __ masm->
+
+
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
   UNIMPLEMENTED();
 }
@@ -229,10 +240,97 @@
   masm->int3();  // TODO(X64): UNIMPLEMENTED.
 }
 
+
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  masm->int3();  // TODO(X64): UNIMPLEMENTED.
+  Label invoke, exit;
+
+  // Setup frame.
+  __ push(rbp);
+  __ movq(rbp, rsp);
+
+  // Save callee-saved registers (X64 calling conventions).
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  // Push something that is not an arguments adaptor.
+  __ push(Immediate(ArgumentsAdaptorFrame::NON_SENTINEL));
+  __ push(Immediate(Smi::FromInt(marker)));  // @ function offset
+  __ push(r12);
+  __ push(r13);
+  __ push(r14);
+  __ push(r15);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(rbx);
+  // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
+  // callee-save in JS code as well.
+
+  // Save copies of the top frame descriptor on the stack.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  __ load_rax(c_entry_fp);
+  __ push(rax);
+
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ store_rax(pending_exception);
+  __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+  __ jmp(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ push(rax);  // flush TOS
+
+  // Clear any pending exceptions.
+  __ load_rax(ExternalReference::the_hole_value_location());
+  __ store_rax(pending_exception);
+
+  // Fake a receiver (NULL).
+  __ push(Immediate(0));  // receiver
+
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. We load the address
+  // from an external reference instead of inlining the call target address
+  // directly in the code, because the builtin stubs may not have been
+  // generated yet at the time this code is generated.
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ load_rax(construct_entry);
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ load_rax(entry);
+  }
+  __ call(FieldOperand(rax, Code::kHeaderSize));
+
+  // Unlink this frame from the handler chain.
+  __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+  __ pop(Operand(kScratchRegister, 0));
+  // Pop next_sp.
+  __ add(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+  // Restore the top frame descriptor from the stack.
+  __ bind(&exit);
+  __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+  __ pop(Operand(kScratchRegister, 0));
+
+  // Restore callee-saved registers (X64 conventions).
+  __ pop(rbx);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(r15);
+  __ pop(r14);
+  __ pop(r13);
+  __ pop(r12);
+  __ add(rsp, Immediate(2 * kPointerSize));  // remove markers
+
+  // Restore frame pointer and return.
+  __ pop(rbp);
+  __ ret(0);
 }
 
 
+#undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 345e33a..3416f51 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -41,17 +41,17 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset  = -1 * kPointerSize;
-  static const int kPPOffset    = -1 * kPointerSize;
-  static const int kFPOffset    = -1 * kPointerSize;
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kPPOffset    = 1 * kPointerSize;
+  static const int kFPOffset    = 2 * kPointerSize;
 
-  static const int kCodeOffset  = -1 * kPointerSize;
+  static const int kCodeOffset  = 3 * kPointerSize;
 
-  static const int kStateOffset = -1 * kPointerSize;
-  static const int kPCOffset    = -1 * kPointerSize;
+  static const int kStateOffset = 4 * kPointerSize;
+  static const int kPCOffset    = 5 * kPointerSize;
 
   static const int kAddressDisplacement = -1 * kPointerSize;
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = 6 * kPointerSize;
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 5e6dd3d..54c299d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -29,6 +29,7 @@
 
 #include "bootstrapper.h"
 #include "codegen-inl.h"
+#include "macro-assembler-x64.h"
 
 namespace v8 {
 namespace internal {
@@ -41,8 +42,75 @@
     code_object_(Heap::undefined_value()) {
 }
 
+
 void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
   UNIMPLEMENTED();
 }
 
+
+void MacroAssembler::Set(Register dst, int64_t x) {
+  if (is_int32(x)) {
+    movq(dst, Immediate(x));
+  } else if (is_uint32(x)) {
+    movl(dst, Immediate(x));
+  } else {
+    movq(dst, x, RelocInfo::NONE);
+  }
+}
+
+
+void MacroAssembler::Set(const Operand& dst, int64_t x) {
+  if (is_int32(x)) {
+    movq(kScratchRegister, Immediate(x));
+  } else if (is_uint32(x)) {
+    movl(kScratchRegister, Immediate(x));
+  } else {
+    movq(kScratchRegister, x, RelocInfo::NONE);
+  }
+  movq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  // The pc (return address) is already on TOS.
+  // This code pushes state, code, frame pointer and parameter pointer.
+  // Check that they are expected next on the stack, in that order.
+  ASSERT_EQ(StackHandlerConstants::kStateOffset,
+            StackHandlerConstants::kPCOffset - kPointerSize);
+  ASSERT_EQ(StackHandlerConstants::kCodeOffset,
+            StackHandlerConstants::kStateOffset - kPointerSize);
+  ASSERT_EQ(StackHandlerConstants::kFPOffset,
+            StackHandlerConstants::kCodeOffset - kPointerSize);
+  ASSERT_EQ(StackHandlerConstants::kPPOffset,
+            StackHandlerConstants::kFPOffset - kPointerSize);
+
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
+    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+    push(rbp);
+    push(rdi);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The parameter pointer is meaningless here and rbp does not
+    // point to a JS frame. So we save NULL for both pp and rbp. We
+    // expect the code throwing an exception to check rbp before
+    // dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+    push(Immediate(0));  // NULL frame pointer
+    push(Immediate(0));  // NULL parameter pointer
+  }
+  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+  // Cached TOS.
+  movq(rax, Operand(kScratchRegister, 0));
+  // Link this handler.
+  movq(Operand(kScratchRegister, 0), rsp);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 67c7bdd..4af372a 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -33,6 +33,11 @@
 namespace v8 {
 namespace internal {
 
+// Default scratch register used by MacroAssembler (and other code that needs
+// a spare register). The register isn't callee-saved and isn't used by the
+// function calling convention.
+static const Register kScratchRegister = r10;
+
 // Forward declaration.
 class JumpTarget;
 
@@ -137,8 +142,8 @@
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
   // Expression support
-  void Set(Register dst, const Immediate& x);
-  void Set(const Operand& dst, const Immediate& x);
+  void Set(Register dst, int64_t x);
+  void Set(const Operand& dst, int64_t x);
 
   // Compare object type for heap object.
   // Incoming register is heap_object and outgoing register is map.
@@ -156,7 +161,7 @@
 
   // Push a new try handler and link into try handler chain.
   // The return address must be pushed before calling this helper.
-  // On exit, eax contains TOS (next_sp).
+  // On exit, rax contains TOS (next_sp).
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
 
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index deba7aa..d341a1e 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -545,6 +545,8 @@
 
   bool Equals(VirtualFrame* other);
 
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
   friend class JumpTarget;
 };
 
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index dbb8172..48157d8 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -538,29 +538,36 @@
 
 
 THREADED_TEST(UsingExternalString) {
-  v8::HandleScope scope;
-  uint16_t* two_byte_string = AsciiToTwoByteString("test string");
-  Local<String> string = String::NewExternal(new TestResource(two_byte_string));
-  i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
-  // Trigger GCs so that the newly allocated string moves to old gen.
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
-  i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
-  CHECK(isymbol->IsSymbol());
+  {
+    v8::HandleScope scope;
+    uint16_t* two_byte_string = AsciiToTwoByteString("test string");
+    Local<String> string =
+        String::NewExternal(new TestResource(two_byte_string));
+    i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
+    // Trigger GCs so that the newly allocated string moves to old gen.
+    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
+    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+    i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
+    CHECK(isymbol->IsSymbol());
+  }
+  i::Heap::CollectAllGarbage();
 }
 
 
 THREADED_TEST(UsingExternalAsciiString) {
-  v8::HandleScope scope;
-  const char* one_byte_string = "test string";
-  Local<String> string = String::NewExternal(
-      new TestAsciiResource(i::StrDup(one_byte_string)));
-  i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
-  // Trigger GCs so that the newly allocated string moves to old gen.
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
-  i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
-  CHECK(isymbol->IsSymbol());
+  {
+    v8::HandleScope scope;
+    const char* one_byte_string = "test string";
+    Local<String> string = String::NewExternal(
+        new TestAsciiResource(i::StrDup(one_byte_string)));
+    i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
+    // Trigger GCs so that the newly allocated string moves to old gen.
+    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
+    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+    i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
+    CHECK(isymbol->IsSymbol());
+  }
+  i::Heap::CollectAllGarbage();
 }
 
 
@@ -4508,10 +4515,6 @@
   v8::HandleScope scope;
   LocalContext current;
 
-  // This sets 'global' to the real global object (as opposed to the
-  // proxy). It is highly implementation dependent, so take care.
-  current->Global()->Set(v8_str("global"), current->Global()->GetPrototype());
-
   // Tests where aliased eval can only be resolved dynamically.
   Local<Script> script =
       Script::Compile(v8_str("function f(x) { "
@@ -4520,7 +4523,7 @@
                              "}"
                              "foo = 0;"
                              "result1 = f(new Object());"
-                             "result2 = f(global);"
+                             "result2 = f(this);"
                              "var x = new Object();"
                              "x.eval = function(x) { return 1; };"
                              "result3 = f(x);"));
@@ -4535,7 +4538,7 @@
                            "  var bar = 2;"
                            "  with (x) { return eval('bar'); }"
                            "}"
-                           "f(global)"));
+                           "f(this)"));
   script->Run();
   CHECK(try_catch.HasCaught());
   try_catch.Reset();
@@ -4622,6 +4625,44 @@
 }
 
 
+// Test that calling eval in a context which has been detached from
+// its global throws an exception.  This behavior is consistent with
+// other JavaScript implementations.
+THREADED_TEST(EvalInDetachedGlobal) {
+  v8::HandleScope scope;
+
+  v8::Persistent<Context> context0 = Context::New();
+  v8::Persistent<Context> context1 = Context::New();
+
+  // Setup function in context0 that uses eval from context0.
+  context0->Enter();
+  v8::Handle<v8::Value> fun =
+      CompileRun("var x = 42;"
+                 "(function() {"
+                 "  var e = eval;"
+                 "  return function(s) { return e(s); }"
+                 "})()");
+  context0->Exit();
+
+  // Put the function into context1 and call it before and after
+  // detaching the global.  Before detaching, the call succeeds and
+  // after detaching and exception is thrown.
+  context1->Enter();
+  context1->Global()->Set(v8_str("fun"), fun);
+  v8::Handle<v8::Value> x_value = CompileRun("fun('x')");
+  CHECK_EQ(42, x_value->Int32Value());
+  context0->DetachGlobal();
+  v8::TryCatch catcher;
+  x_value = CompileRun("fun('x')");
+  CHECK(x_value.IsEmpty());
+  CHECK(catcher.HasCaught());
+  context1->Exit();
+
+  context1.Dispose();
+  context0.Dispose();
+}
+
+
 THREADED_TEST(CrossLazyLoad) {
   v8::HandleScope scope;
   LocalContext other;
@@ -4820,6 +4861,23 @@
 }
 
 
+typedef v8::Handle<Value> (*NamedPropertyGetter)(Local<String> property,
+                                                 const AccessorInfo& info);
+
+
+static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
+                                   const char* source,
+                                   int expected) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(getter);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(source);
+  CHECK_EQ(expected, value->Int32Value());
+}
+
+
 static v8::Handle<Value> InterceptorLoadICGetter(Local<String> name,
                                                  const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
@@ -4830,17 +4888,100 @@
 
 // This test should hit the load IC for the interceptor case.
 THREADED_TEST(InterceptorLoadIC) {
-  v8::HandleScope scope;
-  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
-  templ->SetNamedPropertyHandler(InterceptorLoadICGetter);
-  LocalContext context;
-  context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CheckInterceptorLoadIC(InterceptorLoadICGetter,
     "var result = 0;"
     "for (var i = 0; i < 1000; i++) {"
     "  result = o.x;"
-    "}");
-  CHECK_EQ(42, value->Int32Value());
+    "}",
+    42);
+}
+
+
+// Below go several tests which verify that JITing for various
+// configurations of interceptor and explicit fields works fine
+// (those cases are special cased to get better performance).
+
+static v8::Handle<Value> InterceptorLoadXICGetter(Local<String> name,
+                                                 const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  return v8_str("x")->Equals(name)
+      ? v8::Integer::New(42) : v8::Handle<v8::Value>();
+}
+
+
+THREADED_TEST(InterceptorLoadICWithFieldOnHolder) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "var result = 0;"
+    "o.y = 239;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = o.y;"
+    "}",
+    239);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithSubstitutedProto) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "var result = 0;"
+    "o.__proto__ = { 'y': 239 };"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = o.y + o.x;"
+    "}",
+    239 + 42);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithPropertyOnProto) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "var result = 0;"
+    "o.__proto__.y = 239;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = o.y + o.x;"
+    "}",
+    239 + 42);
+}
+
+
+THREADED_TEST(InterceptorLoadICUndefined) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = (o.y == undefined) ? 239 : 42;"
+    "}",
+    239);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithOverride) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "fst = new Object();  fst.__proto__ = o;"
+    "snd = new Object();  snd.__proto__ = fst;"
+    "var result1 = 0;"
+    "for (var i = 0; i < 1000;  i++) {"
+    "  result1 = snd.x;"
+    "}"
+    "fst.x = 239;"
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = snd.x;"
+    "}"
+    "result + result1",
+    239 + 42);
+}
+
+
+static v8::Handle<Value> InterceptorLoadICGetter0(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  CHECK(v8_str("x")->Equals(name));
+  return v8::Integer::New(0);
+}
+
+
+THREADED_TEST(InterceptorReturningZero) {
+  CheckInterceptorLoadIC(InterceptorLoadICGetter0,
+     "o.x == undefined ? 1 : 0",
+     0);
 }
 
 
@@ -6616,6 +6757,74 @@
 }
 
 
+THREADED_TEST(ForceDelete) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+  LocalContext context(NULL, templ);
+  v8::Handle<v8::Object> global = context->Global();
+
+  // Ordinary properties
+  v8::Handle<v8::String> simple_property = v8::String::New("p");
+  global->Set(simple_property, v8::Int32::New(4), v8::DontDelete);
+  CHECK_EQ(4, global->Get(simple_property)->Int32Value());
+  // This should fail because the property is dont-delete.
+  CHECK(!global->Delete(simple_property));
+  CHECK_EQ(4, global->Get(simple_property)->Int32Value());
+  // This should succeed even though the property is dont-delete.
+  CHECK(global->ForceDelete(simple_property));
+  CHECK(global->Get(simple_property)->IsUndefined());
+}
+
+
+static int force_delete_interceptor_count = 0;
+static bool pass_on_delete = false;
+
+
+static v8::Handle<v8::Boolean> ForceDeleteDeleter(
+    v8::Local<v8::String> name,
+    const v8::AccessorInfo& info) {
+  force_delete_interceptor_count++;
+  if (pass_on_delete) {
+    return v8::Handle<v8::Boolean>();
+  } else {
+    return v8::True();
+  }
+}
+
+
+THREADED_TEST(ForceDeleteWithInterceptor) {
+  force_delete_interceptor_count = 0;
+  pass_on_delete = false;
+
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(0, 0, 0, ForceDeleteDeleter);
+  LocalContext context(NULL, templ);
+  v8::Handle<v8::Object> global = context->Global();
+
+  v8::Handle<v8::String> some_property = v8::String::New("a");
+  global->Set(some_property, v8::Integer::New(42), v8::DontDelete);
+
+  // Deleting a property should get intercepted and nothing should
+  // happen.
+  CHECK_EQ(0, force_delete_interceptor_count);
+  CHECK(global->Delete(some_property));
+  CHECK_EQ(1, force_delete_interceptor_count);
+  CHECK_EQ(42, global->Get(some_property)->Int32Value());
+  // Deleting the property when the interceptor returns an empty
+  // handle should not delete the property since it is DontDelete.
+  pass_on_delete = true;
+  CHECK(!global->Delete(some_property));
+  CHECK_EQ(2, force_delete_interceptor_count);
+  CHECK_EQ(42, global->Get(some_property)->Int32Value());
+  // Forcing the property to be deleted should delete the value
+  // without calling the interceptor.
+  CHECK(global->ForceDelete(some_property));
+  CHECK(global->Get(some_property)->IsUndefined());
+  CHECK_EQ(2, force_delete_interceptor_count);
+}
+
+
 v8::Persistent<Context> calling_context0;
 v8::Persistent<Context> calling_context1;
 v8::Persistent<Context> calling_context2;
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 452b31f..515657f 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -553,7 +553,7 @@
   CHECK(obj->HasLocalProperty(first));
 
   // delete first
-  CHECK(obj->DeleteProperty(first));
+  CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
   CHECK(!obj->HasLocalProperty(first));
 
   // add first and then second
@@ -563,9 +563,9 @@
   CHECK(obj->HasLocalProperty(second));
 
   // delete first and then second
-  CHECK(obj->DeleteProperty(first));
+  CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
   CHECK(obj->HasLocalProperty(second));
-  CHECK(obj->DeleteProperty(second));
+  CHECK(obj->DeleteProperty(second, JSObject::NORMAL_DELETION));
   CHECK(!obj->HasLocalProperty(first));
   CHECK(!obj->HasLocalProperty(second));
 
@@ -576,9 +576,9 @@
   CHECK(obj->HasLocalProperty(second));
 
   // delete second and then first
-  CHECK(obj->DeleteProperty(second));
+  CHECK(obj->DeleteProperty(second, JSObject::NORMAL_DELETION));
   CHECK(obj->HasLocalProperty(first));
-  CHECK(obj->DeleteProperty(first));
+  CHECK(obj->DeleteProperty(first, JSObject::NORMAL_DELETION));
   CHECK(!obj->HasLocalProperty(first));
   CHECK(!obj->HasLocalProperty(second));
 
diff --git a/test/cctest/test-log-ia32.cc b/test/cctest/test-log-ia32.cc
index 7312901..a40a800 100644
--- a/test/cctest/test-log-ia32.cc
+++ b/test/cctest/test-log-ia32.cc
@@ -37,13 +37,11 @@
 
 
 static struct {
-  StackTracer* tracer;
   TickSample* sample;
-} trace_env = { NULL, NULL };
+} trace_env = { NULL };
 
 
-static void InitTraceEnv(StackTracer* tracer, TickSample* sample) {
-  trace_env.tracer = tracer;
+static void InitTraceEnv(TickSample* sample) {
   trace_env.sample = sample;
 }
 
@@ -53,7 +51,7 @@
   // sp is only used to define stack high bound
   trace_env.sample->sp =
       reinterpret_cast<unsigned int>(trace_env.sample) - 10240;
-  trace_env.tracer->Trace(trace_env.sample);
+  StackTracer::Trace(trace_env.sample);
 }
 
 
@@ -99,6 +97,8 @@
       v8::Handle<String> name);
   static v8::Handle<v8::Value> Trace(const v8::Arguments& args);
   static v8::Handle<v8::Value> JSTrace(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSEntrySP(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSEntrySPLevel2(const v8::Arguments& args);
  private:
   static Address GetFP(const v8::Arguments& args);
   static const char* kSource;
@@ -107,8 +107,9 @@
 
 const char* TraceExtension::kSource =
     "native function trace();"
-    "native function js_trace();";
-
+    "native function js_trace();"
+    "native function js_entry_sp();"
+    "native function js_entry_sp_level2();";
 
 v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunction(
     v8::Handle<String> name) {
@@ -116,6 +117,10 @@
     return v8::FunctionTemplate::New(TraceExtension::Trace);
   } else if (name->Equals(String::New("js_trace"))) {
     return v8::FunctionTemplate::New(TraceExtension::JSTrace);
+  } else if (name->Equals(String::New("js_entry_sp"))) {
+    return v8::FunctionTemplate::New(TraceExtension::JSEntrySP);
+  } else if (name->Equals(String::New("js_entry_sp_level2"))) {
+    return v8::FunctionTemplate::New(TraceExtension::JSEntrySPLevel2);
   } else {
     CHECK(false);
     return v8::Handle<v8::FunctionTemplate>();
@@ -143,6 +148,34 @@
 }
 
 
+static Address GetJsEntrySp() {
+  CHECK_NE(NULL, Top::GetCurrentThread());
+  return Top::js_entry_sp(Top::GetCurrentThread());
+}
+
+
+v8::Handle<v8::Value> TraceExtension::JSEntrySP(const v8::Arguments& args) {
+  CHECK_NE(0, GetJsEntrySp());
+  return v8::Undefined();
+}
+
+
+static void CompileRun(const char* source) {
+  Script::Compile(String::New(source))->Run();
+}
+
+
+v8::Handle<v8::Value> TraceExtension::JSEntrySPLevel2(
+    const v8::Arguments& args) {
+  v8::HandleScope scope;
+  const Address js_entry_sp = GetJsEntrySp();
+  CHECK_NE(0, js_entry_sp);
+  CompileRun("js_entry_sp();");
+  CHECK_EQ(js_entry_sp, GetJsEntrySp());
+  return v8::Undefined();
+}
+
+
 static TraceExtension kTraceExtension;
 v8::DeclareExtension kTraceExtensionDeclaration(&kTraceExtension);
 
@@ -164,11 +197,6 @@
 }
 
 
-static void CompileRun(const char* source) {
-  Script::Compile(String::New(source))->Run();
-}
-
-
 static Local<Value> GetGlobalProperty(const char* name) {
   return env->Global()->Get(String::New(name));
 }
@@ -255,8 +283,7 @@
 
 TEST(CFromJSStackTrace) {
   TickSample sample;
-  StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
-  InitTraceEnv(&tracer, &sample);
+  InitTraceEnv(&sample);
 
   InitializeVM();
   v8::HandleScope scope;
@@ -277,8 +304,7 @@
 
 TEST(PureJSStackTrace) {
   TickSample sample;
-  StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
-  InitTraceEnv(&tracer, &sample);
+  InitTraceEnv(&sample);
 
   InitializeVM();
   v8::HandleScope scope;
@@ -323,11 +349,22 @@
 
 TEST(PureCStackTrace) {
   TickSample sample;
-  StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
-  InitTraceEnv(&tracer, &sample);
+  InitTraceEnv(&sample);
   // Check that sampler doesn't crash
   CHECK_EQ(10, CFunc(10));
 }
 
 
+TEST(JsEntrySp) {
+  InitializeVM();
+  v8::HandleScope scope;
+  CHECK_EQ(0, GetJsEntrySp());
+  CompileRun("a = 1; b = a + 1;");
+  CHECK_EQ(0, GetJsEntrySp());
+  CompileRun("js_entry_sp();");
+  CHECK_EQ(0, GetJsEntrySp());
+  CompileRun("js_entry_sp_level2();");
+  CHECK_EQ(0, GetJsEntrySp());
+}
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 005be47..f3f7efc 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -166,17 +166,19 @@
                   "for (var i = 0; i < 1000; ++i) { "
                   "(function(x) { return %d * x; })(i); }",
                   log_pos);
-  // Run code for 200 msecs to get some ticks. Use uint to always have
-  // non-negative delta.
-  const uint64_t started_us = i::OS::Ticks();
-  uint64_t delta;
-  while ((delta = i::OS::Ticks() - started_us) < 200 * 1000) {
+  // Run code for 200 msecs to get some ticks.
+  const double end_time = i::OS::TimeCurrentMillis() + 200;
+  while (i::OS::TimeCurrentMillis() < end_time) {
     CompileAndRunScript(script_src.start());
   }
 
   Logger::PauseProfiler();
   CHECK(!LoggerTestHelper::IsSamplerActive());
 
+  // Wait 50 msecs to allow Profiler thread to process the last
+  // tick sample it has got.
+  i::OS::Sleep(50);
+
   // Now we must have compiler and tick records.
   int log_size = GetLogLines(log_pos, &buffer);
   printf("log_size: %d\n", log_size);
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 7fd1044..23b3254 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -152,6 +152,13 @@
   CHECK_EQ(0, FastD2I(0.345));
   CHECK_EQ(1, FastD2I(1.234));
   CHECK_EQ(1000000, FastD2I(1000000.123));
+  // Check that >> is implemented as arithmetic shift right.
+  // If this is not true, then ArithmeticShiftRight() must be changed.
+  // There are also documented right shifts in assembler.cc of
+  // int8_t and intptr_t signed integers.
+  CHECK_EQ(-2, -8 >> 2);
+  CHECK_EQ(-2, static_cast<int8_t>(-8) >> 2);
+  CHECK_EQ(-2, static_cast<intptr_t>(-8) >> 2);
 }
 
 
@@ -177,32 +184,3 @@
     buffer.Dispose();
   }
 }
-
-
-// Issue 358: When copying EmbeddedVector, Vector::start_ must point
-// to the buffer in the copy, not in the source.
-TEST(EmbeddedVectorCopy) {
-  EmbeddedVector<int, 1> src;
-  src[0] = 100;
-  EmbeddedVector<int, 1> dst = src;
-  CHECK_NE(src.start(), dst.start());
-  CHECK_EQ(src[0], dst[0]);
-  src[0] = 200;
-  CHECK_NE(src[0], dst[0]);
-}
-
-
-// Also Issue 358, assignment case.
-TEST(EmbeddedVectorAssign) {
-  EmbeddedVector<int, 1> src;
-  src[0] = 100;
-  EmbeddedVector<int, 1> dst;
-  dst[0] = 200;
-  CHECK_NE(src.start(), dst.start());
-  CHECK_NE(src[0], dst[0]);
-  dst = src;
-  CHECK_NE(src.start(), dst.start());
-  CHECK_EQ(src[0], dst[0]);
-  src[0] = 200;
-  CHECK_NE(src[0], dst[0]);
-}
diff --git a/test/mjsunit/codegen_coverage.js b/test/mjsunit/codegen-coverage.js
similarity index 100%
rename from test/mjsunit/codegen_coverage.js
rename to test/mjsunit/codegen-coverage.js
diff --git a/test/mjsunit/debug-compile-event.js b/test/mjsunit/debug-compile-event.js
index 18975de..c346f76 100644
--- a/test/mjsunit/debug-compile-event.js
+++ b/test/mjsunit/debug-compile-event.js
@@ -32,8 +32,11 @@
 var exception = false;  // Exception in debug event listener.
 var before_compile_count = 0;
 var after_compile_count = 0;
-var current_source = '';  // Current source compiled.
-var source_count = 0;  // Total number of scource sompiled.
+var current_source = '';  // Current source being compiled.
+var source_count = 0;  // Total number of sources compiled.
+var host_compilations = 0;  // Number of sources compiled through the API.
+var eval_compilations = 0;  // Number of sources compiled through eval.
+var json_compilations = 0;  // Number of sources compiled through JSON.parse.
 
 
 function compileSource(source) {
@@ -52,14 +55,32 @@
         before_compile_count++;
       } else {
         after_compile_count++;
+        switch (event_data.script().compilationType()) {
+          case Debug.ScriptCompilationType.Host:
+            host_compilations++;
+            break;
+          case Debug.ScriptCompilationType.Eval:
+            eval_compilations++;
+            break;
+          case Debug.ScriptCompilationType.JSON:
+            json_compilations++;
+            break;
+        }
       }
-      
+
       // If the compiled source contains 'eval' there will be additional compile
       // events for the source inside eval.
       if (current_source.indexOf('eval') == 0) {
         // For source with 'eval' there will be compile events with substrings
         // as well as with with the exact source.
         assertTrue(current_source.indexOf(event_data.script().source()) >= 0);
+      } else if (current_source.indexOf('JSON.parse') == 0) {
+        // For JSON the JSON source will be in parentheses.
+        var s = event_data.script().source();
+        if (s[0] == '(') {
+          s = s.substring(1, s.length - 2);
+        }
+        assertTrue(current_source.indexOf(s) >= 0);
       } else {
         // For source without 'eval' there will be a compile events with the
         // exact source.
@@ -86,6 +107,8 @@
 source_count++;  // Using eval causes additional compilation event.
 compileSource('eval("eval(\'function(){return a;}\')")');
 source_count += 2;  // Using eval causes additional compilation event.
+compileSource('JSON.parse("{a:1,b:2}")');
+source_count++;  // Using JSON.parse causes additional compilation event.
 
 // Make sure that the debug event listener was invoked.
 assertFalse(exception, "exception in listener")
@@ -93,7 +116,11 @@
 // Number of before and after compile events should be the same.
 assertEquals(before_compile_count, after_compile_count);
 
-// Check the actual number of events.
+// Check the actual number of events (no compilation through the API as all
+// source compiled through eval except for one JSON.parse call).
 assertEquals(source_count, after_compile_count);
+assertEquals(0, host_compilations);
+assertEquals(source_count - 1, eval_compilations);
+assertEquals(1, json_compilations);
 
 Debug.setListener(null);
diff --git a/test/mjsunit/mirror-script.js b/test/mjsunit/mirror-script.js
index 2ffa44f..9b67b9b 100644
--- a/test/mjsunit/mirror-script.js
+++ b/test/mjsunit/mirror-script.js
@@ -25,10 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
 // Test the mirror object for scripts.
 
-function testScriptMirror(f, file_name, file_lines, script_type, script_source) {
+function testScriptMirror(f, file_name, file_lines, type, compilation_type,
+                          source, eval_from_line) {
   // Create mirror and JSON representation.
   var mirror = debug.MakeMirror(f).script();
   var serializer = debug.MakeMirrorSerializer();
@@ -53,13 +54,17 @@
   if (file_lines > 0) {
     assertEquals(file_lines, mirror.lineCount());
   }
-  assertEquals(script_type, mirror.scriptType());
-  if (script_source) {
-    assertEquals(script_source, mirror.source());
+  assertEquals(type, mirror.scriptType());
+  assertEquals(compilation_type, mirror.compilationType(), "compilation type");
+  if (source) {
+    assertEquals(source, mirror.source());
+  }
+  if (eval_from_line) {
+    assertEquals(eval_from_line,  mirror.evalFromLocation().line);
   }
   
   // Parse JSON representation and check.
-  var fromJSON = eval('(' + json + ')');
+  var fromJSON = JSON.parse(json);
   assertEquals('script', fromJSON.type);
   name = fromJSON.name;
   if (name) {
@@ -72,15 +77,18 @@
   if (file_lines > 0) {
     assertEquals(file_lines, fromJSON.lineCount);
   }
-  assertEquals(script_type, fromJSON.scriptType);  
+  assertEquals(type, fromJSON.scriptType);
+  assertEquals(compilation_type, fromJSON.compilationType);
 }
 
 
 // Test the script mirror for different functions.
-testScriptMirror(function(){}, 'mirror-script.js', 92, 2);
-testScriptMirror(Math.sin, 'native math.js', -1, 0);
-testScriptMirror(eval('function(){}'), null, 1, 2, 'function(){}');
-testScriptMirror(eval('function(){\n  }'), null, 2, 2, 'function(){\n  }');
+testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
+testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
+testScriptMirror(eval('function(){}'), null, 1, 2, 1, 'function(){}', 87);
+testScriptMirror(eval('function(){\n  }'), null, 2, 2, 1, 'function(){\n  }', 88);
+testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
+testScriptMirror(%CompileString("({a:1,\n  b:2})", true), null, 2, 2, 2, '({a:1,\n  b:2})');
 
 // Test taking slices of source.
 var mirror = debug.MakeMirror(eval('function(){\n  1;\n}')).script();
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 7b9e3ac..66e1bb6 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -307,8 +307,6 @@
                 'cflags': [
                     # Avoid gcc 4.4 strict aliasing issues in dtoa.c
                     '-fno-strict-aliasing',
-                    # Avoid gcc 4.4 mksnapshot segfault.
-                    '-fno-tree-vectorize',
                     # Avoid crashes with gcc 4.4 in the v8 test suite.
                     '-fno-tree-vrp',
                 ],
diff --git a/tools/linux-tick-processor b/tools/linux-tick-processor
index e2f38b1..968c241 100644
--- a/tools/linux-tick-processor
+++ b/tools/linux-tick-processor
@@ -1,16 +1,15 @@
 #!/bin/sh
 
-tools_dir=$(dirname "$0")
-d8_exec=$tools_dir/../d8
+tools_path=`cd $(dirname "$0");pwd`
+[ "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
 
-# compile d8 if it doesn't exist.
-if [ ! -x $d8_exec ]
-then
-  scons -C $tools_dir/.. d8
-fi
+# compile d8 if it doesn't exist, assuming this script
+# resides in the repository.
+[ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
 
 # nm spits out 'no symbols found' messages to stderr.
-$d8_exec $tools_dir/splaytree.js $tools_dir/codemap.js \
-  $tools_dir/csvparser.js $tools_dir/consarray.js \
-  $tools_dir/profile.js $tools_dir/profile_view.js \
-  $tools_dir/tickprocessor.js -- $@ 2>/dev/null
+$d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+  $tools_path/csvparser.js $tools_path/consarray.js \
+  $tools_path/profile.js $tools_path/profile_view.js \
+  $tools_path/tickprocessor.js -- $@ 2>/dev/null
diff --git a/tools/profile_view.js b/tools/profile_view.js
index 9d196a3..bdea631 100644
--- a/tools/profile_view.js
+++ b/tools/profile_view.js
@@ -53,6 +53,7 @@
     callTree, opt_bottomUpViewWeights) {
   var head;
   var samplingRate = this.samplingRate;
+  var createViewNode = this.createViewNode;
   callTree.traverse(function(node, viewParent) {
     var totalWeight = node.totalWeight * samplingRate;
     var selfWeight = node.selfWeight * samplingRate;
@@ -63,8 +64,7 @@
         selfWeight = 0;
       }
     }
-    var viewNode = new devtools.profiler.ProfileView.Node(
-        node.label, totalWeight, selfWeight, head);
+    var viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
     if (viewParent) {
       viewParent.addChild(viewNode);
     } else {
@@ -72,26 +72,50 @@
     }
     return viewNode;
   });
-  var view = new devtools.profiler.ProfileView(head);
+  var view = this.createView(head);
   return view;
 };
 
 
 /**
+ * Factory method for a profile view.
+ *
+ * @param {devtools.profiler.ProfileView.Node} head View head node.
+ * @return {devtools.profiler.ProfileView} Profile view.
+ */
+devtools.profiler.ViewBuilder.prototype.createView = function(head) {
+  return new devtools.profiler.ProfileView(head);
+};
+
+
+/**
+ * Factory method for a profile view node.
+ *
+ * @param {string} internalFuncName A fully qualified function name.
+ * @param {number} totalTime Amount of time that application spent in the
+ *     corresponding function and its descendants (not that depending on
+ *     profile they can be either callees or callers.)
+ * @param {number} selfTime Amount of time that application spent in the
+ *     corresponding function only.
+ * @param {devtools.profiler.ProfileView.Node} head Profile view head.
+ * @return {devtools.profiler.ProfileView.Node} Profile view node.
+ */
+devtools.profiler.ViewBuilder.prototype.createViewNode = function(
+    funcName, totalTime, selfTime, head) {
+  return new devtools.profiler.ProfileView.Node(
+      funcName, totalTime, selfTime, head);
+};
+
+
+/**
  * Creates a Profile View object. It allows to perform sorting
- * and filtering actions on the profile. Profile View mimicks
- * the Profile object from WebKit's JSC profiler.
+ * and filtering actions on the profile.
  *
  * @param {devtools.profiler.ProfileView.Node} head Head (root) node.
  * @constructor
  */
 devtools.profiler.ProfileView = function(head) {
   this.head = head;
-  this.title = '';
-  this.uid = '';
-  this.heavyProfile = null;
-  this.treeProfile = null;
-  this.flatProfile = null;
 };
 
 
@@ -140,63 +164,12 @@
  */
 devtools.profiler.ProfileView.Node = function(
     internalFuncName, totalTime, selfTime, head) {
-  this.callIdentifier = 0;
   this.internalFuncName = internalFuncName;
-  this.initFuncInfo();
   this.totalTime = totalTime;
   this.selfTime = selfTime;
   this.head = head;
   this.parent = null;
   this.children = [];
-  this.visible = true;
-};
-
-
-/**
- * RegEx for stripping V8's prefixes of compiled functions.
- */
-devtools.profiler.ProfileView.Node.FUNC_NAME_STRIP_RE =
-    /^(?:LazyCompile|Function): (.*)$/;
-
-
-/**
- * RegEx for extracting script source URL and line number.
- */
-devtools.profiler.ProfileView.Node.FUNC_NAME_PARSE_RE = /^([^ ]+) (.*):(\d+)$/;
-
-
-/**
- * RegEx for removing protocol name from URL.
- */
-devtools.profiler.ProfileView.Node.URL_PARSE_RE = /^(?:http:\/)?.*\/([^/]+)$/;
-
-
-/**
- * Inits 'functionName', 'url', and 'lineNumber' fields using 'internalFuncName'
- * field.
- */
-devtools.profiler.ProfileView.Node.prototype.initFuncInfo = function() {
-  var nodeAlias = devtools.profiler.ProfileView.Node;
-  this.functionName = this.internalFuncName;
-
-  var strippedName = nodeAlias.FUNC_NAME_STRIP_RE.exec(this.functionName);
-  if (strippedName) {
-    this.functionName = strippedName[1];
-  }
-
-  var parsedName = nodeAlias.FUNC_NAME_PARSE_RE.exec(this.functionName);
-  if (parsedName) {
-    this.url = parsedName[2];
-    var parsedUrl = nodeAlias.URL_PARSE_RE.exec(this.url);
-    if (parsedUrl) {
-      this.url = parsedUrl[1];
-    }
-    this.functionName = parsedName[1];
-    this.lineNumber = parsedName[3];
-  } else {
-    this.url = '';
-    this.lineNumber = 0;
-  }
 };
 
 
diff --git a/tools/test.py b/tools/test.py
index 9981e8c..6bd536b 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -164,6 +164,8 @@
       print "Command: %s" % EscapeCommand(failed.command)
       if failed.HasCrashed():
         print "--- CRASHED ---"
+      if failed.HasTimedOut():
+        print "--- TIMEOUT ---"
     if len(self.failed) == 0:
       print "==="
       print "=== All tests succeeded"
@@ -207,6 +209,9 @@
       if output.HasCrashed():
         sys.stdout.write('C')
         sys.stdout.flush()
+      elif output.HasTimedOut():
+        sys.stdout.write('T')
+        sys.stdout.flush()
       else:
         sys.stdout.write('F')
         sys.stdout.flush()
@@ -245,6 +250,8 @@
       print "Command: %s" % EscapeCommand(output.command)
       if output.HasCrashed():
         print "--- CRASHED ---"
+      if output.HasTimedOut():
+        print "--- TIMEOUT ---"
 
   def Truncate(self, str, length):
     if length and (len(str) > (length - 3)):
@@ -381,6 +388,9 @@
       return self.output.exit_code < 0 and \
              self.output.exit_code != -signal.SIGABRT
 
+  def HasTimedOut(self):
+    return self.output.timed_out;
+    
   def HasFailed(self):
     execution_failed = self.test.DidFail(self.output)
     if self.test.IsNegative():
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 196daa9..477ab26 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -273,10 +273,6 @@
     this.printCounter(this.ticks_.unaccounted, this.ticks_.total);
   }
 
-  // Disable initialization of 'funcName', 'url', 'lineNumber' as
-  // we don't use it and it just wastes time.
-  devtools.profiler.ProfileView.Node.prototype.initFuncInfo = function() {};
-
   var flatProfile = this.profile_.getFlatProfile();
   var flatView = this.viewBuilder_.buildView(flatProfile);
   // Sort by self time, desc, then by name, desc.