Update V8 to r3431 as required by WebKit r51976.

Change-Id: I567392c3f8c0a0d5201a4249611ac4ccf468cd5b
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8f078ff..9c7f9b6 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -176,7 +176,7 @@
 
 
 void Assembler::set_target_address_at(Address pc, Address target) {
-  Memory::int32_at(pc) = target - pc - 4;
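+  // The difference is assumed to fit in 32 bits: x64 code targets use
+  // rip-relative addressing with a signed 32-bit displacement.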
+  Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
   CPU::FlushICache(pc, sizeof(int32_t));
 }
 
@@ -191,13 +191,13 @@
 void RelocInfo::apply(intptr_t delta) {
   if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
-    Memory::Address_at(pc_) += delta;
+    Memory::Address_at(pc_) += static_cast<int32_t>(delta);
   } else if (IsCodeTarget(rmode_)) {
-    Memory::int32_at(pc_) -= delta;
+    Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
   } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
-    Memory::int32_at(pc_ + 1) -= delta;  // relocate entry
+    Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta);  // relocate entry
   }
 }
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 61e8753..2d524ea 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -80,11 +80,15 @@
 //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
 uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
 uint64_t CpuFeatures::enabled_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 void CpuFeatures::Probe()  {
   ASSERT(Heap::HasBeenSetup());
   ASSERT(supported_ == kDefaultCpuFeatures);
-  if (Serializer::enabled()) return;  // No features if we might serialize.
+  if (Serializer::enabled()) {
+    supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    return;  // Only default and OS-guaranteed features if we might serialize.
+  }
 
   Assembler assm(NULL, 0);
   Label cpuid, done;
@@ -160,6 +164,11 @@
   typedef uint64_t (*F0)();
   F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
   supported_ = probe();
+  found_by_runtime_probing_ = supported_;
+  found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
+  uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= os_guarantees;
+  found_by_runtime_probing_ &= ~os_guarantees;
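+  // found_by_runtime_probing_ is now exactly the set of features detected
+  // by probing but neither default nor OS-guaranteed; code in a snapshot
+  // must not depend on these (see CpuFeatures::Scope).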
   // SSE2 and CMOV must be available on an X64 CPU.
   ASSERT(IsSupported(CPUID));
   ASSERT(IsSupported(SSE2));
@@ -337,7 +346,8 @@
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
   ASSERT(desc->instr_size > 0);  // Zero-size code objects upset the system.
-  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->reloc_size =
+      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
   desc->origin = this;
 
   Counters::reloc_info_size.Increment(desc->reloc_size);
@@ -400,7 +410,8 @@
   // setup new buffer
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
-  desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+  desc.reloc_size =
+      static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
 
   // Clear the buffer in debug mode. Use 'int3' instructions to make
   // sure we trap if we ever run uninitialized code.
@@ -887,7 +898,7 @@
 
 
 void Assembler::cpuid() {
-  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+  ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit(0x0F);
@@ -2045,7 +2056,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_optional_rex_32(adr);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 4f514f2..fa7d33b 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -37,6 +37,8 @@
 #ifndef V8_X64_ASSEMBLER_X64_H_
 #define V8_X64_ASSEMBLER_X64_H_
 
+#include "serialize.h"
+
 namespace v8 {
 namespace internal {
 
@@ -362,20 +364,11 @@
 //   }
 class CpuFeatures : public AllStatic {
  public:
-  // Feature flags bit positions. They are mostly based on the CPUID spec.
-  // (We assign CPUID itself to one of the currently reserved bits --
-  // feel free to change this if needed.)
-  enum Feature { SSE3 = 32,
-                 SSE2 = 26,
-                 CMOV = 15,
-                 RDTSC = 4,
-                 CPUID = 10,
-                 SAHF = 0};
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
   static void Probe();
   // Check whether a feature is supported by the target CPU.
-  static bool IsSupported(Feature f) {
+  static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -384,33 +377,35 @@
     return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
   // Check whether a feature is currently enabled.
-  static bool IsEnabled(Feature f) {
+  static bool IsEnabled(CpuFeature f) {
     return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
   }
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(Feature f) {
+    explicit Scope(CpuFeature f) {
+      uint64_t mask = (V8_UINT64_C(1) << f);
       ASSERT(CpuFeatures::IsSupported(f));
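+      // Snapshots must be portable, so while serializing only features
+      // not found solely by runtime probing may be enabled.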
+      ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
       old_enabled_ = CpuFeatures::enabled_;
-      CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
+      CpuFeatures::enabled_ |= mask;
     }
     ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
    private:
     uint64_t old_enabled_;
 #else
    public:
-    explicit Scope(Feature f) {}
+    explicit Scope(CpuFeature f) {}
 #endif
   };
  private:
   // Safe defaults include SSE2 and CMOV for X64. They are always
   // available, if anyone checks, but code shouldn't need to check.
-  static const uint64_t kDefaultCpuFeatures =
-      (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
+  static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
   static uint64_t supported_;
   static uint64_t enabled_;
+  static uint64_t found_by_runtime_probing_;
 };
 
 
@@ -458,7 +453,25 @@
   // the relative displacements stored in the code.
   static inline Address target_address_at(Address pc);
   static inline void set_target_address_at(Address pc, Address target);
+
+  // This sets the branch destination (which is in the instruction on x64).
+  // This is for calls and branches within generated code.
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  // This sets the branch destination (which is a load instruction on x64).
+  // This is for calls and branches to runtime code.
+  inline static void set_external_target_at(Address instruction_payload,
+                                            Address target) {
+    *reinterpret_cast<Address*>(instruction_payload) = target;
+  }
+
   inline Handle<Object> code_target_object_handle_at(Address pc);
+  // Number of bytes taken up by the branch target in the code.
+  static const int kCallTargetSize = 4;      // Use 32-bit displacement.
+  static const int kExternalTargetSize = 8;  // Use 64-bit absolute.
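+  // (The serializer uses these sizes to know how many bytes a code target
+  // or an external reference occupies in the instruction stream.)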
   // Distance between the address of the code target in the call instruction
   // and the return address pushed on the stack.
   static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
@@ -469,6 +482,12 @@
   static const int kPatchReturnSequenceAddressOffset = 13 - 4;
   // TODO(X64): Rename this, removing the "Real", after changing the above.
   static const int kRealPatchReturnSequenceAddressOffset = 2;
+
+  // The x64 JS return sequence is padded with int3 to make it large
+  // enough to hold a call instruction when the debugger patches it.
+  static const int kCallInstructionLength = 13;
+  static const int kJSReturnSequenceLength = 13;
+
   // ---------------------------------------------------------------------------
   // Code generation
   //
@@ -829,12 +848,12 @@
   }
 
   // Shifts dst right, duplicating sign bit, by cl % 64 bits.
-  void sar(Register dst) {
+  void sar_cl(Register dst) {
     shift(dst, 0x7);
   }
 
   // Shifts dst right, duplicating sign bit, by cl % 32 bits.
-  void sarl(Register dst) {
+  void sarl_cl(Register dst) {
     shift_32(dst, 0x7);
   }
 
@@ -842,11 +861,11 @@
     shift(dst, shift_amount, 0x4);
   }
 
-  void shl(Register dst) {
+  void shl_cl(Register dst) {
     shift(dst, 0x4);
   }
 
-  void shll(Register dst) {
+  void shll_cl(Register dst) {
     shift_32(dst, 0x4);
   }
 
@@ -858,11 +877,11 @@
     shift(dst, shift_amount, 0x5);
   }
 
-  void shr(Register dst) {
+  void shr_cl(Register dst) {
     shift(dst, 0x5);
   }
 
-  void shrl(Register dst) {
+  void shrl_cl(Register dst) {
     shift_32(dst, 0x5);
   }
 
@@ -920,7 +939,11 @@
   void testq(Register dst, Immediate mask);
 
   void xor_(Register dst, Register src) {
-    arithmetic_op(0x33, dst, src);
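+    // xor of a register with itself just clears it; use the 32-bit form,
+    // which is shorter (no REX.W prefix) and zero-extends to 64 bits.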
+    if (dst.code() == src.code()) {
+      arithmetic_op_32(0x33, dst, src);
+    } else {
+      arithmetic_op(0x33, dst, src);
+    }
   }
 
   void xorl(Register dst, Register src) {
@@ -1109,7 +1132,7 @@
   void RecordStatementPosition(int pos);
   void WriteRecordedPositions();
 
-  int pc_offset() const  { return pc_ - buffer_; }
+  int pc_offset() const  { return static_cast<int>(pc_ - buffer_); }
   int current_statement_position() const { return current_statement_position_; }
   int current_position() const  { return current_position_; }
 
@@ -1121,7 +1144,9 @@
   }
 
   // Get the number of bytes available in the buffer.
-  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+  inline int available_space() const {
+    return static_cast<int>(reloc_info_writer.pos() - pc_);
+  }
 
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512*MB;
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 01992ce..f444d2c 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -246,6 +246,8 @@
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
     __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
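+    // The context's global object may not be the canonical global, so
+    // re-fetch it through the global context before loading the receiver.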
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+    __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
     __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
     __ bind(&patch_receiver);
@@ -318,47 +320,28 @@
   __ push(Operand(rbp, kArgumentsOffset));
   __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  if (FLAG_check_stack) {
-    // We need to catch preemptions right here, otherwise an unlucky preemption
-    // could show up as a failed apply.
-    Label retry_preemption;
-    Label no_preemption;
-    __ bind(&retry_preemption);
-    ExternalReference stack_guard_limit =
-        ExternalReference::address_of_stack_guard_limit();
-    __ movq(kScratchRegister, stack_guard_limit);
-    __ movq(rcx, rsp);
-    __ subq(rcx, Operand(kScratchRegister, 0));
-    // rcx contains the difference between the stack limit and the stack top.
-    // We use it below to check that there is enough room for the arguments.
-    __ j(above, &no_preemption);
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+  __ movq(rcx, rsp);
+  // Make rcx the space we have left. The stack might already have
+  // overflowed here, which will cause rcx to become negative.
+  __ subq(rcx, kScratchRegister);
+  // Make rdx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmpq(rcx, rdx);
+  __ j(greater, &okay);  // Signed comparison.
 
-    // Preemption!
-    // Because runtime functions always remove the receiver from the stack, we
-    // have to fake one to avoid underflowing the stack.
-    __ push(rax);
-    __ Push(Smi::FromInt(0));
-
-    // Do call to runtime routine.
-    __ CallRuntime(Runtime::kStackGuard, 1);
-    __ pop(rax);
-    __ jmp(&retry_preemption);
-
-    __ bind(&no_preemption);
-
-    Label okay;
-    // Make rdx the space we need for the array when it is unrolled onto the
-    // stack.
-    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-    __ cmpq(rcx, rdx);
-    __ j(greater, &okay);
-
-    // Too bad: Out of stack space.
-    __ push(Operand(rbp, kFunctionOffset));
-    __ push(rax);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    __ bind(&okay);
-  }
+  // Out of stack space.
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(rax);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ bind(&okay);
+  // End of stack check.
 
   // Push current index and limit.
   const int kLimitOffset =
@@ -400,6 +383,8 @@
   const int kGlobalOffset =
       Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
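+  // As above, re-fetch the canonical global object through the global
+  // context before loading the global receiver.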
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
   __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
   // Push the receiver.
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0029b74..36f0e63 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -29,6 +29,7 @@
 
 #include "bootstrapper.h"
 #include "codegen-inl.h"
+#include "compiler.h"
 #include "debug.h"
 #include "ic-inl.h"
 #include "parser.h"
@@ -74,7 +75,6 @@
 
 CodeGenState::CodeGenState(CodeGenerator* owner)
     : owner_(owner),
-      typeof_state_(NOT_INSIDE_TYPEOF),
       destination_(NULL),
       previous_(NULL) {
   owner_->set_state(this);
@@ -82,10 +82,8 @@
 
 
 CodeGenState::CodeGenState(CodeGenerator* owner,
-                           TypeofState typeof_state,
                            ControlDestination* destination)
     : owner_(owner),
-      typeof_state_(typeof_state),
       destination_(destination),
       previous_(owner->state()) {
   owner_->set_state(this);
@@ -507,13 +505,13 @@
   // Add padding that will be overwritten by a debugger breakpoint.
   // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
   // with length 7 (3 + 1 + 3).
-  const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
   for (int i = 0; i < kPadding; ++i) {
     masm_->int3();
   }
   // Check that the size of the code used for returning matches what is
   // expected by the debugger.
-  ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
+  ASSERT_EQ(Assembler::kJSReturnSequenceLength,
             masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
 #endif
   DeleteFrame();
@@ -643,27 +641,6 @@
 }
 
 
-class CallFunctionStub: public CodeStub {
- public:
-  CallFunctionStub(int argc, InLoopFlag in_loop)
-      : argc_(argc), in_loop_(in_loop) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  int argc_;
-  InLoopFlag in_loop_;
-
-#ifdef DEBUG
-  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
-  Major MajorKey() { return CallFunction; }
-  int MinorKey() { return argc_; }
-  InLoopFlag InLoop() { return in_loop_; }
-};
-
-
 void CodeGenerator::CallApplyLazy(Property* apply,
                                   Expression* receiver,
                                   VariableProxy* arguments,
@@ -676,7 +653,7 @@
   // Load the apply function onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
   Reference ref(this, apply);
-  ref.GetValue(NOT_INSIDE_TYPEOF);
+  ref.GetValue();
   ASSERT(ref.type() == Reference::NAMED);
 
   // Load the receiver and the existing arguments object onto the
@@ -852,12 +829,10 @@
 
 
 void CodeGenerator::CheckStack() {
-  if (FLAG_check_stack) {
-    DeferredStackCheck* deferred = new DeferredStackCheck;
-    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-    deferred->Branch(below);
-    deferred->BindExit();
-  }
+  DeferredStackCheck* deferred = new DeferredStackCheck;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  deferred->Branch(below);
+  deferred->BindExit();
 }
 
 
@@ -1003,7 +978,7 @@
     JumpTarget then;
     JumpTarget else_;
     ControlDestination dest(&then, &else_, true);
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+    LoadCondition(node->condition(), &dest, true);
 
     if (dest.false_was_fall_through()) {
       // The else target was bound, so we compile the else part first.
@@ -1030,7 +1005,7 @@
     ASSERT(!has_else_stm);
     JumpTarget then;
     ControlDestination dest(&then, &exit, true);
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+    LoadCondition(node->condition(), &dest, true);
 
     if (dest.false_was_fall_through()) {
       // The exit label was bound.  We may have dangling jumps to the
@@ -1050,7 +1025,7 @@
     ASSERT(!has_then_stm);
     JumpTarget else_;
     ControlDestination dest(&exit, &else_, false);
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+    LoadCondition(node->condition(), &dest, true);
 
     if (dest.true_was_fall_through()) {
       // The exit label was bound.  We may have dangling jumps to the
@@ -1072,7 +1047,7 @@
     // or control flow effect).  LoadCondition is called without
     // forcing control flow.
     ControlDestination dest(&exit, &exit, true);
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+    LoadCondition(node->condition(), &dest, false);
     if (!dest.is_used()) {
       // We got a value on the frame rather than (or in addition to)
       // control flow.
@@ -1343,8 +1318,10 @@
         node->continue_target()->Bind();
       }
       if (has_valid_frame()) {
+        Comment cmnt(masm_, "[ DoWhileCondition");
+        CodeForDoWhileConditionPosition(node);
         ControlDestination dest(&body, node->break_target(), false);
-        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        LoadCondition(node->cond(), &dest, true);
       }
       if (node->break_target()->is_linked()) {
         node->break_target()->Bind();
@@ -1401,7 +1378,7 @@
       // Compile the test with the body as the true target and preferred
       // fall-through and with the break target as the false target.
       ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+      LoadCondition(node->cond(), &dest, true);
 
       if (dest.false_was_fall_through()) {
         // If we got the break target as fall-through, the test may have
@@ -1448,7 +1425,7 @@
           // The break target is the fall-through (body is a backward
           // jump from here and thus an invalid fall-through).
           ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          LoadCondition(node->cond(), &dest, true);
         }
       } else {
         // If we have chosen not to recompile the test at the
@@ -1540,7 +1517,7 @@
       // Compile the test with the body as the true target and preferred
       // fall-through and with the break target as the false target.
       ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+      LoadCondition(node->cond(), &dest, true);
 
       if (dest.false_was_fall_through()) {
         // If we got the break target as fall-through, the test may have
@@ -1610,7 +1587,7 @@
           // The break target is the fall-through (body is a backward
           // jump from here).
           ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          LoadCondition(node->cond(), &dest, true);
         }
       } else {
         // Otherwise, jump back to the test at the top.
@@ -1685,8 +1662,54 @@
   jsobject.Bind();
   // Get the set of properties (as a FixedArray or Map).
   // rax: value to be iterated over
-  frame_->EmitPush(rax);  // push the object being iterated over (slot 4)
+  frame_->EmitPush(rax);  // Push the object being iterated over.
 
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  JumpTarget call_runtime;
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+  JumpTarget check_prototype;
+  JumpTarget use_cache;
+  __ movq(rcx, rax);
+  loop.Bind();
+  // Check that there are no elements.
+  __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
+  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+  call_runtime.Branch(not_equal);
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in rbx for the subsequent
+  // prototype load.
+  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+  __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
+  call_runtime.Branch(equal);
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors.  This is the case if the next enumeration index
+  // field does not contain a smi.
+  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+  is_smi = masm_->CheckSmi(rdx);
+  call_runtime.Branch(is_smi);
+  // For all objects but the receiver, check that the cache is empty.
+  __ cmpq(rcx, rax);
+  check_prototype.Branch(equal);
+  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+  call_runtime.Branch(not_equal);
+  check_prototype.Bind();
+  // Load the prototype from the map and loop if non-null.
+  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+  __ CompareRoot(rcx, Heap::kNullValueRootIndex);
+  loop.Branch(not_equal);
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+  use_cache.Jump();
+
+  call_runtime.Bind();
+  // Call the runtime to get the property names for the object.
   frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
   frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
 
@@ -1699,8 +1722,11 @@
   __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
   fixed_array.Branch(not_equal);
 
+  use_cache.Bind();
   // Get enum cache
-  // rax: map (result from call to Runtime::kGetPropertyNamesFast)
+  // rax: map (either the result of a call to
+  // Runtime::kGetPropertyNamesFast or fetched directly from the object)
   __ movq(rcx, rax);
   __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
   // Get the bridge array held in the enumeration index field.
@@ -2190,7 +2216,8 @@
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+  Handle<JSFunction> boilerplate =
+      Compiler::BuildBoilerplate(node, script_, this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
   InstantiateBoilerplate(boilerplate);
@@ -2210,25 +2237,25 @@
   JumpTarget else_;
   JumpTarget exit;
   ControlDestination dest(&then, &else_, true);
-  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+  LoadCondition(node->condition(), &dest, true);
 
   if (dest.false_was_fall_through()) {
     // The else target was bound, so we compile the else part first.
-    Load(node->else_expression(), typeof_state());
+    Load(node->else_expression());
 
     if (then.is_linked()) {
       exit.Jump();
       then.Bind();
-      Load(node->then_expression(), typeof_state());
+      Load(node->then_expression());
     }
   } else {
     // The then target was bound, so we compile the then part first.
-    Load(node->then_expression(), typeof_state());
+    Load(node->then_expression());
 
     if (else_.is_linked()) {
       exit.Jump();
       else_.Bind();
-      Load(node->else_expression(), typeof_state());
+      Load(node->else_expression());
     }
   }
 
@@ -2238,7 +2265,7 @@
 
 void CodeGenerator::VisitSlot(Slot* node) {
   Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, typeof_state());
+  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2251,7 +2278,7 @@
   } else {
     ASSERT(var->is_global());
     Reference ref(this, node);
-    ref.GetValue(typeof_state());
+    ref.GetValue();
   }
 }
 
@@ -2642,9 +2669,9 @@
       // the target, with an implicit promise that it will be written to again
       // before it is read.
       if (literal != NULL || (right_var != NULL && right_var != var)) {
-        target.TakeValue(NOT_INSIDE_TYPEOF);
+        target.TakeValue();
       } else {
-        target.GetValue(NOT_INSIDE_TYPEOF);
+        target.GetValue();
       }
       Load(node->value());
       GenericBinaryOperation(node->binary_op(),
@@ -2692,7 +2719,7 @@
 void CodeGenerator::VisitProperty(Property* node) {
   Comment cmnt(masm_, "[ Property");
   Reference property(this, node);
-  property.GetValue(typeof_state());
+  property.GetValue();
 }
 
 
@@ -2878,7 +2905,7 @@
 
       // Load the function to call from the property through a reference.
       Reference ref(this, property);
-      ref.GetValue(NOT_INSIDE_TYPEOF);
+      ref.GetValue();
 
       // Pass receiver to called function.
       if (property->is_synthetic()) {
@@ -2984,9 +3011,6 @@
 
 
 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  // Note that because of NOT and an optimization in comparison of a typeof
-  // expression to a literal string, this function can fail to leave a value
-  // on top of the frame or in the cc register.
   Comment cmnt(masm_, "[ UnaryOperation");
 
   Token::Value op = node->op();
@@ -2995,7 +3019,7 @@
     // Swap the true and false targets but keep the same actual label
     // as the fall through.
     destination()->Invert();
-    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+    LoadCondition(node->expression(), destination(), true);
     // Swap the labels back.
     destination()->Invert();
 
@@ -3235,7 +3259,7 @@
       if (!is_postfix) frame_->Push(Smi::FromInt(0));
       return;
     }
-    target.TakeValue(NOT_INSIDE_TYPEOF);
+    target.TakeValue();
 
     Result new_value = frame_->Pop();
     new_value.ToRegister();
@@ -3293,9 +3317,6 @@
   // TODO(X64): This code was copied verbatim from codegen-ia32.
   //     Either find a reason to change it or move it to a shared location.
 
-  // Note that due to an optimization in comparison operations (typeof
-  // compared to a string literal), we can evaluate a binary expression such
-  // as AND or OR and not leave a value on the frame or in the cc register.
   Comment cmnt(masm_, "[ BinaryOperation");
   Token::Value op = node->op();
 
@@ -3311,7 +3332,7 @@
   if (op == Token::AND) {
     JumpTarget is_true;
     ControlDestination dest(&is_true, destination()->false_target(), true);
-    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+    LoadCondition(node->left(), &dest, false);
 
     if (dest.false_was_fall_through()) {
       // The current false target was used as the fall-through.  If
@@ -3330,7 +3351,7 @@
         is_true.Bind();
         // The left subexpression compiled to control flow, so the
         // right one is free to do so as well.
-        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+        LoadCondition(node->right(), destination(), false);
       } else {
         // We have actually just jumped to or bound the current false
         // target but the current control destination is not marked as
@@ -3341,7 +3362,7 @@
     } else if (dest.is_used()) {
       // The left subexpression compiled to control flow (and is_true
       // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      LoadCondition(node->right(), destination(), false);
 
     } else {
       // We have a materialized value on the frame, so we exit with
@@ -3374,7 +3395,7 @@
   } else if (op == Token::OR) {
     JumpTarget is_false;
     ControlDestination dest(destination()->true_target(), &is_false, false);
-    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+    LoadCondition(node->left(), &dest, false);
 
     if (dest.true_was_fall_through()) {
       // The current true target was used as the fall-through.  If
@@ -3393,7 +3414,7 @@
         is_false.Bind();
         // The left subexpression compiled to control flow, so the
         // right one is free to do so as well.
-        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+        LoadCondition(node->right(), destination(), false);
       } else {
         // We have just jumped to or bound the current true target but
         // the current control destination is not marked as used.
@@ -3403,7 +3424,7 @@
     } else if (dest.is_used()) {
       // The left subexpression compiled to control flow (and is_false
       // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      LoadCondition(node->right(), destination(), false);
 
     } else {
       // We have a materialized value on the frame, so we exit with
@@ -3525,6 +3546,9 @@
       destination()->false_target()->Branch(is_smi);
       frame_->Spill(answer.reg());
       __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+      destination()->true_target()->Branch(equal);
+      // Regular expressions are callable so typeof == 'function'.
+      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
       answer.Unuse();
       destination()->Split(equal);
 
@@ -3534,9 +3558,11 @@
       __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
       destination()->true_target()->Branch(equal);
 
+      // Regular expressions are typeof == 'function', not 'object'.
+      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
+      destination()->false_target()->Branch(equal);
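+      // CmpObjectType loaded the value's map into kScratchRegister; the
+      // undetectable check below reuses it.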
+
       // It can be an undetectable object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
       __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
                Immediate(1 << Map::kIsUndetectable));
       destination()->false_target()->Branch(not_zero);
@@ -3639,6 +3665,48 @@
 }
 
 
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+  // This generates a fast version of:
+  // (typeof(arg) === 'object' || %_ClassOf(arg) === 'RegExp')
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  Condition is_smi = masm_->CheckSmi(obj.reg());
+  destination()->false_target()->Branch(is_smi);
+
+  __ Move(kScratchRegister, Factory::null_value());
+  __ cmpq(obj.reg(), kScratchRegister);
+  destination()->true_target()->Branch(equal);
+
+  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  destination()->false_target()->Branch(not_zero);
+  __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+  destination()->false_target()->Branch(less);
+  __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+  obj.Unuse();
+  destination()->Split(less_equal);
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+  // This generates a fast version of:
+  // (%_ClassOf(arg) === 'Function')
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  Condition is_smi = masm_->CheckSmi(obj.reg());
+  destination()->false_target()->Branch(is_smi);
+  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+  obj.Unuse();
+  destination()->Split(equal);
+}
+
+
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
 
@@ -3681,7 +3749,6 @@
   Label slow_case;
   Label end;
   Label not_a_flat_string;
-  Label a_cons_string;
   Label try_again_with_new_string;
   Label ascii_string;
   Label got_char_code;
@@ -3749,30 +3816,19 @@
   __ testb(rcx, Immediate(kIsNotStringMask));
   __ j(not_zero, &slow_case);
 
-  // Here we make assumptions about the tag values and the shifts needed.
-  // See the comment in objects.h.
-  ASSERT(kLongStringTag == 0);
-  ASSERT(kMediumStringTag + String::kLongLengthShift ==
-         String::kMediumLengthShift);
-  ASSERT(kShortStringTag + String::kLongLengthShift ==
-         String::kShortLengthShift);
-  __ and_(rcx, Immediate(kStringSizeMask));
-  __ addq(rcx, Immediate(String::kLongLengthShift));
-  // Fetch the length field into the temporary register.
-  __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
-  __ shrl(temp.reg());  // The shift amount in ecx is implicit operand.
   // Check for index out of range.
-  __ cmpl(index.reg(), temp.reg());
+  __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
   __ j(greater_equal, &slow_case);
   // Reload the instance type (into the temp register this time).
   __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
   __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
 
   // We need special handling for non-flat strings.
-  ASSERT(kSeqStringTag == 0);
+  ASSERT_EQ(0, kSeqStringTag);
   __ testb(temp.reg(), Immediate(kStringRepresentationMask));
   __ j(not_zero, &not_a_flat_string);
   // Check for 1-byte or 2-byte string.
+  ASSERT_EQ(0, kTwoByteStringTag);
   __ testb(temp.reg(), Immediate(kStringEncodingMask));
   __ j(not_zero, &ascii_string);
 
@@ -3799,21 +3855,16 @@
   __ bind(&not_a_flat_string);
   __ and_(temp.reg(), Immediate(kStringRepresentationMask));
   __ cmpb(temp.reg(), Immediate(kConsStringTag));
-  __ j(equal, &a_cons_string);
-  __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
   __ j(not_equal, &slow_case);
 
-  // SlicedString.
-  // Add the offset to the index and trigger the slow case on overflow.
-  __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
-  __ j(overflow, &slow_case);
-  // Getting the underlying string is done by running the cons string code.
-
   // ConsString.
-  __ bind(&a_cons_string);
-  // Get the first of the two strings.  Both sliced and cons strings
-  // store their source string at the same offset.
-  ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+  // Check that the right hand side is the empty string (i.e. this is really
+  // a flat string in a cons string).  If that is not the case we would
+  // rather go to the runtime system now to flatten the string.
+  __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
+  __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
+  __ j(not_equal, &slow_case);
+  // Get the first of the two strings.
   __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
   __ jmp(&try_again_with_new_string);
 
@@ -3994,6 +4045,17 @@
 }
 
 
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  Load(args->at(0));
+  Load(args->at(1));
+
+  Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+  frame_->Push(&answer);
+}
+
+
 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   JumpTarget leave, null, function, non_function_constructor;
@@ -4124,18 +4186,17 @@
 // -----------------------------------------------------------------------------
 // CodeGenerator implementation of Expressions
 
-void CodeGenerator::LoadAndSpill(Expression* expression,
-                                 TypeofState typeof_state) {
+void CodeGenerator::LoadAndSpill(Expression* expression) {
   // TODO(x64): No architecture specific code. Move to shared location.
   ASSERT(in_spilled_code());
   set_in_spilled_code(false);
-  Load(expression, typeof_state);
+  Load(expression);
   frame_->SpillAll();
   set_in_spilled_code(true);
 }
 
 
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
@@ -4143,7 +4204,7 @@
   JumpTarget true_target;
   JumpTarget false_target;
   ControlDestination dest(&true_target, &false_target, true);
-  LoadCondition(x, typeof_state, &dest, false);
+  LoadCondition(expr, &dest, false);
 
   if (dest.false_was_fall_through()) {
     // The false target was just bound.
@@ -4203,13 +4264,12 @@
 // partially compiled) into control flow to the control destination.
 // If force_control is true, control flow is forced.
 void CodeGenerator::LoadCondition(Expression* x,
-                                  TypeofState typeof_state,
                                   ControlDestination* dest,
                                   bool force_control) {
   ASSERT(!in_spilled_code());
   int original_height = frame_->height();
 
-  { CodeGenState new_state(this, typeof_state, dest);
+  { CodeGenState new_state(this, dest);
     Visit(x);
 
     // If we hit a stack overflow, we may not have actually visited
@@ -4837,23 +4897,25 @@
 }
 
 
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
-  Variable* variable = x->AsVariableProxy()->AsVariable();
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+  // Special handling of identifiers as subexpressions of typeof.
+  Variable* variable = expr->AsVariableProxy()->AsVariable();
   if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // NOTE: This is somewhat nasty. We force the compiler to load
-    // the variable as if through '<global>.<variable>' to make sure we
-    // do not get reference errors.
+    // For a global variable we build the property reference
+    // <global>.<variable> and perform a (regular non-contextual) property
+    // load to make sure we do not get reference errors.
     Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
     Literal key(variable->name());
-    // TODO(1241834): Fetch the position from the variable instead of using
-    // no position.
     Property property(&global, &key, RelocInfo::kNoPosition);
-    Load(&property);
+    Reference ref(this, &property);
+    ref.GetValue();
+  } else if (variable != NULL && variable->slot() != NULL) {
+    // For a variable that rewrites to a slot, we signal it is the immediate
+    // subexpression of a typeof.
+    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
   } else {
-    Load(x, INSIDE_TYPEOF);
+    // Anything else can be handled normally.
+    Load(expr);
   }
 }
 
@@ -5057,10 +5119,8 @@
 
 
 void DeferredInlineBinaryOperation::Generate() {
-  __ push(left_);
-  __ push(right_);
-  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
-  __ CallStub(&stub);
+  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
@@ -5089,16 +5149,16 @@
       // Bit operations always assume they likely operate on Smis. Still only
       // generate the inline Smi check code if this operation is part of a loop.
       flags = (loop_nesting() > 0)
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
 
     default:
       // By default only inline the Smi check code for likely smis if this
       // operation is part of a loop.
       flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
   }
 
@@ -5157,7 +5217,7 @@
     return;
   }
 
-  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
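+  // NO_SMI_CODE_IN_STUB means the fast smi case is inlined here at the
+  // call site instead of being generated in the stub.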
+  if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
     LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
   } else {
     frame_->Push(&left);
@@ -5166,7 +5226,7 @@
     // that does not check for the fast smi case.
     // The same stub is used for NO_GENERIC_BINARY_FLAGS and
     // NO_SMI_CODE_IN_STUB.
     if (generate_no_smi_code) {
-      flags = SMI_CODE_INLINED;
+      flags = NO_SMI_CODE_IN_STUB;
     }
     GenericBinaryOpStub stub(op, overwrite_mode, flags);
     Result answer = frame_->CallStub(&stub, 2);
@@ -5221,41 +5281,33 @@
 
 
 void DeferredInlineSmiAdd::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
 void DeferredInlineSmiAddReversed::Generate() {
-  __ Push(value_);
-  __ push(dst_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, dst_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
 void DeferredInlineSmiSub::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
 void DeferredInlineSmiOperation::Generate() {
-  __ push(src_);
-  __ Push(value_);
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
-      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
-  __ CallStub(&stub);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
@@ -5758,7 +5810,7 @@
 }
 
 
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
   ASSERT(!cgen_->in_spilled_code());
   ASSERT(cgen_->HasValidEntryRegisters());
   ASSERT(!is_illegal());
@@ -5775,17 +5827,11 @@
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       break;
     }
 
     case NAMED: {
-      // TODO(1241834): Make sure that it is safe to ignore the
-      // distinction between expressions in a typeof and not in a
-      // typeof. If there is a chance that reference errors can be
-      // thrown below, we must distinguish between the two kinds of
-      // loads (typeof expression loads must not throw a reference
-      // error).
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
@@ -5867,8 +5913,6 @@
     }
 
     case KEYED: {
-      // TODO(1241834): Make sure that this it is safe to ignore the
-      // distinction between expressions in a typeof and not in a typeof.
       Comment cmnt(masm, "[ Load from keyed Property");
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
@@ -5990,7 +6034,7 @@
 }
 
 
-void Reference::TakeValue(TypeofState typeof_state) {
+void Reference::TakeValue() {
   // TODO(X64): This function is completely architecture independent. Move
   // it somewhere shared.
 
@@ -5999,7 +6043,7 @@
   ASSERT(!cgen_->in_spilled_code());
   ASSERT(!is_illegal());
   if (type_ != SLOT) {
-    GetValue(typeof_state);
+    GetValue();
     return;
   }
 
@@ -6009,7 +6053,7 @@
       slot->type() == Slot::CONTEXT ||
       slot->var()->mode() == Variable::CONST ||
       slot->is_arguments()) {
-    GetValue(typeof_state);
+    GetValue();
     return;
   }
 
@@ -6179,11 +6223,8 @@
   // String value => false iff empty.
   __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
   __ j(above_equal, &not_string);
-  __ and_(rcx, Immediate(kStringSizeMask));
-  __ cmpq(rcx, Immediate(kShortStringTag));
-  __ j(not_equal, &true_result);  // Empty string is always short.
   __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
-  __ shr(rdx, Immediate(String::kShortLengthShift));
+  __ testl(rdx, rdx);
   __ j(zero, &false_result);
   __ jmp(&true_result);
 
@@ -6379,19 +6420,18 @@
       // not NaN.
       // The representation of NaN values has all exponent bits (52..62) set,
       // and not all mantissa bits (0..51) clear.
-      // Read double representation into rax.
-      __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
-      __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
-      // Test that exponent bits are all set.
-      __ or_(rbx, rax);
-      __ cmpq(rbx, rax);
-      __ j(not_equal, &return_equal);
-      // Shift out flag and all exponent bits, retaining only mantissa.
-      __ shl(rax, Immediate(12));
-      // If all bits in the mantissa are zero the number is Infinity, and
-      // we return zero.  Otherwise it is a NaN, and we return non-zero.
-      // We cannot just return rax because only eax is tested on return.
-      __ setcc(not_zero, rax);
+      // We only allow QNaNs, which have bit 51 set (which also rules out
+      // the value being Infinity).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+      __ xorl(rax, rax);
+      __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
+      __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+      __ setcc(above_equal, rax);
       __ ret(0);
 
       __ bind(&not_identical);
@@ -6614,11 +6654,11 @@
   __ jmp(&loop);
 
   __ bind(&is_instance);
-  __ xor_(rax, rax);
+  __ xorl(rax, rax);
   __ ret(2 * kPointerSize);
 
   __ bind(&is_not_instance);
-  __ Move(rax, Smi::FromInt(1));
+  __ movl(rax, Immediate(1));
   __ ret(2 * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
@@ -6784,7 +6824,7 @@
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
-                              StackFrame::Type frame_type,
+                              ExitFrame::Mode mode,
                               bool do_gc,
                               bool always_allocate_scope) {
   // rax: result parameter for PerformGC, if any.
@@ -6854,7 +6894,9 @@
   // If return value is on the stack, pop it to registers.
   if (result_size_ > 1) {
     ASSERT_EQ(2, result_size_);
-    // Position above 4 argument mirrors and arguments object.
+    // Read result values stored on the stack. The result is stored
+    // above the four argument mirror slots and the two
+    // Arguments object slots.
     __ movq(rax, Operand(rsp, 6 * kPointerSize));
     __ movq(rdx, Operand(rsp, 7 * kPointerSize));
   }
@@ -6865,7 +6907,7 @@
   __ j(zero, &failure_returned);
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(frame_type, result_size_);
+  __ LeaveExitFrame(mode, result_size_);
   __ ret(0);
 
   // Handling of failure.
@@ -6995,12 +7037,12 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
-  StackFrame::Type frame_type = is_debug_break ?
-      StackFrame::EXIT_DEBUG :
-      StackFrame::EXIT;
+  ExitFrame::Mode mode = is_debug_break ?
+      ExitFrame::MODE_DEBUG :
+      ExitFrame::MODE_NORMAL;
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(frame_type, result_size_);
+  __ EnterExitFrame(mode, result_size_);
 
   // rax: Holds the context at this point, but should not be used.
   //      On entry to code generated by GenerateCore, it must hold
@@ -7023,7 +7065,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                false,
                false);
 
@@ -7032,7 +7074,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                false);
 
@@ -7043,7 +7085,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                true);
 
@@ -7058,6 +7100,11 @@
 }
 
 
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+  UNREACHABLE();
+}
+
+
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   Label invoke, exit;
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -7340,6 +7387,127 @@
 }
 
 
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ push(right);
+  } else {
+    // The register calling convention puts left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          SetArgsReversed();
+        } else {
+          __ xchg(left, right);
+        }
+      } else if (left.is(left_arg)) {
+        __ movq(right_arg, right);
+      } else if (left.is(right_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(left_arg, right);
+          SetArgsReversed();
+        } else {
+          // Order of moves is important to avoid destroying left argument.
+          __ movq(left_arg, left);
+          __ movq(right_arg, right);
+        }
+      } else if (right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(right_arg, left);
+          SetArgsReversed();
+        } else {
+          // Order of moves is important to avoid destroying right argument.
+          __ movq(right_arg, right);
+          __ movq(left_arg, left);
+        }
+      } else if (right.is(right_arg)) {
+        __ movq(left_arg, left);
+      } else {
+        // Order of moves is not important.
+        __ movq(left_arg, left);
+        __ movq(right_arg, right);
+      }
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Smi* right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ Push(right);
+  } else {
+    // The register calling convention puts left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (left.is(left_arg)) {
+      __ Move(right_arg, right);
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ Move(left_arg, right);
+      SetArgsReversed();
+    } else {
+      __ movq(left_arg, left);
+      __ Move(right_arg, right);
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Smi* left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ Push(left);
+    __ push(right);
+  } else {
+    // The register calling convention puts left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (right.is(right_arg)) {
+      __ Move(left_arg, left);
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ Move(right_arg, left);
+      SetArgsReversed();
+    } else {
+      __ Move(left_arg, left);
+      __ movq(right_arg, right);
+    }
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (rax <op> rbx) and
   // leave result in register rax.
@@ -7412,22 +7580,21 @@
 
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
-  if (flags_ == SMI_CODE_IN_STUB) {
+  if (HasSmiCodeInStub()) {
     // The fast case smi code wasn't inlined in the stub caller
     // code. Generate it here to speed up common operations.
     Label slow;
     __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
     __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
     GenerateSmiCode(masm, &slow);
-    __ ret(2 * kPointerSize);  // remove both operands
+    GenerateReturn(masm);
 
     // Too bad. The fast case smi code didn't succeed.
     __ bind(&slow);
   }
 
-  // Setup registers.
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // get y
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // get x
+  // Make sure the arguments are in rdx and rax.
+  GenerateLoadArguments(masm);
 
   // Floating point case.
   switch (op_) {
@@ -7451,7 +7618,10 @@
           __ JumpIfNotSmi(rax, &skip_allocation);
           // Fall through!
         case NO_OVERWRITE:
-          __ AllocateHeapNumber(rax, rcx, &call_runtime);
+          // Allocate a heap number for the result. Keep rax and rdx intact
+          // for the possible runtime call.
+          __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+          __ movq(rax, rbx);
           __ bind(&skip_allocation);
           break;
         default: UNREACHABLE();
@@ -7467,7 +7637,7 @@
         default: UNREACHABLE();
       }
       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
     }
     case Token::MOD: {
       // For MOD we go directly to runtime in the non-smi case.
@@ -7492,7 +7662,7 @@
       if (use_sse3_) {
         // Truncate the operands to 32-bit integers and check for
         // exceptions in doing so.
-        CpuFeatures::Scope scope(CpuFeatures::SSE3);
+        CpuFeatures::Scope scope(SSE3);
         __ fisttp_s(Operand(rsp, 0 * kPointerSize));
         __ fisttp_s(Operand(rsp, 1 * kPointerSize));
         __ fnstsw_ax();
@@ -7521,9 +7691,9 @@
         case Token::BIT_OR:  __ orl(rax, rcx); break;
         case Token::BIT_AND: __ andl(rax, rcx); break;
         case Token::BIT_XOR: __ xorl(rax, rcx); break;
-        case Token::SAR: __ sarl(rax); break;
-        case Token::SHL: __ shll(rax); break;
-        case Token::SHR: __ shrl(rax); break;
+        case Token::SAR: __ sarl_cl(rax); break;
+        case Token::SHL: __ shll_cl(rax); break;
+        case Token::SHR: __ shrl_cl(rax); break;
         default: UNREACHABLE();
       }
       if (op_ == Token::SHR) {
@@ -7535,7 +7705,7 @@
       __ JumpIfNotValidSmiValue(rax, &non_smi_result);
       // Tag smi result, if possible, and return.
       __ Integer32ToSmi(rax, rax);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
 
       // All ops except SHR return a signed int32 that we load in a HeapNumber.
       if (op_ != Token::SHR && non_smi_result.is_linked()) {
@@ -7561,7 +7731,7 @@
         __ movq(Operand(rsp, 1 * kPointerSize), rbx);
         __ fild_s(Operand(rsp, 1 * kPointerSize));
         __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }
 
       // Clear the FPU exception flag and reset the stack before calling
@@ -7592,12 +7762,62 @@
   }
 
   // If all else fails, use the runtime system to get the correct
-  // result.
+  // result. If the arguments were passed in registers, place them on the
+  // stack in the correct order below the return address.
   __ bind(&call_runtime);
+  if (HasArgumentsInRegisters()) {
+    __ pop(rcx);
+    if (HasArgumentsReversed()) {
+      __ push(rax);
+      __ push(rdx);
+    } else {
+      __ push(rdx);
+      __ push(rax);
+    }
+    __ push(rcx);
+  }
   switch (op_) {
-    case Token::ADD:
+    case Token::ADD: {
+      // Test for string arguments before calling runtime.
+      Label not_strings, both_strings, not_string1, string1;
+      Condition is_smi;
+      Result answer;
+      __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // First argument.
+      __ movq(rax, Operand(rsp, 1 * kPointerSize));  // Second argument.
+      is_smi = masm->CheckSmi(rdx);
+      __ j(is_smi, &not_string1);
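+      // All string instance types precede FIRST_NONSTRING_TYPE.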
+      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
+      __ j(above_equal, &not_string1);
+
+      // First argument is a string, test the second.
+      is_smi = masm->CheckSmi(rax);
+      __ j(is_smi, &string1);
+      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+      __ j(above_equal, &string1);
+
+      // First and second argument are strings.
+      Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
+      __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
+
+      // Only first argument is a string.
+      __ bind(&string1);
+      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+      // First argument was not a string, test second.
+      __ bind(&not_string1);
+      is_smi = masm->CheckSmi(rax);
+      __ j(is_smi, &not_strings);
+      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+      __ j(above_equal, &not_strings);
+
+      // Only second argument is a string.
+      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+      __ bind(&not_strings);
+      // Neither argument is a string.
       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
       break;
+    }
     case Token::SUB:
       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
       break;
@@ -7634,6 +7854,26 @@
 }
 
 
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, read them from the stack.
+  if (!HasArgumentsInRegisters()) {
+    __ movq(rax, Operand(rsp, 1 * kPointerSize));
+    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, remove them from the stack
+  // before returning.
+  if (!HasArgumentsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands.
+  } else {
+    __ ret(0);
+  }
+}
+
+
 int CompareStub::MinorKey() {
   // Encode the two parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
@@ -7653,7 +7893,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler masm(buffer, actual_size);
+  Assembler masm(buffer, static_cast<int>(actual_size));
   // Generated code is put into a fixed, unmovable, buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
   // (e.g. the JavaScript nan-object).
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 56b88b7..8539884 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -77,12 +77,12 @@
   // Generate code to push the value of the reference on top of the
   // expression stack.  The reference is expected to be already on top of
   // the expression stack, and it is left in place with its value above it.
-  void GetValue(TypeofState typeof_state);
+  void GetValue();
 
   // Like GetValue except that the slot is expected to be written to before
   // being read from again.  The value of the reference may be invalidated,
   // causing subsequent attempts to read it to fail.
-  void TakeValue(TypeofState typeof_state);
+  void TakeValue();
 
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
@@ -241,28 +241,20 @@
   explicit CodeGenState(CodeGenerator* owner);
 
   // Create a code generator state based on a code generator's current
-  // state.  The new state may or may not be inside a typeof, and has its
-  // own control destination.
-  CodeGenState(CodeGenerator* owner,
-               TypeofState typeof_state,
-               ControlDestination* destination);
+  // state.  The new state has its own control destination.
+  CodeGenState(CodeGenerator* owner, ControlDestination* destination);
 
   // Destroy a code generator state and restore the owning code generator's
   // previous state.
   ~CodeGenState();
 
   // Accessors for the state.
-  TypeofState typeof_state() const { return typeof_state_; }
   ControlDestination* destination() const { return destination_; }
 
  private:
   // The owning code generator.
   CodeGenerator* owner_;
 
-  // A flag indicating whether we are compiling the immediate subexpression
-  // of a typeof expression.
-  TypeofState typeof_state_;
-
   // A control destination in case the expression has a control-flow
   // effect.
   ControlDestination* destination_;
@@ -307,17 +299,12 @@
   static bool ShouldGenerateLog(Expression* type);
 #endif
 
-  static void SetFunctionInfo(Handle<JSFunction> fun,
-                              FunctionLiteral* lit,
-                              bool is_toplevel,
-                              Handle<Script> script);
-
   static void RecordPositions(MacroAssembler* masm, int pos);
 
   // Accessors
   MacroAssembler* masm() { return masm_; }
-
   VirtualFrame* frame() const { return frame_; }
+  Handle<Script> script() { return script_; }
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -353,7 +340,6 @@
   bool is_eval() { return is_eval_; }
 
   // State
-  TypeofState typeof_state() const { return state_->typeof_state(); }
   ControlDestination* destination() const { return state_->destination(); }
 
   // Track loop nesting level.
@@ -414,18 +400,16 @@
   }
 
   void LoadCondition(Expression* x,
-                     TypeofState typeof_state,
                      ControlDestination* destination,
                      bool force_control);
-  void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void Load(Expression* expr);
   void LoadGlobal();
   void LoadGlobalReceiver();
 
   // Generate code to push the value of an expression on top of the frame
   // and then spill the frame fully to memory.  This function is used
   // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression,
-                    TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void LoadAndSpill(Expression* expression);
 
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -511,8 +495,6 @@
   static bool PatchInlineRuntimeEntry(Handle<String> name,
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
-  static Handle<Code> ComputeLazyCompile(int argc);
-  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
   static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -528,6 +510,8 @@
   void GenerateIsSmi(ZoneList<Expression*>* args);
   void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
   void GenerateIsArray(ZoneList<Expression*>* args);
+  void GenerateIsObject(ZoneList<Expression*>* args);
+  void GenerateIsFunction(ZoneList<Expression*>* args);
 
   // Support for construct call checks.
   void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -560,6 +544,9 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Fast support for StringAdd.
+  void GenerateStringAdd(ZoneList<Expression*>* args);
+
   // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
@@ -574,6 +561,7 @@
   void CodeForFunctionPosition(FunctionLiteral* fun);
   void CodeForReturnPosition(FunctionLiteral* fun);
   void CodeForStatementPosition(Statement* node);
+  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
   void CodeForSourcePosition(int pos);
 
 #ifdef DEBUG
@@ -633,6 +621,25 @@
 // times by generated code to perform common tasks, often the slow
 // case of a JavaScript operation.  They are all subclasses of CodeStub,
 // which is declared in code-stubs.h.
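+// Stub implementing the call sequence used to invoke a JavaScript function.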
+class CallFunctionStub: public CodeStub {
+ public:
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int argc_;
+  InLoopFlag in_loop_;
+
+#ifdef DEBUG
+  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+  Major MajorKey() { return CallFunction; }
+  int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
+};
 
 
 class ToBooleanStub: public CodeStub {
@@ -647,11 +654,10 @@
 };
 
 
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+// Flags that indicate how code for GenericBinaryOpStub should be generated.
 enum GenericBinaryFlags {
-  SMI_CODE_IN_STUB,
-  SMI_CODE_INLINED
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
 };
 
 
@@ -660,45 +666,82 @@
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(op), mode_(mode), flags_(flags) {
-    use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+      : op_(op),
+        mode_(mode),
+        flags_(flags),
+        args_in_registers_(false),
+        args_reversed_(false) {
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  // Generate code to call the stub with the supplied arguments. This adds
+  // code at the call site that places the arguments in registers or on the
+  // stack, followed by the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
   GenericBinaryFlags flags_;
+  bool args_in_registers_;  // Arguments are in registers, not on the stack.
+  bool args_reversed_;  // Left and right arguments are swapped.
   bool use_sse3_;
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+    PrintF("GenericBinaryOpStub (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d)\n",
            Token::String(op_),
            static_cast<int>(mode_),
-           static_cast<int>(flags_));
+           static_cast<int>(flags_),
+           static_cast<int>(args_in_registers_),
+           static_cast<int>(args_reversed_));
   }
 #endif
 
-  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
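+  // F = flags, R = args reversed, A = args in registers, S = SSE3,
+  // O = op, M = mode.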
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 12> {};
-  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 10> {};
+  class SSE3Bits: public BitField<bool, 12, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+  class ArgsReversedBits: public BitField<bool, 14, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return OpBits::encode(op_)
-        | ModeBits::encode(mode_)
-        | FlagBits::encode(flags_)
-        | SSE3Bits::encode(use_sse3_);
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_)
+           | SSE3Bits::encode(use_sse3_)
+           | ArgsInRegistersBits::encode(args_in_registers_)
+           | ArgsReversedBits::encode(args_reversed_);
   }
+
   void Generate(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+
+  bool ArgsInRegistersSupported() {
+    return ((op_ == Token::ADD) || (op_ == Token::SUB)
+             || (op_ == Token::MUL) || (op_ == Token::DIV))
+            && flags_ != NO_SMI_CODE_IN_STUB;
+  }
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  void SetArgsInRegisters() { args_in_registers_ = true; }
+  void SetArgsReversed() { args_reversed_ = true; }
+  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+  bool HasArgumentsInRegisters() { return args_in_registers_; }
+  bool HasArgumentsReversed() { return args_reversed_; }
 };
 
 
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 8df0ab7..cc20c58 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -27,6 +27,10 @@
 
 // CPU specific code for x64 independent of OS goes here.
 
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
 #include "v8.h"
 
 #include "cpu.h"
@@ -49,6 +53,15 @@
 
   // If flushing of the instruction cache becomes necessary Windows has the
   // API function FlushInstructionCache.
+
+  // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code.  This leads to random
+  // instability when code patches or moves are sometimes unnoticed.  One
+  // solution is to run valgrind with --smc-check=all, but this comes at a big
+  // performance cost.  We can notify valgrind to invalidate its cache.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+  VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
 }
 
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 49240b4..bc88d46 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -181,7 +181,7 @@
 
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
   rinfo()->PatchCode(original_rinfo()->pc(),
-                     Debug::kX64JSReturnSequenceLength);
+                     Assembler::kJSReturnSequenceLength);
 }
 
 
@@ -191,9 +191,10 @@
 
 
 void BreakLocationIterator::SetDebugBreakAtReturn()  {
-  ASSERT(Debug::kX64JSReturnSequenceLength >= Debug::kX64CallInstructionLength);
+  ASSERT(Assembler::kJSReturnSequenceLength >=
+         Assembler::kCallInstructionLength);
   rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
-      Debug::kX64JSReturnSequenceLength - Debug::kX64CallInstructionLength);
+      Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
 }
 
 #endif  // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 19bcf66..0b43e76 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -218,7 +218,7 @@
     OperandType op_order = bm[i].op_order_;
     id->op_order_ =
         static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
-    assert(id->type == NO_INSTR);  // Information not already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
     id->type = type;
     id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
   }
@@ -232,7 +232,7 @@
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
     id->mnem = mnem;
     id->type = type;
     id->byte_size_operation = byte_size;
@@ -243,7 +243,7 @@
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
     id->mnem = NULL;  // Computed depending on condition code.
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -393,6 +393,7 @@
                               RegisterNameMapping register_name);
   int PrintRightOperand(byte* modrmp);
   int PrintRightByteOperand(byte* modrmp);
+  int PrintRightXMMOperand(byte* modrmp);
   int PrintOperands(const char* mnem,
                     OperandType op_order,
                     byte* data);
@@ -400,13 +401,15 @@
   int PrintImmediateOp(byte* data);
   const char* TwoByteMnemonic(byte opcode);
   int TwoByteOpcodeInstruction(byte* data);
-  int F7Instruction(byte* data);
+  int F6F7Instruction(byte* data);
   int ShiftInstruction(byte* data);
   int JumpShort(byte* data);
   int JumpConditional(byte* data);
   int JumpConditionalShort(byte* data);
   int SetCC(byte* data);
   int FPUInstruction(byte* data);
+  int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+  int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
   void AppendToBuffer(const char* format, ...);
 
   void UnimplementedInstruction() {
@@ -568,6 +571,12 @@
 }
 
 
+int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerX64::NameOfXMMRegister);
+}
+
+
 // Returns number of bytes used including the current *data.
 // Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
 int DisassemblerX64::PrintOperands(const char* mnem,
@@ -648,8 +657,8 @@
 
 
 // Returns number of bytes used, including *data.
-int DisassemblerX64::F7Instruction(byte* data) {
-  assert(*data == 0xF7);
+int DisassemblerX64::F6F7Instruction(byte* data) {
+  ASSERT(*data == 0xF7 || *data == 0xF6);
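+  // 0xF6 is the byte-size variant of the 0xF7 instruction group.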
   byte modrm = *(data + 1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -676,19 +685,12 @@
                    operand_size_code(),
                    NameOfCPURegister(rm));
     return 2;
-  } else if (mod == 3 && regop == 0) {
-    int32_t imm = *reinterpret_cast<int32_t*>(data + 2);
-    AppendToBuffer("test%c %s,0x%x",
-                   operand_size_code(),
-                   NameOfCPURegister(rm),
-                   imm);
-    return 6;
   } else if (regop == 0) {
     AppendToBuffer("test%c ", operand_size_code());
-    int count = PrintRightOperand(data + 1);
-    int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count);
-    AppendToBuffer(",0x%x", imm);
-    return 1 + count + 4 /*int32_t*/;
+    int count = PrintRightOperand(data + 1);  // Use name of 64-bit register.
+    AppendToBuffer(",0x");
+    count += PrintImmediate(data + 1 + count, operand_size());
+    return 1 + count;
   } else {
     UnimplementedInstruction();
     return 2;
@@ -739,7 +741,7 @@
       UnimplementedInstruction();
       return num_bytes;
   }
-  assert(mnem != NULL);
+  ASSERT_NE(NULL, mnem);
   if (op == 0xD0) {
     imm8 = 1;
   } else if (op == 0xC0) {
@@ -762,7 +764,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::JumpShort(byte* data) {
-  assert(*data == 0xEB);
+  ASSERT_EQ(0xEB, *data);
   byte b = *(data + 1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -772,7 +774,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::JumpConditional(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
   const char* mnem = conditional_code_suffix[cond];
@@ -794,7 +796,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::SetCC(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_code_suffix[cond];
   AppendToBuffer("set%s%c ", mnem, operand_size_code());
@@ -805,168 +807,170 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::FPUInstruction(byte* data) {
-  byte b1 = *data;
-  byte b2 = *(data + 1);
-  if (b1 == 0xD9) {
-    const char* mnem = NULL;
-    switch (b2) {
-      case 0xE0:
-        mnem = "fchs";
-        break;
-      case 0xE1:
-        mnem = "fabs";
-        break;
-      case 0xE4:
-        mnem = "ftst";
-        break;
-      case 0xF5:
-        mnem = "fprem1";
-        break;
-      case 0xF7:
-        mnem = "fincstp";
-        break;
-      case 0xE8:
-        mnem = "fld1";
-        break;
-      case 0xEE:
-        mnem = "fldz";
-        break;
-      case 0xF8:
-        mnem = "fprem";
-        break;
-    }
-    if (mnem != NULL) {
-      AppendToBuffer("%s", mnem);
-      return 2;
-    } else if ((b2 & 0xF8) == 0xC8) {
-      AppendToBuffer("fxch st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data + 1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case 0:
-          mnem = "fld_s";
-          break;
-        case 3:
-          mnem = "fstp_s";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDD) {
-    int mod, regop, rm;
-    get_modrm(*(data + 1), &mod, &regop, &rm);
-    if (mod == 3) {
-      switch (regop) {
-        case 0:
-          AppendToBuffer("ffree st%d", rm & 7);
-          break;
-        case 2:
-          AppendToBuffer("fstp st%d", rm & 7);
-          break;
-        default:
-          UnimplementedInstruction();
-          break;
-      }
-      return 2;
-    } else {
-      const char* mnem = "?";
-      switch (regop) {
-        case 0:
-          mnem = "fld_d";
-          break;
-        case 3:
-          mnem = "fstp_d";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDB) {
-    int mod, regop, rm;
-    get_modrm(*(data + 1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case 0:
-        mnem = "fild_s";
-        break;
-      case 2:
-        mnem = "fist_s";
-        break;
-      case 3:
-        mnem = "fistp_s";
-        break;
-      default:
-        UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDF) {
-    if (b2 == 0xE0) {
-      AppendToBuffer("fnstsw_ax");
-      return 2;
-    }
-    int mod, regop, rm;
-    get_modrm(*(data + 1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case 5:
-        mnem = "fild_d";
-        break;
-      case 7:
-        mnem = "fistp_d";
-        break;
-      default:
-        UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDC || b1 == 0xDE) {
-    bool is_pop = (b1 == 0xDE);
-    if (is_pop && b2 == 0xD9) {
-      AppendToBuffer("fcompp");
-      return 2;
-    }
-    const char* mnem = "FP0xDC";
-    switch (b2 & 0xF8) {
-      case 0xC0:
-        mnem = "fadd";
-        break;
-      case 0xE8:
-        mnem = "fsub";
-        break;
-      case 0xC8:
-        mnem = "fmul";
-        break;
-      case 0xF8:
-        mnem = "fdiv";
-        break;
-      default:
-        UnimplementedInstruction();
-    }
-    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
-    return 2;
-  } else if (b1 == 0xDA && b2 == 0xE9) {
-    const char* mnem = "fucompp";
-    AppendToBuffer("%s", mnem);
-    return 2;
+  byte escape_opcode = *data;
+  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
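+  // All x87 FPU instructions start with an escape opcode in 0xD8..0xDF.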
+  byte modrm_byte = *(data + 1);
+
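+  // A ModR/M byte of 0xC0 or above has mod == 3, so the operand is an FPU
+  // register rather than a memory location.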
+  if (modrm_byte >= 0xC0) {
+    return RegisterFPUInstruction(escape_opcode, modrm_byte);
+  } else {
+    return MemoryFPUInstruction(escape_opcode, modrm_byte, data + 1);
   }
-  AppendToBuffer("Unknown FP instruction");
+}
+
+int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
+                                           int modrm_byte,
+                                           byte* modrm_start) {
+  const char* mnem = "?";
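+  // ModR/M layout: mod in bits 7..6, reg/op in bits 5..3, r/m in bits 2..0.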
+  int regop = (modrm_byte >> 3) & 0x7;  // reg/op field of modrm byte.
+  switch (escape_opcode) {
+    case 0xD9: switch (regop) {
+        case 0: mnem = "fld_s"; break;
+        case 3: mnem = "fstp_s"; break;
+        case 7: mnem = "fstcw"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB: switch (regop) {
+        case 0: mnem = "fild_s"; break;
+        case 1: mnem = "fisttp_s"; break;
+        case 2: mnem = "fist_s"; break;
+        case 3: mnem = "fistp_s"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD: switch (regop) {
+        case 0: mnem = "fld_d"; break;
+        case 3: mnem = "fstp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDF: switch (regop) {
+        case 5: mnem = "fild_d"; break;
+        case 7: mnem = "fistp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(modrm_start);
+  return count + 1;
+}
+
+int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
+                                             byte modrm_byte) {
+  bool has_register = false;  // Is the FPU register encoded in modrm_byte?
+  const char* mnem = "?";
+
+  switch (escape_opcode) {
+    case 0xD8:
+      UnimplementedInstruction();
+      break;
+
+    case 0xD9:
+      switch (modrm_byte & 0xF8) {
+        case 0xC8:
+          mnem = "fxch";
+          has_register = true;
+          break;
+        default:
+          switch (modrm_byte) {
+            case 0xE0: mnem = "fchs"; break;
+            case 0xE1: mnem = "fabs"; break;
+            case 0xE4: mnem = "ftst"; break;
+            case 0xE8: mnem = "fld1"; break;
+            case 0xEE: mnem = "fldz"; break;
+            case 0xF5: mnem = "fprem1"; break;
+            case 0xF7: mnem = "fincstp"; break;
+            case 0xF8: mnem = "fprem"; break;
+            case 0xFE: mnem = "fsin"; break;
+            case 0xFF: mnem = "fcos"; break;
+            default: UnimplementedInstruction();
+          }
+      }
+      break;
+
+    case 0xDA:
+      if (modrm_byte == 0xE9) {
+        mnem = "fucompp";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB:
+      if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomi";
+        has_register = true;
+      } else if (modrm_byte == 0xE2) {
+        mnem = "fclex";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDC:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "fadd"; break;
+        case 0xE8: mnem = "fsub"; break;
+        case 0xC8: mnem = "fmul"; break;
+        case 0xF8: mnem = "fdiv"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "ffree"; break;
+        case 0xD8: mnem = "fstp"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDE:
+      if (modrm_byte == 0xD9) {
+        mnem = "fcompp";
+      } else {
+        has_register = true;
+        switch (modrm_byte & 0xF8) {
+          case 0xC0: mnem = "faddp"; break;
+          case 0xE8: mnem = "fsubp"; break;
+          case 0xC8: mnem = "fmulp"; break;
+          case 0xF8: mnem = "fdivp"; break;
+          default: UnimplementedInstruction();
+        }
+      }
+      break;
+
+    case 0xDF:
+      if (modrm_byte == 0xE0) {
+        mnem = "fnstsw_ax";
+      } else if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomip";
+        has_register = true;
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+
+  if (has_register) {
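+    // The low three bits of the ModR/M byte select the FPU stack register.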
+    AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+  } else {
+    AppendToBuffer("%s", mnem);
+  }
   return 2;
 }
 
 
 // Handle all two-byte opcodes, which start with 0x0F.
 // These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
 // We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@@ -1045,13 +1049,13 @@
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
-      data += PrintRightOperand(data);
+      current += PrintRightOperand(current);
     } else if ((opcode & 0xF8) == 0x58) {
       // XMM arithmetic. Mnemonic was retrieved at the start of this function.
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
-      AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
-                     NameOfXMMRegister(rm));
+      AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else {
       UnimplementedInstruction();
     }
@@ -1060,12 +1064,12 @@
 
     // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
     // Assert that mod is not 3, so source is memory, not an XMM register.
-    ASSERT((*current & 0xC0) != 0xC0);
+    ASSERT_NE(0xC0, *current & 0xC0);
     current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
   } else {
     UnimplementedInstruction();
   }
-  return current - data;
+  return static_cast<int>(current - data);
 }
 
 
@@ -1236,18 +1240,6 @@
         break;
       }
 
-      case 0xF6: {
-        int mod, regop, rm;
-        get_modrm(*(data + 1), &mod, &regop, &rm);
-        if (mod == 3 && regop == 0) {
-          AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2));
-        } else {
-          UnimplementedInstruction();
-        }
-        data += 3;
-        break;
-      }
-
       case 0x81:  // fall through
       case 0x83:  // 0x81 with sign extension bit set
         data += PrintImmediateOp(data);
@@ -1344,7 +1336,7 @@
       case 0x95:
       case 0x96:
       case 0x97: {
-        int reg = (current & 0x7) | (rex_b() ? 8 : 0);
+        int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
         if (reg == 0) {
           AppendToBuffer("nop");  // Common name for xchg rax,rax.
         } else {
@@ -1352,8 +1344,9 @@
                          operand_size_code(),
                          NameOfCPURegister(reg));
         }
+        data++;
       }
-
+        break;
 
       case 0xFE: {
         data++;
@@ -1465,8 +1458,10 @@
         data += JumpShort(data);
         break;
 
+      case 0xF6:
+        byte_size_operand_ = true;  // fall through
       case 0xF7:
-        data += F7Instruction(data);
+        data += F6F7Instruction(data);
         break;
 
       default:
@@ -1479,7 +1474,7 @@
     tmp_buffer_[tmp_buffer_pos_] = '\0';
   }
 
-  int instr_len = data - instr;
+  int instr_len = static_cast<int>(data - instr);
   ASSERT(instr_len > 0);  // Ensure progress.
 
   int outp = 0;
@@ -1591,7 +1586,7 @@
     for (byte* bp = prev_pc; bp < pc; bp++) {
       fprintf(f, "%02x", *bp);
     }
-    for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+    for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
       fprintf(f, "  ");
     }
     fprintf(f, "  %s\n", buffer.start());
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 46d8dc4..333a47d 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "codegen-inl.h"
+#include "compiler.h"
 #include "debug.h"
 #include "fast-codegen.h"
 #include "parser.h"
@@ -61,11 +62,76 @@
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = fun->scope()->num_stack_slots();
-    for (int i = 0; i < locals_count; i++) {
-      __ PushRoot(Heap::kUndefinedValueRootIndex);
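+    // With more than one local it is shorter to load the undefined value
+    // into a register once and push that register for each local.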
+    if (locals_count <= 1) {
+      if (locals_count > 0) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+    } else {
+      __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+      for (int i = 0; i < locals_count; i++) {
+        __ push(rdx);
+      }
     }
   }
 
+  bool function_in_register = true;
+
+  // Possibly allocate a local context.
+  if (fun->scope()->num_heap_slots() > 0) {
+    Comment cmnt(masm_, "[ Allocate local context");
+    // Argument to NewContext is the function, which is still in rdi.
+    __ push(rdi);
+    __ CallRuntime(Runtime::kNewContext, 1);
+    function_in_register = false;
+    // Context is returned in both rax and rsi.  It replaces the context
+    // passed to us.  It is saved on the stack and kept live in rsi.
+    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+    // Copy any necessary parameters into the context.
+    int num_parameters = fun->scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Slot* slot = fun->scope()->parameter(i)->slot();
+      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                               (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ movq(rax, Operand(rbp, parameter_offset));
+        // Store it in the context.
+        __ movq(Operand(rsi, Context::SlotOffset(slot->index())), rax);
+      }
+    }
+  }
+
+  // Possibly allocate an arguments object.
+  Variable* arguments = fun->scope()->arguments()->AsVariable();
+  if (arguments != NULL) {
+    // Arguments object must be allocated after the context object, in
+    // case the "arguments" or ".arguments" variables are in the context.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (function_in_register) {
+      __ push(rdi);
+    } else {
+      __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    // The receiver is just before the parameters on the caller's stack.
+    __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+                                 fun->num_parameters() * kPointerSize));
+    __ push(rdx);
+    __ Push(Smi::FromInt(fun->num_parameters()));
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    __ CallStub(&stub);
+    // Store new arguments object in both "arguments" and ".arguments" slots.
+    __ movq(rcx, rax);
+    Move(arguments->slot(), rax, rbx, rdx);
+    Slot* dot_arguments_slot =
+        fun->scope()->arguments_shadow()->AsVariable()->slot();
+    Move(dot_arguments_slot, rcx, rbx, rdx);
+  }
+
   { Comment cmnt(masm_, "[ Stack check");
     Label ok;
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -84,38 +150,371 @@
   }
 
   { Comment cmnt(masm_, "[ Body");
+    ASSERT(loop_depth() == 0);
     VisitStatements(fun->body());
+    ASSERT(loop_depth() == 0);
   }
 
   { Comment cmnt(masm_, "[ return <undefined>;");
-    // Emit a 'return undefined' in case control fell off the end of the
-    // body.
+    // Emit a 'return undefined' in case control fell off the end of the body.
     __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-    SetReturnPosition(fun);
+    EmitReturnSequence(function_->end_position());
+  }
+}
+
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ jmp(&return_label_);
+  } else {
+    __ bind(&return_label_);
     if (FLAG_trace) {
       __ push(rax);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
+#ifdef DEBUG
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+#endif
+    CodeGenerator::RecordPositions(masm_, position);
     __ RecordJSReturn();
-
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
     __ movq(rsp, rbp);
     __ pop(rbp);
-    __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+    __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Add padding that will be overwritten by a debugger breakpoint.  We
     // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
     // (3 + 1 + 3).
-    const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+    const int kPadding = Assembler::kJSReturnSequenceLength - 7;
     for (int i = 0; i < kPadding; ++i) {
       masm_->int3();
     }
+    // Check that the size of the code used for returning matches what is
+    // expected by the debugger.
+    ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+              masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
 #endif
   }
 }
 
 
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
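+  // Apply the given expression context to the value in |source|: effect
+  // contexts discard it, value contexts push it, and test contexts convert
+  // it into control flow.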
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:
+      break;
+    case Expression::kValue:
+      __ push(source);
+      break;
+    case Expression::kTest:
+      TestAndBranch(source, true_label_, false_label_);
+      break;
+    case Expression::kValueTest: {
+      Label discard;
+      __ push(source);
+      TestAndBranch(source, true_label_, &discard);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(false_label_);
+      break;
+    }
+    case Expression::kTestValue: {
+      Label discard;
+      __ push(source);
+      TestAndBranch(source, &discard, false_label_);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(true_label_);
+      break;
+    }
+  }
+}
+
+
+template <>
+Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
+                                                      Register scratch) {
+  switch (source->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      return Operand(rbp, SlotOffset(source));
+    case Slot::CONTEXT: {
+      int context_chain_length =
+          function_->scope()->ContextChainLength(source->var()->scope());
+      __ LoadContext(scratch, context_chain_length);
+      return CodeGenerator::ContextOperand(scratch, source->index());
+    }
+    case Slot::LOOKUP:
+      UNIMPLEMENTED();
+      // Fall-through.
+    default:
+      UNREACHABLE();
+      return Operand(rax, 0);  // Dead code to make the compiler happy.
+  }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+  Operand location = CreateSlotOperand<Operand>(source, dst);
+  __ movq(dst, location);
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+                             Slot* source,
+                             Register scratch) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:
+      break;
+    case Expression::kValue: {
+      Operand location = CreateSlotOperand<Operand>(source, scratch);
+      __ push(location);
+      break;
+    }
+    case Expression::kTest:  // Fall through.
+    case Expression::kValueTest:  // Fall through.
+    case Expression::kTestValue:
+      Move(scratch, source);
+      Move(context, scratch);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:
+      break;
+    case Expression::kValue:
+      __ Push(expr->handle());
+      break;
+    case Expression::kTest:  // Fall through.
+    case Expression::kValueTest:  // Fall through.
+    case Expression::kTestValue:
+      __ Move(rax, expr->handle());
+      Move(context, rax);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+                             Register src,
+                             Register scratch1,
+                             Register scratch2) {
+  switch (dst->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      __ movq(Operand(rbp, SlotOffset(dst)), src);
+      break;
+    case Slot::CONTEXT: {
+      ASSERT(!src.is(scratch1));
+      ASSERT(!src.is(scratch2));
+      ASSERT(!scratch1.is(scratch2));
+      int context_chain_length =
+          function_->scope()->ContextChainLength(dst->var()->scope());
+      __ LoadContext(scratch1, context_chain_length);
+      __ movq(Operand(scratch1, Context::SlotOffset(dst->index())), src);
+      int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+      __ RecordWrite(scratch1, offset, src, scratch2);
+      break;
+    }
+    case Slot::LOOKUP:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+                                    Register source,
+                                    int drop_count) {
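+  // Drop |drop_count| elements from the stack and then move |source| into
+  // the given expression context, reusing a dropped slot when the value
+  // must stay on the stack.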
+  ASSERT(drop_count > 0);
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:
+      __ addq(rsp, Immediate(drop_count * kPointerSize));
+      break;
+    case Expression::kValue:
+      if (drop_count > 1) {
+        __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
+      }
+      __ movq(Operand(rsp, 0), source);
+      break;
+    case Expression::kTest:
+      ASSERT(!source.is(rsp));
+      __ addq(rsp, Immediate(drop_count * kPointerSize));
+      TestAndBranch(source, true_label_, false_label_);
+      break;
+    case Expression::kValueTest: {
+      Label discard;
+      if (drop_count > 1) {
+        __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
+      }
+      __ movq(Operand(rsp, 0), source);
+      TestAndBranch(source, true_label_, &discard);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(false_label_);
+      break;
+    }
+    case Expression::kTestValue: {
+      Label discard;
+      __ movq(Operand(rsp, 0), source);
+      TestAndBranch(source, &discard, false_label_);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(true_label_);
+      break;
+    }
+  }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+                                      Label* true_label,
+                                      Label* false_label) {
+  ASSERT_NE(NULL, true_label);
+  ASSERT_NE(NULL, false_label);
+  // Use the shared ToBoolean stub to compile the value in the register into
+  // control flow to the code generator's true and false labels.  Perform
+  // the fast checks assumed by the stub.
+
+  // The undefined value is false.
+  __ CompareRoot(source, Heap::kUndefinedValueRootIndex);
+  __ j(equal, false_label);
+  __ CompareRoot(source, Heap::kTrueValueRootIndex);  // True is true.
+  __ j(equal, true_label);
+  __ CompareRoot(source, Heap::kFalseValueRootIndex);  // False is false.
+  __ j(equal, false_label);
+  ASSERT_EQ(0, kSmiTag);
+  __ SmiCompare(source, Smi::FromInt(0));  // The smi zero is false.
+  __ j(equal, false_label);
+  Condition is_smi = masm_->CheckSmi(source);  // All other smis are true.
+  __ j(is_smi, true_label);
+
+  // Call the stub for all other cases.
+  __ push(source);
+  ToBooleanStub stub;
+  __ CallStub(&stub);
+  __ testq(rax, rax);  // The stub returns nonzero for true.
+  __ j(not_zero, true_label);
+  __ jmp(false_label);
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+  Comment cmnt(masm_, "[ Declaration");
+  Variable* var = decl->proxy()->var();
+  ASSERT(var != NULL);  // Must have been resolved.
+  Slot* slot = var->slot();
+  Property* prop = var->AsProperty();
+
+  if (slot != NULL) {
+    switch (slot->type()) {
+      case Slot::PARAMETER:  // Fall through.
+      case Slot::LOCAL:
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+          __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+          __ pop(Operand(rbp, SlotOffset(var->slot())));
+        }
+        break;
+
+      case Slot::CONTEXT:
+        // The variable in the decl always resides in the current context.
+        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        if (FLAG_debug_code) {
+          // Check if we have the correct context pointer.
+          __ movq(rbx,
+                  CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
+          __ cmpq(rbx, rsi);
+          __ Check(equal, "Unexpected declaration in current context.");
+        }
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+          __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+                  kScratchRegister);
+          // No write barrier since the hole value is in old space.
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+          __ pop(rax);
+          __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+          int offset = Context::SlotOffset(slot->index());
+          __ RecordWrite(rsi, offset, rax, rcx);
+        }
+        break;
+
+      case Slot::LOOKUP: {
+        __ push(rsi);
+        __ Push(var->name());
+        // Declaration nodes are always introduced in one of two modes.
+        ASSERT(decl->mode() == Variable::VAR ||
+               decl->mode() == Variable::CONST);
+        PropertyAttributes attr =
+            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+        __ Push(Smi::FromInt(attr));
+        // Push initial value, if any.
+        // Note: For variables we must not push an initial value (such as
+        // 'undefined') because we may have a (legal) redeclaration and we
+        // must not destroy the current value.
+        if (decl->mode() == Variable::CONST) {
+          __ PushRoot(Heap::kTheHoleValueRootIndex);
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+        } else {
+          __ Push(Smi::FromInt(0));  // no initial value!
+        }
+        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+        break;
+      }
+    }
+
+  } else if (prop != NULL) {
+    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+      // We are declaring a function or constant that rewrites to a
+      // property.  Use (keyed) IC to set the initial value.
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->key()->context());
+      Visit(prop->key());
+
+      if (decl->fun() != NULL) {
+        ASSERT_EQ(Expression::kValue, decl->fun()->context());
+        Visit(decl->fun());
+        __ pop(rax);
+      } else {
+        __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+      }
+
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+
+      // Absence of a test rax instruction following the call
+      // indicates that the load was not inlined.
+
+      // Value in rax is ignored (declarations are statements).  Receiver
+      // and key on stack are discarded.
+      __ addq(rsp, Immediate(2 * kPointerSize));
+    }
+  }
+}
+
+
 void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
@@ -126,56 +525,17 @@
 }
 
 
-void FastCodeGenerator::VisitBlock(Block* stmt) {
-  Comment cmnt(masm_, "[ Block");
-  SetStatementPosition(stmt);
-  VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  Visit(stmt->expression());
-}
-
-
 void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
-  SetStatementPosition(stmt);
   Expression* expr = stmt->expression();
-  Visit(expr);
-
-  // Complete the statement based on the location of the subexpression.
-  Location source = expr->location();
-  ASSERT(!source.is_nowhere());
-  if (source.is_temporary()) {
-    __ pop(rax);
-  } else {
-    ASSERT(source.is_constant());
-    ASSERT(expr->AsLiteral() != NULL);
+  if (expr->AsLiteral() != NULL) {
     __ Move(rax, expr->AsLiteral()->handle());
+  } else {
+    Visit(expr);
+    ASSERT_EQ(Expression::kValue, expr->context());
+    __ pop(rax);
   }
-  if (FLAG_trace) {
-    __ push(rax);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-
-  __ RecordJSReturn();
-  // Do not use the leave instruction here because it is too short to
-  // patch with the code required by the debugger.
-  __ movq(rsp, rbp);
-  __ pop(rbp);
-  __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Add padding that will be overwritten by a debugger breakpoint.  We
-  // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
-  // (3 + 1 + 3).
-  const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
-  for (int i = 0; i < kPadding; ++i) {
-    masm_->int3();
-  }
-#endif
+  EmitReturnSequence(stmt->statement_pos());
 }
 
 
@@ -183,7 +543,8 @@
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  Handle<JSFunction> boilerplate =
+      Compiler::BuildBoilerplate(expr, script_, this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
@@ -192,12 +553,7 @@
   __ push(rsi);
   __ Push(boilerplate);
   __ CallRuntime(Runtime::kNewClosure, 2);
-
-  if (expr->location().is_temporary()) {
-    __ push(rax);
-  } else {
-    ASSERT(expr->location().is_nowhere());
-  }
+  Move(expr->context(), rax);
 }
 
 
@@ -205,6 +561,7 @@
   Comment cmnt(masm_, "[ VariableProxy");
   Expression* rewrite = expr->var()->rewrite();
   if (rewrite == NULL) {
+    ASSERT(expr->var()->is_global());
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in rcx and the global
     // object on the stack.
@@ -212,33 +569,73 @@
     __ Move(rcx, expr->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
     // A test rax instruction following the call is used by the IC to
     // indicate that the inobject property case was inlined.  Ensure there
     // is no test rax instruction here.
-    if (expr->location().is_temporary()) {
-      // Replace the global object with the result.
-      __ movq(Operand(rsp, 0), rax);
-    } else {
-      ASSERT(expr->location().is_nowhere());
-      __ addq(rsp, Immediate(kPointerSize));
-    }
+    __ nop();
 
-  } else {
-    Comment cmnt(masm_, "Stack slot");
+    DropAndMove(expr->context(), rax);
+  } else if (rewrite->AsSlot() != NULL) {
     Slot* slot = rewrite->AsSlot();
-    ASSERT(slot != NULL);
-    if (expr->location().is_temporary()) {
-      __ push(Operand(rbp, SlotOffset(slot)));
-    } else {
-      ASSERT(expr->location().is_nowhere());
+    if (FLAG_debug_code) {
+      switch (slot->type()) {
+        case Slot::LOCAL:
+        case Slot::PARAMETER: {
+          Comment cmnt(masm_, "Stack slot");
+          break;
+        }
+        case Slot::CONTEXT: {
+          Comment cmnt(masm_, "Context slot");
+          break;
+        }
+        case Slot::LOOKUP:
+          UNIMPLEMENTED();
+          break;
+        default:
+          UNREACHABLE();
+      }
     }
+    Move(expr->context(), slot, rax);
+  } else {
+    // A variable has been rewritten into an explicit access to
+    // an object property.
+    Property* property = rewrite->AsProperty();
+    ASSERT_NOT_NULL(property);
+
+    // Currently the only parameter expressions that can occur are
+    // of the form "slot[literal]".
+
+    // Check that the object is in a slot.
+    Variable* object = property->obj()->AsVariableProxy()->AsVariable();
+    ASSERT_NOT_NULL(object);
+    Slot* object_slot = object->slot();
+    ASSERT_NOT_NULL(object_slot);
+
+    // Load the object.
+    Move(Expression::kValue, object_slot, rax);
+
+    // Check that the key is a smi.
+    Literal* key_literal = property->key()->AsLiteral();
+    ASSERT_NOT_NULL(key_literal);
+    ASSERT(key_literal->handle()->IsSmi());
+
+    // Load the key.
+    Move(Expression::kValue, key_literal);
+
+    // Do a KEYED property load.
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // Notice: We must not have a "test rax, ..." instruction after
+    // the call. It is treated specially by the LoadIC code.
+
+    // Drop key and object left on the stack by IC, and push the result.
+    DropAndMove(expr->context(), rax, 2);
   }
 }
 
 
 void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExp Literal");
+  Comment cmnt(masm_, "[ RegExpLiteral");
   Label done;
   // Registers will be used as follows:
   // rdi = JS function.
@@ -260,10 +657,126 @@
   __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   // Label done:
   __ bind(&done);
-  if (expr->location().is_temporary()) {
-    __ push(rax);
+  Move(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  Label boilerplate_exists;
+
+  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ movq(rax, FieldOperand(rbx, literal_offset));
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &boilerplate_exists);
+  // Create boilerplate if it does not exist.
+  // Literal array (0).
+  __ push(rbx);
+  // Literal index (1).
+  __ Push(Smi::FromInt(expr->literal_index()));
+  // Constant properties (2).
+  __ Push(expr->constant_properties());
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ bind(&boilerplate_exists);
+  // rax contains boilerplate.
+  // Clone boilerplate.
+  __ push(rax);
+  if (expr->depth() == 1) {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
   } else {
-    ASSERT(expr->location().is_nowhere());
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  }
+
+  // If result_saved is true, the result is on top of the stack and in rax.
+  // If result_saved is false, the result is only in rax.
+  bool result_saved = false;
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(rax);  // Save result on the stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:  // fall through
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          Visit(value);
+          ASSERT_EQ(Expression::kValue, value->context());
+          __ pop(rax);
+          __ Move(rcx, key->handle());
+          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          __ call(ic, RelocInfo::CODE_TARGET);
+          // StoreIC leaves the receiver on the stack.
+          __ movq(rax, Operand(rsp, 0));  // Restore result into rax.
+          break;
+        }
+        // fall through
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ push(rax);
+        Visit(key);
+        ASSERT_EQ(Expression::kValue, key->context());
+        Visit(value);
+        ASSERT_EQ(Expression::kValue, value->context());
+        __ CallRuntime(Runtime::kSetProperty, 3);
+        __ movq(rax, Operand(rsp, 0));  // Restore result into rax.
+        break;
+      case ObjectLiteral::Property::SETTER:  // fall through
+      case ObjectLiteral::Property::GETTER:
+        __ push(rax);
+        Visit(key);
+        ASSERT_EQ(Expression::kValue, key->context());
+        __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
+                Smi::FromInt(1) :
+                Smi::FromInt(0));
+        Visit(value);
+        ASSERT_EQ(Expression::kValue, value->context());
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ movq(rax, Operand(rsp, 0));  // Restore result into rax.
+        break;
+      default: UNREACHABLE();
+    }
+  }
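+  // Complete the expression according to its context.  In the hybrid
+  // kValueTest and kTestValue contexts the value is only needed when the
+  // test takes the corresponding branch; the other branch discards it.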
+  switch (expr->context()) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:
+      if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+      break;
+    case Expression::kValue:
+      if (!result_saved) __ push(rax);
+      break;
+    case Expression::kTest:
+      if (result_saved) __ pop(rax);
+      TestAndBranch(rax, true_label_, false_label_);
+      break;
+    case Expression::kValueTest: {
+      Label discard;
+      if (!result_saved) __ push(rax);
+      TestAndBranch(rax, true_label_, &discard);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(false_label_);
+      break;
+    }
+    case Expression::kTestValue: {
+      Label discard;
+      if (!result_saved) __ push(rax);
+      TestAndBranch(rax, &discard, false_label_);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(true_label_);
+      break;
+    }
   }
 }
 
@@ -316,7 +829,7 @@
       result_saved = true;
     }
     Visit(subexpr);
-    ASSERT(subexpr->location().is_temporary());
+    ASSERT_EQ(Expression::kValue, subexpr->context());
 
     // Store the subexpression value in the array's elements.
     __ pop(rax);  // Subexpression value.
@@ -329,231 +842,849 @@
     __ RecordWrite(rbx, offset, rax, rcx);
   }
 
-  Location destination = expr->location();
-  if (destination.is_nowhere() && result_saved) {
-    __ addq(rsp, Immediate(kPointerSize));
-  } else if (destination.is_temporary() && !result_saved) {
-    __ push(rax);
+  switch (expr->context()) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:
+      if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+      break;
+    case Expression::kValue:
+      if (!result_saved) __ push(rax);
+      break;
+    case Expression::kTest:
+      if (result_saved) __ pop(rax);
+      TestAndBranch(rax, true_label_, false_label_);
+      break;
+    case Expression::kValueTest: {
+      Label discard;
+      if (!result_saved) __ push(rax);
+      TestAndBranch(rax, true_label_, &discard);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(false_label_);
+      break;
+    }
+    case Expression::kTestValue: {
+      Label discard;
+      if (!result_saved) __ push(rax);
+      TestAndBranch(rax, &discard, false_label_);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(true_label_);
+      break;
+    }
   }
 }
 
 
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
-  Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
-  Expression* rhs = expr->value();
-  Visit(rhs);
-
-  // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
-
-  // Complete the assignment based on the location of the right-hand-side
-  // value and the desired location of the assignment value.
-  Location destination = expr->location();
-  Location source = rhs->location();
-  ASSERT(!destination.is_constant());
-  ASSERT(!source.is_nowhere());
-
   if (var->is_global()) {
-    // Assignment to a global variable, use inline caching.  Right-hand-side
-    // value is passed in rax, variable name in rcx, and the global object
-    // on the stack.
-    if (source.is_temporary()) {
-      __ pop(rax);
-    } else {
-      ASSERT(source.is_constant());
-      ASSERT(rhs->AsLiteral() != NULL);
-      __ Move(rax, rhs->AsLiteral()->handle());
-    }
+    // Assignment to a global variable.  Use inline caching for the
+    // assignment.  Right-hand-side value is passed in rax, variable name in
+    // rcx, and the global object on the stack.
+    __ pop(rax);
     __ Move(rcx, var->name());
     __ push(CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // Overwrite the global object on the stack with the result if needed.
-    if (destination.is_temporary()) {
-      __ movq(Operand(rsp, 0), rax);
-    } else {
-      __ addq(rsp, Immediate(kPointerSize));
-    }
-  } else {
-    if (source.is_temporary()) {
-      if (destination.is_temporary()) {
-        // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side temporary
-        // on the stack.
-        __ movq(kScratchRegister, Operand(rsp, 0));
-        __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
-      } else {
-        ASSERT(destination.is_nowhere());
-        // Case 'var = temp'.  Discard right-hand-side temporary.
-        __ pop(Operand(rbp, SlotOffset(var->slot())));
+    DropAndMove(expr->context(), rax);
+
+  } else if (var->slot()) {
+    Slot* slot = var->slot();
+    ASSERT_NOT_NULL(slot);  // Variables rewritten as properties are not handled.
+    switch (slot->type()) {
+      case Slot::LOCAL:
+      case Slot::PARAMETER: {
+        switch (expr->context()) {
+          case Expression::kUninitialized:
+            UNREACHABLE();
+          case Expression::kEffect:
+            // Perform assignment and discard value.
+            __ pop(Operand(rbp, SlotOffset(var->slot())));
+            break;
+          case Expression::kValue:
+            // Perform assignment and preserve value.
+            __ movq(rax, Operand(rsp, 0));
+            __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+            break;
+          case Expression::kTest:
+            // Perform assignment and test (and discard) value.
+            __ pop(rax);
+            __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+            TestAndBranch(rax, true_label_, false_label_);
+            break;
+          case Expression::kValueTest: {
+            Label discard;
+            __ movq(rax, Operand(rsp, 0));
+            __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+            TestAndBranch(rax, true_label_, &discard);
+            __ bind(&discard);
+            __ addq(rsp, Immediate(kPointerSize));
+            __ jmp(false_label_);
+            break;
+          }
+          case Expression::kTestValue: {
+            Label discard;
+            __ movq(rax, Operand(rsp, 0));
+            __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+            TestAndBranch(rax, &discard, false_label_);
+            __ bind(&discard);
+            __ addq(rsp, Immediate(kPointerSize));
+            __ jmp(true_label_);
+            break;
+          }
+        }
+        break;
       }
-    } else {
-      ASSERT(source.is_constant());
-      ASSERT(rhs->AsLiteral() != NULL);
-      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
-      // discarded result.  Always perform the assignment.
-      __ Move(kScratchRegister, rhs->AsLiteral()->handle());
-      __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
-      if (destination.is_temporary()) {
-        // Case 'temp <- (var = constant)'.  Save result.
-        __ push(kScratchRegister);
+
+      case Slot::CONTEXT: {
+        int chain_length =
+            function_->scope()->ContextChainLength(slot->var()->scope());
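+        // The chain length is the number of closure links between the
+        // current context (in rsi) and the context that holds the slot.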
+        if (chain_length > 0) {
+          // Move up the context chain to the context containing the slot.
+          __ movq(rax,
+                  Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+          // Load the function context (which is the incoming, outer context).
+          __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+          for (int i = 1; i < chain_length; i++) {
+            __ movq(rax,
+                    Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)));
+            __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+          }
+        } else {  // Slot is in the current context.  Generate optimized code.
+          __ movq(rax, rsi);  // RecordWrite destroys the object register.
+        }
+        if (FLAG_debug_code) {
+          __ cmpq(rax,
+                  Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+          __ Check(equal, "Context Slot chain length wrong.");
+        }
+        __ pop(rcx);
+        __ movq(Operand(rax, Context::SlotOffset(slot->index())), rcx);
+
+        // RecordWrite may destroy all its register arguments.
+        if (expr->context() == Expression::kValue) {
+          __ push(rcx);
+        } else if (expr->context() != Expression::kEffect) {
+          __ movq(rdx, rcx);
+        }
+        int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+        __ RecordWrite(rax, offset, rcx, rbx);
+        if (expr->context() != Expression::kEffect &&
+            expr->context() != Expression::kValue) {
+          Move(expr->context(), rdx);
+        }
+        break;
       }
+
+      case Slot::LOOKUP:
+        UNREACHABLE();
+        break;
     }
   }
 }
 
 
-void FastCodeGenerator::VisitCall(Call* expr) {
-  Expression* fun = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  Variable* var = fun->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL && !var->is_this() && var->is_global());
-  ASSERT(!var->is_possibly_eval());
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  ASSERT(prop->key()->AsLiteral() != NULL);
 
-  __ Push(var->name());
-  // Push global object (receiver).
-  __ push(CodeGenerator::GlobalObject());
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(Operand(rsp, kPointerSize));  // Receiver is under value.
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
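+  // Stack layout: the value is on top with the receiver directly below
+  // it.  The store IC expects the value in rax, the property name in rcx,
+  // and the receiver on the stack.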
+  __ pop(rax);
+  __ Move(rcx, prop->key()->AsLiteral()->handle());
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(rax);  // Result of assignment, saved even if not needed.
+    __ push(Operand(rsp, kPointerSize));  // Receiver is under value.
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(rax);
+  }
+
+  DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a keyed store IC.
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    // Receiver is under the key and value.
+    __ push(Operand(rsp, 2 * kPointerSize));
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
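+  // Stack layout: the value is on top, then the key, then the receiver.
+  // The keyed store IC expects the value in rax and the key and receiver
+  // on the stack.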
+  __ pop(rax);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // This nop signals to the IC that there is no inlined code at the call
+  // site for it to patch.
+  __ nop();
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(rax);  // Result of assignment, saved even if not needed.
+    // Receiver is under the key and value.
+    __ push(Operand(rsp, 2 * kPointerSize));
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(rax);
+  }
+
+  // Receiver and key are still on stack.
+  __ addq(rsp, Immediate(2 * kPointerSize));
+  Move(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+  uint32_t dummy;
+
+  // Record the source position for the property load.
+  SetSourcePosition(expr->position());
+
+  // Evaluate receiver.
+  Visit(expr->obj());
+
+  if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+      !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+    // Do a NAMED property load.
+    // The IC expects the property name in rcx and the receiver on the stack.
+    __ Move(rcx, key->AsLiteral()->handle());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a "test rax,..."
+    // instruction after the call; it is treated specially by the LoadIC code.
+    __ nop();
+  } else {
+    // Do a KEYED property load.
+    Visit(expr->key());
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // Notice: We must not have a "test rax, ..." instruction after
+    // the call. It is treated specially by the LoadIC code.
+
+    // Drop key left on the stack by IC.
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+  DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
     Visit(args->at(i));
-    ASSERT(!args->at(i)->location().is_nowhere());
-    if (args->at(i)->location().is_constant()) {
-      ASSERT(args->at(i)->AsLiteral() != NULL);
-      __ Push(args->at(i)->AsLiteral()->handle());
-    }
+    ASSERT_EQ(Expression::kValue, args->at(i)->context());
   }
-  // Record source position for debugger
+  // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                          NOT_IN_LOOP);
-  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  __ call(ic, reloc_info);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
-  if (expr->location().is_temporary()) {
-    __ movq(Operand(rsp, 0), rax);
-  } else {
-    ASSERT(expr->location().is_nowhere());
-    __ addq(rsp, Immediate(kPointerSize));
+  DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+  // Code common for calls using the call stub.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
   }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+  __ CallStub(&stub);
+  // Restore context register.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Comment cmnt(masm_, "[ Call");
+  Expression* fun = expr->expression();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // Call to the identifier 'eval'.
+    UNREACHABLE();
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // Call to a global variable.
+    __ Push(var->name());
+    // Push global object as receiver for the call IC lookup.
+    __ push(CodeGenerator::GlobalObject());
+    EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // Call to a lookup slot.
+    UNREACHABLE();
+  } else if (fun->AsProperty() != NULL) {
+    // Call to an object property.
+    Property* prop = fun->AsProperty();
+    Literal* key = prop->key()->AsLiteral();
+    if (key != NULL && key->handle()->IsSymbol()) {
+      // Call to a named property, use call IC.
+      __ Push(key->handle());
+      Visit(prop->obj());
+      EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+    } else {
+      // Call to a keyed property, use keyed load IC followed by function
+      // call.
+      Visit(prop->obj());
+      Visit(prop->key());
+      // Record source code position for IC call.
+      SetSourcePosition(prop->position());
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      // By emitting a nop we make sure that we do not have a "test rax,..."
+      // instruction after the call; it is treated specially by the LoadIC
+      // code.
+      __ nop();
+      // Drop key left on the stack by IC.
+      __ addq(rsp, Immediate(kPointerSize));
+      // Pop receiver.
+      __ pop(rbx);
+      // Push result (function).
+      __ push(rax);
+      // Push receiver object on stack.
+      if (prop->is_synthetic()) {
+        __ push(CodeGenerator::GlobalObject());
+      } else {
+        __ push(rbx);
+      }
+      EmitCallWithStub(expr);
+    }
+  } else {
+    // Call to some other expression.  If the expression is an anonymous
+    // function literal not called in a loop, mark it as one that should
+    // also use the fast code generator.
+    FunctionLiteral* lit = fun->AsFunctionLiteral();
+    if (lit != NULL &&
+        lit->name()->Equals(Heap::empty_string()) &&
+        loop_depth() == 0) {
+      lit->set_try_fast_codegen(true);
+    }
+    Visit(fun);
+    // Load global receiver object.
+    __ movq(rbx, CodeGenerator::GlobalObject());
+    __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    // Emit function call.
+    EmitCallWithStub(expr);
+  }
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+  // Push function on the stack.
+  Visit(expr->expression());
+  ASSERT_EQ(Expression::kValue, expr->expression()->context());
+  // In value context the function is already on the stack.
+
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT_EQ(Expression::kValue, args->at(i)->context());
+    // Arguments in value context are already on the stack,
+    // so there is nothing to do here.
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load the function into rdi and the argument count into rax.
+  __ Set(rax, arg_count);
+  // Function is in rsp[arg_count + 1].
+  __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+
+  Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  // Replace function on TOS with result in rax, or pop it.
+  DropAndMove(expr->context(), rax);
 }
 
 
 void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
-  Runtime::Function* function = expr->function();
 
-  ASSERT(function != NULL);
+  if (expr->is_jsruntime()) {
+    // Prepare for calling JS runtime function.
+    __ Push(expr->name());
+    __ movq(rax, CodeGenerator::GlobalObject());
+    __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+  }
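+  // The name and the builtins object are now on the stack, arranged like
+  // an ordinary named call so that the call IC below can look the
+  // function up on the builtins object.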
 
   // Push the arguments ("left-to-right").
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
     Visit(args->at(i));
-    ASSERT(!args->at(i)->location().is_nowhere());
-    if (args->at(i)->location().is_constant()) {
-      ASSERT(args->at(i)->AsLiteral() != NULL);
-      __ Push(args->at(i)->AsLiteral()->handle());
-    } else {
-      ASSERT(args->at(i)->location().is_temporary());
-      // If location is temporary, it is already on the stack,
-      // so nothing to do here.
-    }
+    ASSERT_EQ(Expression::kValue, args->at(i)->context());
   }
 
-  __ CallRuntime(function, arg_count);
-  if (expr->location().is_temporary()) {
-    __ push(rax);
+  if (expr->is_jsruntime()) {
+    // Call the JS runtime function.
+    Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                           NOT_IN_LOOP);
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // Restore context register.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    // Discard the function left on TOS.
+    DropAndMove(expr->context(), rax);
   } else {
-    ASSERT(expr->location().is_nowhere());
+    __ CallRuntime(expr->function(), arg_count);
+    Move(expr->context(), rax);
+  }
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  Comment cmnt(masm_, "[ CountOperation");
+  VariableProxy* proxy = expr->expression()->AsVariableProxy();
+  ASSERT(proxy->AsVariable() != NULL);
+  ASSERT(proxy->AsVariable()->is_global());
+
+  Visit(proxy);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+  switch (expr->context()) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kValue:  // Fall through
+    case Expression::kTest:  // Fall through
+    case Expression::kTestValue:  // Fall through
+    case Expression::kValueTest:
+      // Duplicate the result on the stack.
+      __ push(rax);
+      break;
+    case Expression::kEffect:
+      // Do not save result.
+      break;
+  }
+  // Call runtime for +1/-1.
+  __ push(rax);
+  __ Push(Smi::FromInt(1));
+  if (expr->op() == Token::INC) {
+    __ CallRuntime(Runtime::kNumberAdd, 2);
+  } else {
+    __ CallRuntime(Runtime::kNumberSub, 2);
+  }
+  // Call Store IC.
+  __ Move(rcx, proxy->AsVariable()->name());
+  __ push(CodeGenerator::GlobalObject());
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // Drop the receiver left on the stack by the store IC.
+  __ addq(rsp, Immediate(kPointerSize));
+
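+  // Except in effect context, a copy of the number value (the value
+  // before the increment or decrement) was pushed above; the switch
+  // below leaves it as the result or tests it.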
+  switch (expr->context()) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+    case Expression::kEffect:  // Fall through
+    case Expression::kValue:
+      // Do nothing.  The result is either on the stack (value context)
+      // or has been discarded (effect context).
+      break;
+    case Expression::kTest:
+      __ pop(rax);
+      TestAndBranch(rax, true_label_, false_label_);
+      break;
+    case Expression::kValueTest: {
+      Label discard;
+      __ movq(rax, Operand(rsp, 0));
+      TestAndBranch(rax, true_label_, &discard);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(false_label_);
+      break;
+    }
+    case Expression::kTestValue: {
+      Label discard;
+      __ movq(rax, Operand(rsp, 0));
+      TestAndBranch(rax, &discard, false_label_);
+      __ bind(&discard);
+      __ addq(rsp, Immediate(kPointerSize));
+      __ jmp(true_label_);
+      break;
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      Visit(expr->expression());
+      ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+      switch (expr->context()) {
+        case Expression::kUninitialized:
+          UNREACHABLE();
+          break;
+        case Expression::kEffect:
+          break;
+        case Expression::kValue:
+          __ PushRoot(Heap::kUndefinedValueRootIndex);
+          break;
+        case Expression::kTestValue:
+          // The value (undefined) is false, so it is needed on the stack.
+          __ PushRoot(Heap::kUndefinedValueRootIndex);
+          // Fall through.
+        case Expression::kTest:  // Fall through.
+        case Expression::kValueTest:
+          __ jmp(false_label_);
+          break;
+      }
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
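+      // NOT is compiled by visiting the operand in test context with the
+      // roles of the true and false labels swapped.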
+      Label push_true;
+      Label push_false;
+      Label done;
+      Label* saved_true = true_label_;
+      Label* saved_false = false_label_;
+      switch (expr->context()) {
+        case Expression::kUninitialized:
+          UNREACHABLE();
+          break;
+
+        case Expression::kValue:
+          true_label_ = &push_false;
+          false_label_ = &push_true;
+          Visit(expr->expression());
+          __ bind(&push_true);
+          __ PushRoot(Heap::kTrueValueRootIndex);
+          __ jmp(&done);
+          __ bind(&push_false);
+          __ PushRoot(Heap::kFalseValueRootIndex);
+          __ bind(&done);
+          break;
+
+        case Expression::kEffect:
+          true_label_ = &done;
+          false_label_ = &done;
+          Visit(expr->expression());
+          __ bind(&done);
+          break;
+
+        case Expression::kTest:
+          true_label_ = saved_false;
+          false_label_ = saved_true;
+          Visit(expr->expression());
+          break;
+
+        case Expression::kValueTest:
+          true_label_ = saved_false;
+          false_label_ = &push_true;
+          Visit(expr->expression());
+          __ bind(&push_true);
+          __ PushRoot(Heap::kTrueValueRootIndex);
+          __ jmp(saved_true);
+          break;
+
+        case Expression::kTestValue:
+          true_label_ = &push_false;
+          false_label_ = saved_true;
+          Visit(expr->expression());
+          __ bind(&push_false);
+          __ PushRoot(Heap::kFalseValueRootIndex);
+          __ jmp(saved_false);
+          break;
+      }
+      true_label_ = saved_true;
+      false_label_ = saved_false;
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+      if (proxy != NULL &&
+          !proxy->var()->is_this() &&
+          proxy->var()->is_global()) {
+        Comment cmnt(masm_, "Global variable");
+        __ push(CodeGenerator::GlobalObject());
+        __ Move(rcx, proxy->name());
+        Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+        // Use a regular load, not a contextual load, to avoid a reference
+        // error.
+        __ Call(ic, RelocInfo::CODE_TARGET);
+        __ movq(Operand(rsp, 0), rax);
+      } else if (proxy != NULL &&
+                 proxy->var()->slot() != NULL &&
+                 proxy->var()->slot()->type() == Slot::LOOKUP) {
+        __ push(rsi);
+        __ Push(proxy->name());
+        __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+        __ push(rax);
+      } else {
+        // This expression cannot throw a reference error at the top level.
+        Visit(expr->expression());
+      }
+
+      __ CallRuntime(Runtime::kTypeof, 1);
+      Move(expr->context(), rax);
+      break;
+    }
+
+    default:
+      UNREACHABLE();
   }
 }
 
 
 void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
-  // Compile a short-circuited boolean or operation in a non-test
-  // context.
-  ASSERT(expr->op() == Token::OR);
-  // Compile (e0 || e1) as if it were
-  // (let (temp = e0) temp ? temp : e1).
+  Comment cmnt(masm_, "[ BinaryOperation");
+  switch (expr->op()) {
+    case Token::COMMA:
+      ASSERT_EQ(Expression::kEffect, expr->left()->context());
+      ASSERT_EQ(expr->context(), expr->right()->context());
+      Visit(expr->left());
+      Visit(expr->right());
+      break;
 
-  Label eval_right, done;
-  Location destination = expr->location();
-  ASSERT(!destination.is_constant());
+    case Token::OR:
+    case Token::AND:
+      EmitLogicalOperation(expr);
+      break;
 
-  Expression* left = expr->left();
-  Location left_source = left->location();
-  ASSERT(!left_source.is_nowhere());
+    case Token::ADD:
+    case Token::SUB:
+    case Token::DIV:
+    case Token::MOD:
+    case Token::MUL:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      ASSERT_EQ(Expression::kValue, expr->left()->context());
+      ASSERT_EQ(Expression::kValue, expr->right()->context());
 
-  Expression* right = expr->right();
-  Location right_source = right->location();
-  ASSERT(!right_source.is_nowhere());
+      Visit(expr->left());
+      Visit(expr->right());
+      GenericBinaryOpStub stub(expr->op(),
+                               NO_OVERWRITE,
+                               NO_GENERIC_BINARY_FLAGS);
+      __ CallStub(&stub);
+      Move(expr->context(), rax);
 
-  Visit(left);
-  // Use the shared ToBoolean stub to find the boolean value of the
-  // left-hand subexpression.  Load the value into rax to perform some
-  // inlined checks assumed by the stub.
-  if (left_source.is_temporary()) {
-    if (destination.is_temporary()) {
-      // Copy the left-hand value into rax because we may need it as the
-      // final result.
-      __ movq(rax, Operand(rsp, 0));
-    } else {
-      // Pop the left-hand value into rax because we will not need it as the
-      // final result.
-      __ pop(rax);
+      break;
     }
-  } else {
-    // Load the left-hand value into rax.  Put it on the stack if we may
-    // need it.
-    ASSERT(left->AsLiteral() != NULL);
-    __ Move(rax, left->AsLiteral()->handle());
-    if (destination.is_temporary()) __ push(rax);
+    default:
+      UNREACHABLE();
   }
-  // The left-hand value is in rax.  It is also on the stack iff the
-  // destination location is temporary.
-
-  // Perform fast checks assumed by the stub.
-  // The undefined value is false.
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &eval_right);
-  __ CompareRoot(rax, Heap::kTrueValueRootIndex);  // True is true.
-  __ j(equal, &done);
-  __ CompareRoot(rax, Heap::kFalseValueRootIndex);  // False is false.
-  __ j(equal, &eval_right);
-  ASSERT(kSmiTag == 0);
-  __ SmiCompare(rax, Smi::FromInt(0));  // The smi zero is false.
-  __ j(equal, &eval_right);
-  Condition is_smi = masm_->CheckSmi(rax);  // All other smis are true.
-  __ j(is_smi, &done);
-
-  // Call the stub for all other cases.
-  __ push(rax);
-  ToBooleanStub stub;
-  __ CallStub(&stub);
-  __ testq(rax, rax);  // The stub returns nonzero for true.
-  __ j(not_zero, &done);
-
-  __ bind(&eval_right);
-  // Discard the left-hand value if present on the stack.
-  if (destination.is_temporary()) {
-    __ addq(rsp, Immediate(kPointerSize));
-  }
-  Visit(right);
-
-  // Save or discard the right-hand value as needed.
-  if (destination.is_temporary() && right_source.is_constant()) {
-    ASSERT(right->AsLiteral() != NULL);
-    __ Push(right->AsLiteral()->handle());
-  } else if (destination.is_nowhere() && right_source.is_temporary()) {
-    __ addq(rsp, Immediate(kPointerSize));
-  }
-
-  __ bind(&done);
 }
 
 
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  Comment cmnt(masm_, "[ CompareOperation");
+  ASSERT_EQ(Expression::kValue, expr->left()->context());
+  ASSERT_EQ(Expression::kValue, expr->right()->context());
+  Visit(expr->left());
+  Visit(expr->right());
+
+  // Convert current context to test context: Pre-test code.
+  Label push_true;
+  Label push_false;
+  Label done;
+  Label* saved_true = true_label_;
+  Label* saved_false = false_label_;
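+  // The comparison below always branches to true_label_ and false_label_;
+  // for value contexts these labels are bound after the comparison to
+  // code that pushes true or false.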
+  switch (expr->context()) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+
+    case Expression::kValue:
+      true_label_ = &push_true;
+      false_label_ = &push_false;
+      break;
+
+    case Expression::kEffect:
+      true_label_ = &done;
+      false_label_ = &done;
+      break;
+
+    case Expression::kTest:
+      break;
+
+    case Expression::kValueTest:
+      true_label_ = &push_true;
+      break;
+
+    case Expression::kTestValue:
+      false_label_ = &push_false;
+      break;
+  }
+  // Convert current context to test context: End pre-test code.
+
+  switch (expr->op()) {
+    case Token::IN: {
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+      __ j(equal, true_label_);
+      __ jmp(false_label_);
+      break;
+    }
+
+    case Token::INSTANCEOF: {
+      InstanceofStub stub;
+      __ CallStub(&stub);
+      __ testq(rax, rax);
+      __ j(zero, true_label_);  // The stub returns 0 for true.
+      __ jmp(false_label_);
+      break;
+    }
+
+    default: {
+      Condition cc = no_condition;
+      bool strict = false;
+      switch (expr->op()) {
+        case Token::EQ_STRICT:
+          strict = true;
+          // Fall through
+        case Token::EQ:
+          cc = equal;
+          __ pop(rax);
+          __ pop(rdx);
+          break;
+        case Token::LT:
+          cc = less;
+          __ pop(rax);
+          __ pop(rdx);
+          break;
+        case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = less;
+          __ pop(rdx);
+          __ pop(rax);
+          break;
+        case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = greater_equal;
+          __ pop(rdx);
+          __ pop(rax);
+          break;
+        case Token::GTE:
+          cc = greater_equal;
+          __ pop(rax);
+          __ pop(rdx);
+          break;
+        case Token::IN:
+        case Token::INSTANCEOF:
+        default:
+          UNREACHABLE();
+      }
+
+      // The comparison stub expects the smi vs. smi case to be handled
+      // before it is called.
+      Label slow_case;
+      __ JumpIfNotBothSmi(rax, rdx, &slow_case);
+      __ SmiCompare(rdx, rax);
+      __ j(cc, true_label_);
+      __ jmp(false_label_);
+
+      __ bind(&slow_case);
+      CompareStub stub(cc, strict);
+      __ CallStub(&stub);
+      __ testq(rax, rax);
+      __ j(cc, true_label_);
+      __ jmp(false_label_);
+    }
+  }
+
+  // Convert current context to test context: Post-test code.
+  switch (expr->context()) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+
+    case Expression::kValue:
+      __ bind(&push_true);
+      __ PushRoot(Heap::kTrueValueRootIndex);
+      __ jmp(&done);
+      __ bind(&push_false);
+      __ PushRoot(Heap::kFalseValueRootIndex);
+      __ bind(&done);
+      break;
+
+    case Expression::kEffect:
+      __ bind(&done);
+      break;
+
+    case Expression::kTest:
+      break;
+
+    case Expression::kValueTest:
+      __ bind(&push_true);
+      __ PushRoot(Heap::kTrueValueRootIndex);
+      __ jmp(saved_true);
+      break;
+
+    case Expression::kTestValue:
+      __ bind(&push_false);
+      __ PushRoot(Heap::kFalseValueRootIndex);
+      __ jmp(saved_false);
+      break;
+  }
+  true_label_ = saved_true;
+  false_label_ = saved_false;
+  // Convert current context to test context: End post-test code.
+}
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  Move(expr->context(), rax);
+}
+
+
+#undef __
+
+
 } }  // namespace v8::internal
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index fe224ad..6a0527c 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -57,11 +57,7 @@
   state->sp = sp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
   // Determine frame type.
-  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
-    return EXIT_DEBUG;
-  } else {
-    return EXIT;
-  }
+  return EXIT;
 }
 
 int JavaScriptFrame::GetProvidedParametersCount() const {
@@ -69,10 +65,10 @@
 }
 
 
-void ExitFrame::Iterate(ObjectVisitor* a) const {
-  // Exit frames on X64 do not contain any pointers. The arguments
-  // are traversed as part of the expression stack of the calling
-  // frame.
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  v->VisitPointer(&code_slot());
+  // The arguments are traversed as part of the expression stack of
+  // the calling frame.
 }
 
 byte* InternalFrame::GetCallerStackPointer() const {
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index eefaa0a..a92b248 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -63,7 +63,7 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kCodeOffset      = -2 * kPointerSize;
   static const int kSPOffset        = -1 * kPointerSize;
 
   static const int kCallerFPOffset  = +0 * kPointerSize;
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 2812df1..ccbc615 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -31,6 +31,7 @@
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
@@ -107,7 +108,7 @@
       StringDictionary::kElementsStartIndex * kPointerSize;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
-    __ movl(r1, FieldOperand(name, String::kLengthOffset));
+    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
     __ shrl(r1, Immediate(String::kHashShift));
     if (i > 0) {
       __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
@@ -239,18 +240,6 @@
 }
 
 
-#ifdef DEBUG
-// For use in assert below.
-static int TenToThe(int exponent) {
-  ASSERT(exponent <= 9);
-  ASSERT(exponent >= 1);
-  int answer = 10;
-  for (int i = 1; i < exponent; i++) answer *= 10;
-  return answer;
-}
-#endif
-
-
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rsp[0] : return address
@@ -313,7 +302,7 @@
   __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
   __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
-  __ movb(rax, Operand(rcx, rax, times_1, 0));
+  __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
   __ Integer32ToSmi(rax, rax);
   __ ret(0);
 
@@ -327,7 +316,7 @@
   __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
   __ j(above_equal, &slow);
   // Is the string an array index, with cached numeric value?
-  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
   __ testl(rbx, Immediate(String::kIsArrayIndexMask));
 
   // If the string is a symbol, do a quick inline probe of the receiver's
@@ -342,20 +331,16 @@
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
-  // Array index string: If short enough use cache in length/hash field (rbx).
-  // We assert that there are enough bits in an int32_t after the hash shift
-  // bits have been subtracted to allow space for the length and the cached
-  // array index.
+  // If the hash field contains an array index, pick it out.  The assert
+  // checks that the constants for the maximum number of digits for an
+  // array index cached in the hash field and the number of bits reserved
+  // for it do not conflict.
   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << (String::kShortLengthShift - String::kHashShift)));
+         (1 << String::kArrayIndexValueBits));
   __ bind(&index_string);
-  const int kLengthFieldLimit =
-      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
-  __ cmpl(rbx, Immediate(kLengthFieldLimit));
-  __ j(above_equal, &slow);
   __ movl(rax, rbx);
-  __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
-  __ shrl(rax, Immediate(String::kLongLengthShift));
+  __ and_(rax, Immediate(String::kArrayIndexHashMask));
+  __ shrl(rax, Immediate(String::kHashShift));
   __ jmp(&index_int);
 }
 
@@ -393,7 +378,7 @@
   // ExternalArray.
   // rax: index (as a smi)
   // rcx: JSObject
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
   __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                  Heap::RootIndexForExternalArrayType(array_type));
   __ j(not_equal, &slow);
@@ -413,7 +398,7 @@
       __ movsxbq(rax, Operand(rcx, rax, times_1, 0));
       break;
     case kExternalUnsignedByteArray:
-      __ movb(rax, Operand(rcx, rax, times_1, 0));
+      __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
       break;
     case kExternalShortArray:
       __ movsxwq(rax, Operand(rcx, rax, times_2, 0));
@@ -790,6 +775,8 @@
   // top of FPU stack: value
   if (array_type == kExternalFloatArray) {
     __ fstp_s(Operand(rcx, rbx, times_4, 0));
+    __ movq(rax, rdx);  // Return the original value.
+    __ ret(0);
   } else {
     // Need to perform float-to-int conversion.
     // Test the top of the FP stack for NaN.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b2f69bb..7115791 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -67,6 +67,12 @@
 }
 
 
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+  CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  j(below, on_stack_overflow);
+}
+
+
 static void RecordWriteHelper(MacroAssembler* masm,
                               Register object,
                               Register addr,
@@ -282,15 +288,19 @@
     RecordComment(msg);
   }
 #endif
+  // Disable stub call restrictions to always allow calls to abort.
+  set_allow_stub_calls(true);
+
   push(rax);
   movq(kScratchRegister, p0, RelocInfo::NONE);
   push(kScratchRegister);
   movq(kScratchRegister,
-       reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
+       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE);
   push(kScratchRegister);
   CallRuntime(Runtime::kAbort, 2);
   // will not return here
+  int3();
 }
 
 
@@ -402,9 +412,9 @@
   if (x == 0) {
     xor_(dst, dst);
   } else if (is_int32(x)) {
-    movq(dst, Immediate(x));
+    movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
-    movl(dst, Immediate(x));
+    movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else {
     movq(dst, x, RelocInfo::NONE);
   }
@@ -416,9 +426,9 @@
     xor_(kScratchRegister, kScratchRegister);
     movq(dst, kScratchRegister);
   } else if (is_int32(x)) {
-    movq(dst, Immediate(x));
+    movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
-    movl(dst, Immediate(x));
+    movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else {
     movq(kScratchRegister, x, RelocInfo::NONE);
     movq(dst, kScratchRegister);
@@ -1078,7 +1088,7 @@
   SmiToInteger32(rcx, src2);
   // Shift amount specified by lower 5 bits, not six as the shl opcode.
   and_(rcx, Immediate(0x1f));
-  shl(dst);
+  shl_cl(dst);
 }
 
 
@@ -1099,7 +1109,7 @@
   }
   SmiToInteger32(rcx, src2);
   orl(rcx, Immediate(kSmiShift));
-  shr(dst);  // Shift is rcx modulo 0x1f + 32.
+  shr_cl(dst);  // The shift amount is (rcx & 0x1f) + 32.
   shl(dst, Immediate(kSmiShift));
   testq(dst, dst);
   if (src1.is(rcx) || src2.is(rcx)) {
@@ -1135,7 +1145,7 @@
   }
   SmiToInteger32(rcx, src2);
   orl(rcx, Immediate(kSmiShift));
-  sar(dst);  // Shift 32 + original rcx & 0x1f.
+  sar_cl(dst);  // The shift amount is (original rcx & 0x1f) + 32.
   shl(dst, Immediate(kSmiShift));
   if (src1.is(rcx)) {
     movq(src1, kScratchRegister);
@@ -1787,9 +1797,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
-  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
   // Setup the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
@@ -1801,7 +1809,12 @@
   // Reserve room for entry stack pointer and push the debug marker.
   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // saved entry sp, patched before call
-  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  if (mode == ExitFrame::MODE_DEBUG) {
+    push(Immediate(0));
+  } else {
+    movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+    push(kScratchRegister);
+  }
 
   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
@@ -1821,7 +1834,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // TODO(1243899): This should be symmetric to
     // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
     // correct here, but computed for the other call. Very error
@@ -1860,17 +1873,17 @@
 }
 
 
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
   // Registers:
   // r15 : argv
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // It's okay to clobber register rbx below because we don't need
     // the function pointer after this.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     lea(rbx, Operand(rbp, kOffset));
     CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
   }
@@ -2085,6 +2098,11 @@
 
 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                                Register scratch) {
+  if (FLAG_debug_code) {
+    testq(result_end, Immediate(kObjectAlignmentMask));
+    Check(zero, "Unaligned allocation in new space");
+  }
+
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address();
 
@@ -2226,6 +2244,25 @@
 }
 
 
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+    // Load the function context (which is the incoming, outer context).
+    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+    for (int i = 1; i < context_chain_length; i++) {
+      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+    }
+    // The context may be an intermediate context, not a function context.
+    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {  // Context is the current function context.
+    // The context may be an intermediate context, not a function context.
+    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  }
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4c2f35b..9e7c25c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -98,6 +98,12 @@
 #endif
 
   // ---------------------------------------------------------------------------
+  // Stack limit support
+
+  // Do a simple test for stack overflow.  This does not handle the overflow.
+  void StackLimitCheck(Label* on_stack_limit_hit);
+
+  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -106,16 +112,16 @@
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register rax and
+  // Enter specific kind of exit frame; either in normal or
+  // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
   // to the first argument in register rsi.
-  void EnterExitFrame(StackFrame::Type type, int result_size = 1);
+  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
 
   // Leave the current exit frame. Expects/provides the return value in
   // register rax:rdx (untouched) and the pointer to the first
   // argument in register rsi.
-  void LeaveExitFrame(StackFrame::Type type, int result_size = 1);
+  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
 
 
   // ---------------------------------------------------------------------------
@@ -542,6 +548,9 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
+  // Find the function context up the context chain.
+  void LoadContext(Register dst, int context_chain_length);
+
   // ---------------------------------------------------------------------------
   // Runtime calls
 
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 5d17a2d..639f5e9 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -643,10 +643,10 @@
   Label stack_limit_hit;
   Label stack_ok;
 
-  ExternalReference stack_guard_limit =
-      ExternalReference::address_of_stack_guard_limit();
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit();
   __ movq(rcx, rsp);
-  __ movq(kScratchRegister, stack_guard_limit);
+  __ movq(kScratchRegister, stack_limit);
   __ subq(rcx, Operand(kScratchRegister, 0));
   // Handle it if the stack pointer is already below the stack limit.
   __ j(below_equal, &stack_limit_hit);
@@ -1079,7 +1079,7 @@
     // If there is a difference, update the object pointer and start and end
     // addresses in the RegExp stack frame to match the new value.
     const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = end_address - start_address;
+    int byte_length = static_cast<int>(end_address - start_address);
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
@@ -1196,9 +1196,9 @@
 void RegExpMacroAssemblerX64::CheckPreemption() {
   // Check for preemption.
   Label no_preempt;
-  ExternalReference stack_guard_limit =
-      ExternalReference::address_of_stack_guard_limit();
-  __ load_rax(stack_guard_limit);
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit();
+  __ load_rax(stack_limit);
   __ cmpq(rsp, rax);
   __ j(above, &no_preempt);
 
@@ -1209,18 +1209,16 @@
 
 
 void RegExpMacroAssemblerX64::CheckStackLimit() {
-  if (FLAG_check_stack) {
-    Label no_stack_overflow;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_regexp_stack_limit();
-    __ load_rax(stack_limit);
-    __ cmpq(backtrack_stackpointer(), rax);
-    __ j(above, &no_stack_overflow);
+  Label no_stack_overflow;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit();
+  __ load_rax(stack_limit);
+  __ cmpq(backtrack_stackpointer(), rax);
+  __ j(above, &no_stack_overflow);
 
-    SafeCall(&stack_overflow_label_);
+  SafeCall(&stack_overflow_label_);
 
-    __ bind(&no_stack_overflow);
-  }
+  __ bind(&no_stack_overflow);
 }
 
 
@@ -1287,11 +1285,6 @@
   }
 }
 
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  __ int3();  // Unused on x64.
-}
-
 #undef __
 
 #endif  // V8_NATIVE_REGEXP
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 998c909..c4f3a85 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -44,6 +44,12 @@
   static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
     return c_limit;
   }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    return try_catch_address;
+  }
+
+  static inline void UnregisterCTryCatch() { }
 };
 
 // Call the generated regexp code directly. The entry function pointer should
@@ -51,4 +57,7 @@
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
   entry(p0, p1, p2, p3, p4, p5, p6)
 
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  reinterpret_cast<TryCatch*>(try_catch_address)
+
 #endif  // V8_X64_SIMULATOR_X64_H_
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 58a3e0f..55b0b87 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -173,7 +173,7 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
-  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
   // Use only the low 32 bits of the map pointer.
   __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(scratch, Immediate(flags));
@@ -183,7 +183,7 @@
   ProbeTable(masm, flags, kPrimary, name, scratch);
 
   // Primary miss: Compute hash for secondary probe.
-  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
   __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(scratch, Immediate(flags));
   __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
@@ -323,11 +323,7 @@
 
   // Load length directly from the string.
   __ bind(&load_length);
-  __ and_(scratch, Immediate(kStringSizeMask));
   __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
-  // rcx is also the receiver.
-  __ lea(rcx, Operand(scratch, String::kLongLengthShift));
-  __ shr(rax);  // rcx is implicit shift register.
   __ Integer32ToSmi(rax, rax);
   __ ret(0);
 
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 781efd1..fe65d34 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -893,16 +893,15 @@
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
 
-  // Emit normal 'push' instructions for elements above stack pointer
-  // and use mov instructions if we are below stack pointer.
+  // If positive, we have to adjust the stack pointer.
+  int delta = end - stack_pointer_;
+  if (delta > 0) {
+    stack_pointer_ = end;
+    __ subq(rsp, Immediate(delta * kPointerSize));
+  }
+
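+  // After adjusting rsp every element in [start, end] is at or below the
+  // stack pointer and can be synced with plain stores.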
   for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) {
-      if (i <= stack_pointer_) {
-        SyncElementBelowStackPointer(i);
-      } else {
-        SyncElementByPushing(i);
-      }
-    }
+    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
   }
 }