Merge V8 at r7668: Initial merge by Git.

Change-Id: I1703c8b4f5c63052451a22cf3fb878abc9a0ec75
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 439236a..9541a58 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -393,9 +393,9 @@
     StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
@@ -405,7 +405,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 0744b8a..c06bc0c 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -38,22 +38,38 @@
 // -----------------------------------------------------------------------------
 // Implementation of CpuFeatures
 
-CpuFeatures::CpuFeatures()
-    : supported_(kDefaultCpuFeatures),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-void CpuFeatures::Probe(bool portable)  {
-  ASSERT(HEAP->HasBeenSetup());
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
   supported_ = kDefaultCpuFeatures;
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
 
-  Assembler assm(NULL, 0);
+  const int kBufferSize = 4 * KB;
+  VirtualMemory* memory = new VirtualMemory(kBufferSize);
+  if (!memory->IsReserved()) {
+    delete memory;
+    return;
+  }
+  ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+  if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+    delete memory;
+    return;
+  }
+
+  Assembler assm(NULL, memory->address(), kBufferSize);
   Label cpuid, done;
 #define __ assm.
   // Save old rsp, since we are going to modify the stack.
@@ -83,7 +99,7 @@
   // ecx:edx. Temporarily enable CPUID support because we know it's
   // safe here.
   __ bind(&cpuid);
-  __ movq(rax, Immediate(1));
+  __ movl(rax, Immediate(1));
   supported_ = kDefaultCpuFeatures | (1 << CPUID);
   { Scope fscope(CPUID);
     __ cpuid();
@@ -117,31 +133,20 @@
   __ ret(0);
 #undef __
 
-  CodeDesc desc;
-  assm.GetCode(&desc);
-  Isolate* isolate = Isolate::Current();
-  MaybeObject* maybe_code =
-      isolate->heap()->CreateCode(desc,
-                                  Code::ComputeFlags(Code::STUB),
-                                  Handle<Object>());
-  Object* code;
-  if (!maybe_code->ToObject(&code)) return;
-  if (!code->IsCode()) return;
-  PROFILE(isolate,
-          CodeCreateEvent(Logger::BUILTIN_TAG,
-                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
-  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
   supported_ = probe();
   found_by_runtime_probing_ = supported_;
   found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+  found_by_runtime_probing_ &= ~os_guarantees;
   // SSE2 and CMOV must be available on an X64 CPU.
   ASSERT(IsSupported(CPUID));
   ASSERT(IsSupported(SSE2));
   ASSERT(IsSupported(CMOV));
+
+  delete memory;
 }
 
 
@@ -339,8 +344,8 @@
 static void InitCoverageLog();
 #endif
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       code_targets_(100),
       positions_recorder_(this),
       emit_debug_code_(FLAG_debug_code) {
@@ -349,7 +354,7 @@
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
-      if (isolate()->assembler_spare_buffer() != NULL) {
+      if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
         buffer = isolate()->assembler_spare_buffer();
         isolate()->set_assembler_spare_buffer(NULL);
       }
@@ -383,7 +388,6 @@
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
 
-  last_pc_ = NULL;
 
 #ifdef GENERATED_CODE_COVERAGE
   InitCoverageLog();
@@ -393,7 +397,8 @@
 
 Assembler::~Assembler() {
   if (own_buffer_) {
-    if (isolate()->assembler_spare_buffer() == NULL &&
+    if (isolate() != NULL &&
+        isolate()->assembler_spare_buffer() == NULL &&
         buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
     } else {
@@ -438,7 +443,6 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
-  last_pc_ = NULL;
   ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
   if (L->is_linked()) {
     int current = L->pos();
@@ -465,7 +469,6 @@
 
 void Assembler::bind(NearLabel* L) {
   ASSERT(!L->is_bound());
-  last_pc_ = NULL;
   while (L->unresolved_branches_ > 0) {
     int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
     int disp = pc_offset() - branch_pos;
@@ -516,7 +519,8 @@
           reloc_info_writer.pos(), desc.reloc_size);
 
   // Switch buffers.
-  if (isolate()->assembler_spare_buffer() == NULL &&
+  if (isolate() != NULL &&
+      isolate()->assembler_spare_buffer() == NULL &&
       buffer_size_ == kMinimalBufferSize) {
     isolate()->set_assembler_spare_buffer(buffer_);
   } else {
@@ -525,9 +529,6 @@
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
-  if (last_pc_ != NULL) {
-    last_pc_ += pc_delta;
-  }
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
@@ -565,7 +566,6 @@
 
 void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(reg, op);
   emit(opcode);
   emit_operand(reg, op);
@@ -574,7 +574,6 @@
 
 void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4)  {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
@@ -591,7 +590,6 @@
 
 void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
@@ -612,7 +610,6 @@
                                  Register reg,
                                  const Operand& rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(reg, rm_reg);
   emit(opcode);
@@ -622,7 +619,6 @@
 
 void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
@@ -641,7 +637,6 @@
                                  Register reg,
                                  const Operand& rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(reg, rm_reg);
   emit(opcode);
   emit_operand(reg, rm_reg);
@@ -652,7 +647,6 @@
                                         Register dst,
                                         Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -672,7 +666,6 @@
                                         const Operand& dst,
                                         Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -690,7 +683,6 @@
                                            Register dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);  // Operand size override prefix.
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
@@ -712,7 +704,6 @@
                                            const Operand& dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);  // Operand size override prefix.
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
@@ -731,7 +722,6 @@
                                            Register dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -752,7 +742,6 @@
                                            const Operand& dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -770,7 +759,6 @@
                                           const Operand& dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   ASSERT(is_int8(src.value_) || is_uint8(src.value_));
   emit(0x80);
@@ -783,7 +771,6 @@
                                           Register dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (dst.code() > 3) {
     // Use 64-bit mode byte registers.
     emit_rex_64(dst);
@@ -797,7 +784,6 @@
 
 void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
   if (shift_amount.value_ == 1) {
     emit_rex_64(dst);
@@ -814,7 +800,6 @@
 
 void Assembler::shift(Register dst, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xD3);
   emit_modrm(subcode, dst);
@@ -823,7 +808,6 @@
 
 void Assembler::shift_32(Register dst, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xD3);
   emit_modrm(subcode, dst);
@@ -832,7 +816,6 @@
 
 void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(shift_amount.value_));  // illegal shift count
   if (shift_amount.value_ == 1) {
     emit_optional_rex_32(dst);
@@ -849,7 +832,6 @@
 
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xA3);
@@ -859,7 +841,6 @@
 
 void Assembler::bts(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xAB);
@@ -870,7 +851,6 @@
 void Assembler::call(Label* L) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
   emit(0xE8);
   if (L->is_bound()) {
@@ -892,7 +872,6 @@
 void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
   emit(0xE8);
   emit_code_target(target, rmode);
@@ -902,7 +881,6 @@
 void Assembler::call(Register adr) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: FF /2 r64.
   emit_optional_rex_32(adr);
   emit(0xFF);
@@ -913,7 +891,6 @@
 void Assembler::call(const Operand& op) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: FF /2 m64.
   emit_optional_rex_32(op);
   emit(0xFF);
@@ -928,7 +905,6 @@
 void Assembler::call(Address target) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
   emit(0xE8);
   Address source = pc_ + 4;
@@ -940,19 +916,16 @@
 
 void Assembler::clc() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF8);
 }
 
 void Assembler::cld() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xFC);
 }
 
 void Assembler::cdq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x99);
 }
 
@@ -967,7 +940,6 @@
   // 64-bit architecture.
   ASSERT(cc >= 0);  // Use mov for unconditional moves.
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
@@ -984,7 +956,6 @@
   }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
@@ -1001,7 +972,6 @@
   }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -1018,7 +988,6 @@
   }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -1030,16 +999,14 @@
 void Assembler::cmpb_al(Immediate imm8) {
   ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x3c);
   emit(imm8.value_);
 }
 
 
 void Assembler::cpuid() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
+  ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x0F);
   emit(0xA2);
 }
@@ -1047,7 +1014,6 @@
 
 void Assembler::cqo() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64();
   emit(0x99);
 }
@@ -1055,7 +1021,6 @@
 
 void Assembler::decq(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_modrm(0x1, dst);
@@ -1064,7 +1029,6 @@
 
 void Assembler::decq(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_operand(1, dst);
@@ -1073,7 +1037,6 @@
 
 void Assembler::decl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_modrm(0x1, dst);
@@ -1082,7 +1045,6 @@
 
 void Assembler::decl(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_operand(1, dst);
@@ -1091,7 +1053,6 @@
 
 void Assembler::decb(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (dst.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
@@ -1103,7 +1064,6 @@
 
 void Assembler::decb(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFE);
   emit_operand(1, dst);
@@ -1112,7 +1072,6 @@
 
 void Assembler::enter(Immediate size) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xC8);
   emitw(size.value_);  // 16 bit operand, always.
   emit(0);
@@ -1121,14 +1080,12 @@
 
 void Assembler::hlt() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF4);
 }
 
 
 void Assembler::idivq(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src);
   emit(0xF7);
   emit_modrm(0x7, src);
@@ -1137,7 +1094,6 @@
 
 void Assembler::idivl(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src);
   emit(0xF7);
   emit_modrm(0x7, src);
@@ -1146,7 +1102,6 @@
 
 void Assembler::imul(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src);
   emit(0xF7);
   emit_modrm(0x5, src);
@@ -1155,7 +1110,6 @@
 
 void Assembler::imul(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1165,7 +1119,6 @@
 
 void Assembler::imul(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1175,7 +1128,6 @@
 
 void Assembler::imul(Register dst, Register src, Immediate imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   if (is_int8(imm.value_)) {
     emit(0x6B);
@@ -1191,7 +1143,6 @@
 
 void Assembler::imull(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1201,7 +1152,6 @@
 
 void Assembler::imull(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1211,7 +1161,6 @@
 
 void Assembler::imull(Register dst, Register src, Immediate imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   if (is_int8(imm.value_)) {
     emit(0x6B);
@@ -1227,7 +1176,6 @@
 
 void Assembler::incq(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_modrm(0x0, dst);
@@ -1236,7 +1184,6 @@
 
 void Assembler::incq(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_operand(0, dst);
@@ -1245,7 +1192,6 @@
 
 void Assembler::incl(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_operand(0, dst);
@@ -1254,7 +1200,6 @@
 
 void Assembler::incl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_modrm(0, dst);
@@ -1263,7 +1208,6 @@
 
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xCC);
 }
 
@@ -1276,7 +1220,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint4(cc));
   if (L->is_bound()) {
     const int short_size = 2;
@@ -1314,7 +1257,6 @@
                   Handle<Code> target,
                   RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint4(cc));
   // 0000 1111 1000 tttn #32-bit disp.
   emit(0x0F);
@@ -1325,7 +1267,6 @@
 
 void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(0 <= cc && cc < 16);
   if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
   if (L->is_bound()) {
@@ -1346,7 +1287,6 @@
 
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   const int short_size = sizeof(int8_t);
   const int long_size = sizeof(int32_t);
   if (L->is_bound()) {
@@ -1379,7 +1319,6 @@
 
 void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1001 #32-bit disp.
   emit(0xE9);
   emit_code_target(target, rmode);
@@ -1388,7 +1327,6 @@
 
 void Assembler::jmp(NearLabel* L) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (L->is_bound()) {
     const int short_size = sizeof(int8_t);
     int offs = L->pos() - pc_offset();
@@ -1407,7 +1345,6 @@
 
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode FF/4 r64.
   emit_optional_rex_32(target);
   emit(0xFF);
@@ -1417,7 +1354,6 @@
 
 void Assembler::jmp(const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode FF/4 m64.
   emit_optional_rex_32(src);
   emit(0xFF);
@@ -1427,7 +1363,6 @@
 
 void Assembler::lea(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x8D);
   emit_operand(dst, src);
@@ -1436,7 +1371,6 @@
 
 void Assembler::leal(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x8D);
   emit_operand(dst, src);
@@ -1445,7 +1379,6 @@
 
 void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x48);  // REX.W
   emit(0xA1);
   emitq(reinterpret_cast<uintptr_t>(value), mode);
@@ -1459,15 +1392,18 @@
 
 void Assembler::leave() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xC9);
 }
 
 
 void Assembler::movb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit_rex_32(dst, src);
+  if (dst.code() > 3) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst, src);
+  } else {
+    emit_optional_rex_32(dst, src);
+  }
   emit(0x8A);
   emit_operand(dst, src);
 }
@@ -1475,18 +1411,21 @@
 
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit_rex_32(dst);
-  emit(0xC6);
-  emit_modrm(0x0, dst);
+  if (dst.code() > 3) {
+    emit_rex_32(dst);
+  }
+  emit(0xB0 + dst.low_bits());
   emit(imm.value_);
 }
 
 
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit_rex_32(src, dst);
+  if (src.code() > 3) {
+    emit_rex_32(src, dst);
+  } else {
+    emit_optional_rex_32(src, dst);
+  }
   emit(0x88);
   emit_operand(src, dst);
 }
@@ -1494,7 +1433,6 @@
 
 void Assembler::movw(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(src, dst);
   emit(0x89);
@@ -1504,7 +1442,6 @@
 
 void Assembler::movl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x8B);
   emit_operand(dst, src);
@@ -1513,7 +1450,6 @@
 
 void Assembler::movl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_optional_rex_32(src, dst);
     emit(0x89);
@@ -1528,7 +1464,6 @@
 
 void Assembler::movl(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src, dst);
   emit(0x89);
   emit_operand(src, dst);
@@ -1537,27 +1472,23 @@
 
 void Assembler::movl(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xC7);
   emit_operand(0x0, dst);
-  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+  emit(value);
 }
 
 
 void Assembler::movl(Register dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
-  emit(0xC7);
-  emit_modrm(0x0, dst);
-  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+  emit(0xB8 + dst.low_bits());
+  emit(value);
 }
 
 
 void Assembler::movq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x8B);
   emit_operand(dst, src);
@@ -1566,7 +1497,6 @@
 
 void Assembler::movq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_rex_64(src, dst);
     emit(0x89);
@@ -1581,7 +1511,6 @@
 
 void Assembler::movq(Register dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xC7);
   emit_modrm(0x0, dst);
@@ -1591,7 +1520,6 @@
 
 void Assembler::movq(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x89);
   emit_operand(src, dst);
@@ -1603,7 +1531,6 @@
   // address is not GC safe. Use the handle version instead.
   ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xB8 | dst.low_bits());
   emitq(reinterpret_cast<uintptr_t>(value), rmode);
@@ -1625,7 +1552,6 @@
     // value.
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xB8 | dst.low_bits());
   emitq(value, rmode);
@@ -1640,7 +1566,6 @@
 
 void Assembler::movq(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xC7);
   emit_operand(0, dst);
@@ -1652,7 +1577,6 @@
 // (as a 32-bit offset sign extended to 64-bit).
 void Assembler::movl(const Operand& dst, Label* src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xC7);
   emit_operand(0, dst);
@@ -1682,7 +1606,6 @@
     movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
   } else {
     EnsureSpace ensure_space(this);
-    last_pc_ = pc_;
     ASSERT(value->IsHeapObject());
     ASSERT(!HEAP->InNewSpace(*value));
     emit_rex_64(dst);
@@ -1694,7 +1617,6 @@
 
 void Assembler::movsxbq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xBE);
@@ -1704,7 +1626,6 @@
 
 void Assembler::movsxwq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xBF);
@@ -1714,7 +1635,6 @@
 
 void Assembler::movsxlq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x63);
   emit_modrm(dst, src);
@@ -1723,7 +1643,6 @@
 
 void Assembler::movsxlq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x63);
   emit_operand(dst, src);
@@ -1732,7 +1651,6 @@
 
 void Assembler::movzxbq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB6);
@@ -1742,7 +1660,6 @@
 
 void Assembler::movzxbl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB6);
@@ -1752,7 +1669,6 @@
 
 void Assembler::movzxwq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB7);
@@ -1762,7 +1678,6 @@
 
 void Assembler::movzxwl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB7);
@@ -1772,7 +1687,6 @@
 
 void Assembler::repmovsb() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit(0xA4);
 }
@@ -1780,7 +1694,6 @@
 
 void Assembler::repmovsw() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);  // Operand size override.
   emit(0xF3);
   emit(0xA4);
@@ -1789,7 +1702,6 @@
 
 void Assembler::repmovsl() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit(0xA5);
 }
@@ -1797,7 +1709,6 @@
 
 void Assembler::repmovsq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_rex_64();
   emit(0xA5);
@@ -1806,7 +1717,6 @@
 
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src);
   emit(0xF7);
   emit_modrm(0x4, src);
@@ -1815,7 +1725,6 @@
 
 void Assembler::neg(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_modrm(0x3, dst);
@@ -1824,7 +1733,6 @@
 
 void Assembler::negl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xF7);
   emit_modrm(0x3, dst);
@@ -1833,7 +1741,6 @@
 
 void Assembler::neg(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_operand(3, dst);
@@ -1842,14 +1749,12 @@
 
 void Assembler::nop() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x90);
 }
 
 
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_modrm(0x2, dst);
@@ -1858,7 +1763,6 @@
 
 void Assembler::not_(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_operand(2, dst);
@@ -1867,7 +1771,6 @@
 
 void Assembler::notl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xF7);
   emit_modrm(0x2, dst);
@@ -1892,7 +1795,6 @@
   ASSERT(1 <= n);
   ASSERT(n <= 9);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   switch (n) {
   case 1:
     emit(0x90);
@@ -1963,7 +1865,6 @@
 
 void Assembler::pop(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0x58 | dst.low_bits());
 }
@@ -1971,7 +1872,6 @@
 
 void Assembler::pop(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0x8F);
   emit_operand(0, dst);
@@ -1980,14 +1880,12 @@
 
 void Assembler::popfq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9D);
 }
 
 
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src);
   emit(0x50 | src.low_bits());
 }
@@ -1995,7 +1893,6 @@
 
 void Assembler::push(const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src);
   emit(0xFF);
   emit_operand(6, src);
@@ -2004,7 +1901,6 @@
 
 void Assembler::push(Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (is_int8(value.value_)) {
     emit(0x6A);
     emit(value.value_);  // Emit low byte of value.
@@ -2017,7 +1913,6 @@
 
 void Assembler::push_imm32(int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x68);
   emitl(imm32);
 }
@@ -2025,14 +1920,12 @@
 
 void Assembler::pushfq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9C);
 }
 
 
 void Assembler::rdtsc() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x0F);
   emit(0x31);
 }
@@ -2040,7 +1933,6 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint16(imm16));
   if (imm16 == 0) {
     emit(0xC3);
@@ -2058,7 +1950,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint4(cc));
   if (reg.code() > 3) {  // Use x64 byte registers, where different.
     emit_rex_32(reg);
@@ -2071,7 +1962,6 @@
 
 void Assembler::shld(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xA5);
@@ -2081,7 +1971,6 @@
 
 void Assembler::shrd(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xAD);
@@ -2091,7 +1980,6 @@
 
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.is(rax) || dst.is(rax)) {  // Single-byte encoding
     Register other = src.is(rax) ? dst : src;
     emit_rex_64(other);
@@ -2110,7 +1998,6 @@
 
 void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x48);  // REX.W
   emit(0xA3);
   emitq(reinterpret_cast<uintptr_t>(dst), mode);
@@ -2124,7 +2011,6 @@
 
 void Assembler::testb(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_rex_32(src, dst);
     emit(0x84);
@@ -2143,7 +2029,6 @@
 void Assembler::testb(Register reg, Immediate mask) {
   ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (reg.is(rax)) {
     emit(0xA8);
     emit(mask.value_);  // Low byte emitted.
@@ -2162,7 +2047,6 @@
 void Assembler::testb(const Operand& op, Immediate mask) {
   ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(rax, op);
   emit(0xF6);
   emit_operand(rax, op);  // Operation code 0
@@ -2172,7 +2056,6 @@
 
 void Assembler::testb(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (reg.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg, op);
@@ -2186,7 +2069,6 @@
 
 void Assembler::testl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_optional_rex_32(src, dst);
     emit(0x85);
@@ -2206,7 +2088,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (reg.is(rax)) {
     emit(0xA9);
     emit(mask);
@@ -2226,7 +2107,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(rax, op);
   emit(0xF7);
   emit_operand(rax, op);  // Operation code 0
@@ -2236,7 +2116,6 @@
 
 void Assembler::testq(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(reg, op);
   emit(0x85);
   emit_operand(reg, op);
@@ -2245,7 +2124,6 @@
 
 void Assembler::testq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_rex_64(src, dst);
     emit(0x85);
@@ -2260,7 +2138,6 @@
 
 void Assembler::testq(Register dst, Immediate mask) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (dst.is(rax)) {
     emit_rex_64();
     emit(0xA9);
@@ -2279,14 +2156,12 @@
 
 void Assembler::fld(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC0, i);
 }
 
 
 void Assembler::fld1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE8);
 }
@@ -2294,7 +2169,6 @@
 
 void Assembler::fldz() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xEE);
 }
@@ -2302,7 +2176,6 @@
 
 void Assembler::fldpi() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xEB);
 }
@@ -2310,7 +2183,6 @@
 
 void Assembler::fldln2() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xED);
 }
@@ -2318,7 +2190,6 @@
 
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xD9);
   emit_operand(0, adr);
@@ -2327,7 +2198,6 @@
 
 void Assembler::fld_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDD);
   emit_operand(0, adr);
@@ -2336,7 +2206,6 @@
 
 void Assembler::fstp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xD9);
   emit_operand(3, adr);
@@ -2345,7 +2214,6 @@
 
 void Assembler::fstp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDD);
   emit_operand(3, adr);
@@ -2355,14 +2223,12 @@
 void Assembler::fstp(int index) {
   ASSERT(is_uint3(index));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xD8, index);
 }
 
 
 void Assembler::fild_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(0, adr);
@@ -2371,7 +2237,6 @@
 
 void Assembler::fild_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDF);
   emit_operand(5, adr);
@@ -2380,7 +2245,6 @@
 
 void Assembler::fistp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(3, adr);
@@ -2388,9 +2252,8 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(1, adr);
@@ -2398,9 +2261,8 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDD);
   emit_operand(1, adr);
@@ -2409,7 +2271,6 @@
 
 void Assembler::fist_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(2, adr);
@@ -2418,7 +2279,6 @@
 
 void Assembler::fistp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDF);
   emit_operand(7, adr);
@@ -2427,7 +2287,6 @@
 
 void Assembler::fabs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE1);
 }
@@ -2435,7 +2294,6 @@
 
 void Assembler::fchs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE0);
 }
@@ -2443,7 +2301,6 @@
 
 void Assembler::fcos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xFF);
 }
@@ -2451,7 +2308,6 @@
 
 void Assembler::fsin() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xFE);
 }
@@ -2459,7 +2315,6 @@
 
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF1);
 }
@@ -2467,21 +2322,18 @@
 
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC0, i);
 }
 
 
 void Assembler::fsub(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xE8, i);
 }
 
 
 void Assembler::fisub_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDA);
   emit_operand(4, adr);
@@ -2490,56 +2342,48 @@
 
 void Assembler::fmul(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC8, i);
 }
 
 
 void Assembler::fdiv(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xF8, i);
 }
 
 
 void Assembler::faddp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC0, i);
 }
 
 
 void Assembler::fsubp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE8, i);
 }
 
 
 void Assembler::fsubrp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE0, i);
 }
 
 
 void Assembler::fmulp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC8, i);
 }
 
 
 void Assembler::fdivp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xF8, i);
 }
 
 
 void Assembler::fprem() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF8);
 }
@@ -2547,7 +2391,6 @@
 
 void Assembler::fprem1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF5);
 }
@@ -2555,14 +2398,12 @@
 
 void Assembler::fxch(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC8, i);
 }
 
 
 void Assembler::fincstp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF7);
 }
@@ -2570,14 +2411,12 @@
 
 void Assembler::ffree(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xC0, i);
 }
 
 
 void Assembler::ftst() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE4);
 }
@@ -2585,14 +2424,12 @@
 
 void Assembler::fucomp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xE8, i);
 }
 
 
 void Assembler::fucompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDA);
   emit(0xE9);
 }
@@ -2600,7 +2437,6 @@
 
 void Assembler::fucomi(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDB);
   emit(0xE8 + i);
 }
@@ -2608,7 +2444,6 @@
 
 void Assembler::fucomip() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDF);
   emit(0xE9);
 }
@@ -2616,7 +2451,6 @@
 
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDE);
   emit(0xD9);
 }
@@ -2624,7 +2458,6 @@
 
 void Assembler::fnstsw_ax() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDF);
   emit(0xE0);
 }
@@ -2632,14 +2465,12 @@
 
 void Assembler::fwait() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9B);
 }
 
 
 void Assembler::frndint() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xFC);
 }
@@ -2647,7 +2478,6 @@
 
 void Assembler::fnclex() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDB);
   emit(0xE2);
 }
@@ -2657,7 +2487,6 @@
   // TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
   // in 64-bit mode. Test CpuID.
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9E);
 }
 
@@ -2673,7 +2502,6 @@
 
 void Assembler::movd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2684,7 +2512,6 @@
 
 void Assembler::movd(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(src, dst);
   emit(0x0F);
@@ -2695,7 +2522,6 @@
 
 void Assembler::movq(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2706,7 +2532,6 @@
 
 void Assembler::movq(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_rex_64(src, dst);
   emit(0x0F);
@@ -2715,10 +2540,26 @@
 }
 
 
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+void Assembler::movq(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
+  if (dst.low_bits() == 4) {
+    // Avoid unnecessary SIB byte.
+    emit(0xf3);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x7e);
+    emit_sse_operand(dst, src);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0xD6);
+    emit_sse_operand(src, dst);
+  }
+}
+
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
   emit(0x66);
   emit_rex_64(src, dst);
   emit(0x0F);
@@ -2728,9 +2569,7 @@
 
 
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2742,7 +2581,6 @@
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
   ASSERT(is_uint2(imm8));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2755,7 +2593,6 @@
 
 void Assembler::movsd(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);  // double
   emit_optional_rex_32(src, dst);
   emit(0x0F);
@@ -2766,7 +2603,6 @@
 
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);  // double
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2777,7 +2613,6 @@
 
 void Assembler::movsd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);  // double
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2786,9 +2621,44 @@
 }
 
 
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
+void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);  // single
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2799,7 +2669,6 @@
 
 void Assembler::movss(const Operand& src, XMMRegister dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);  // single
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2810,7 +2679,6 @@
 
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2821,7 +2689,6 @@
 
 void Assembler::cvttss2si(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2832,7 +2699,6 @@
 
 void Assembler::cvttsd2si(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2843,7 +2709,6 @@
 
 void Assembler::cvttsd2si(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2854,7 +2719,6 @@
 
 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2865,7 +2729,6 @@
 
 void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2876,7 +2739,6 @@
 
 void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2887,7 +2749,6 @@
 
 void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2898,7 +2759,6 @@
 
 void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2909,7 +2769,6 @@
 
 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2920,7 +2779,6 @@
 
 void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2931,7 +2789,6 @@
 
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2942,7 +2799,6 @@
 
 void Assembler::cvtsd2si(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2953,7 +2809,6 @@
 
 void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2964,7 +2819,6 @@
 
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2975,7 +2829,6 @@
 
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2986,7 +2839,6 @@
 
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2997,7 +2849,6 @@
 
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3008,7 +2859,6 @@
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3019,7 +2869,6 @@
 
 void Assembler::orpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3030,7 +2879,6 @@
 
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3039,9 +2887,17 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3052,7 +2908,6 @@
 
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -3063,7 +2918,6 @@
 
 void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -3072,9 +2926,23 @@
 }
 
 
+void Assembler::roundsd(XMMRegister dst, XMMRegister src,
+                        Assembler::RoundingMode mode) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x3a);
+  emit(0x0b);
+  emit_sse_operand(dst, src);
+  // Mask precision exception.
+  emit(static_cast<byte>(mode) | 0x8);
+}
+
+
 void Assembler::movmskpd(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0f);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 52aca63..8a9938b 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -434,14 +434,15 @@
 //   } else {
 //     // Generate standard x87 or SSE2 floating point code.
 //   }
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -449,51 +450,65 @@
     if (f == SAHF && !FLAG_enable_sahf) return false;
     return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    uint64_t enabled = isolate->enabled_cpu_features();
+    return (enabled & (V8_UINT64_C(1) << f)) != 0;
   }
+#endif
+
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      uint64_t mask = (V8_UINT64_C(1) << f);
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      uint64_t mask = V8_UINT64_C(1) << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-          (cpu_features_->found_by_runtime_probing_ & mask) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= mask;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = isolate_->enabled_cpu_features();
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    uint64_t old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    uint64_t old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
- private:
-  CpuFeatures();
 
+ private:
   // Safe defaults include SSE2 and CMOV for X64. It is always available, if
   // anyone checks, but they shouldn't need to check.
   // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
   //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
   static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
 
-  uint64_t supported_;
-  uint64_t enabled_;
-  uint64_t found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static uint64_t supported_;
+  static uint64_t found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -526,7 +541,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
@@ -1276,15 +1291,24 @@
   void movd(Register dst, XMMRegister src);
   void movq(XMMRegister dst, Register src);
   void movq(Register dst, XMMRegister src);
+  void movq(XMMRegister dst, XMMRegister src);
   void extractps(Register dst, XMMRegister src, byte imm8);
 
-  void movsd(const Operand& dst, XMMRegister src);
+  // Don't use this unless it's important to keep the
+  // top half of the destination register unchanged.
+  // Use movaps when moving double values and movq for integer
+  // values in xmm registers.
   void movsd(XMMRegister dst, XMMRegister src);
+
+  void movsd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, const Operand& src);
 
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqa(XMMRegister dst, const Operand& src);
 
+  void movapd(XMMRegister dst, XMMRegister src);
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
 
@@ -1316,11 +1340,21 @@
   void andpd(XMMRegister dst, XMMRegister src);
   void orpd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
 
+  enum RoundingMode {
+    kRoundToNearest = 0x0,
+    kRoundDown      = 0x1,
+    kRoundUp        = 0x2,
+    kRoundToZero    = 0x3
+  };
+
+  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
   void movmskpd(Register dst, XMMRegister src);
 
   // The first argument is the reg field, the second argument is the r/m field.
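
The RoundingMode values map onto the two-bit rounding-control field of the
SSE4.1 roundsd immediate. A hedged usage sketch (assuming roundsd is only
reachable behind a CPU-feature check, as the Scope mechanism above enforces):

  // Compute floor(xmm0) in place; illustrative only.
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatures::Scope scope(SSE4_1);
    __ roundsd(xmm0, xmm0, Assembler::kRoundDown);
  }
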
@@ -1574,8 +1608,6 @@
   RelocInfoWriter reloc_info_writer;
 
   List< Handle<Code> > code_targets_;
-  // push-pop elimination
-  byte* last_pc_;
 
   PositionsRecorder positions_recorder_;
 
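
With the Isolate* parameter added above, every Assembler construction site
names its isolate explicitly. A hedged sketch of the two call shapes
(buffer and buffer_size are placeholder names):

  Assembler assm(masm->isolate(), buffer, buffer_size);  // Normal codegen.
  Assembler early(NULL, buffer, buffer_size);  // Before an isolate exists.
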
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 21d3e54..a549633 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
 
@@ -96,7 +96,7 @@
   // rax: number of arguments
   __ bind(&non_function_call);
   // Set expected number of arguments to zero (not changing rax).
-  __ movq(rbx, Immediate(0));
+  __ Set(rbx, 0);
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
@@ -1372,7 +1372,7 @@
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
-    __ movq(rcx, Immediate(-1));  // account for receiver
+    __ Set(rcx, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
@@ -1391,7 +1391,7 @@
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
-    __ movq(rcx, Immediate(-1));  // account for receiver
+    __ Set(rcx, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
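
The recurring movq(reg, Immediate(n)) -> Set(reg, n) rewrites in this file
funnel immediate loads through the MacroAssembler helper, which picks the
shortest encoding. A schematic of the expected choices (the exact encodings
are the helper's concern; treat these as assumptions):

  __ Set(rbx, 0);   // Typically xorl(rbx, rbx): no immediate bytes at all.
  __ Set(rax, 1);   // Typically movl(rax, Immediate(1)); upper bits zeroed.
  __ Set(rcx, -1);  // Needs sign extension: movq with a 32-bit immediate.
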
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0fb827b..76fcc88 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -266,14 +266,14 @@
   __ j(not_equal, &true_result);
   // HeapNumber => false iff +0, -0, or NaN.
   // These three cases set the zero flag when compared to zero using ucomisd.
-  __ xorpd(xmm0, xmm0);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
   __ j(zero, &false_result);
   // Fall through to |true_result|.
 
   // Return 1/0 for true/false in rax.
   __ bind(&true_result);
-  __ movq(rax, Immediate(1));
+  __ Set(rax, 1);
   __ ret(1 * kPointerSize);
   __ bind(&false_result);
   __ Set(rax, 0);
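
Switching xorpd to xorps for zeroing is a pure encoding win: both clear all
128 bits of the register, but xorps omits the 0x66 operand-size prefix and
is one byte shorter.

  __ xorps(xmm0, xmm0);  // 0F 57 C0: three bytes, xmm0 = 0.
  // xorpd(xmm0, xmm0) assembles to 66 0F 57 C0: same effect, four bytes.
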
@@ -281,166 +281,6 @@
 }
 
 
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
-               op_name,
-               overwrite_name,
-               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
-               args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "",
-               static_operands_type_.ToString(),
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (!(left.is(left_arg) && right.is(right_arg))) {
-      if (left.is(right_arg) && right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          SetArgsReversed();
-        } else {
-          __ xchg(left, right);
-        }
-      } else if (left.is(left_arg)) {
-        __ movq(right_arg, right);
-      } else if (right.is(right_arg)) {
-        __ movq(left_arg, left);
-      } else if (left.is(right_arg)) {
-        if (IsOperationCommutative()) {
-          __ movq(left_arg, right);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying left argument.
-          __ movq(left_arg, left);
-          __ movq(right_arg, right);
-        }
-      } else if (right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          __ movq(right_arg, left);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying right argument.
-          __ movq(right_arg, right);
-          __ movq(left_arg, left);
-        }
-      } else {
-        // Order of moves is not important.
-        __ movq(left_arg, left);
-        __ movq(right_arg, right);
-      }
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Smi* right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ Push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (left.is(left_arg)) {
-      __ Move(right_arg, right);
-    } else if (left.is(right_arg) && IsOperationCommutative()) {
-      __ Move(left_arg, right);
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, left and right_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite left before moving
-      // it to left_arg.
-      __ movq(left_arg, left);
-      __ Move(right_arg, right);
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-  Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Smi* left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ Push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (right.is(right_arg)) {
-      __ Move(left_arg, left);
-    } else if (right.is(left_arg) && IsOperationCommutative()) {
-      __ Move(right_arg, left);
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, right and left_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite right before moving
-      // it to right_arg.
-      __ movq(right_arg, right);
-      __ Move(left_arg, left);
-    }
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-  Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
 class FloatingPointHelper : public AllStatic {
  public:
   // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
@@ -460,561 +300,28 @@
   // As above, but we know the operands to be numbers. In that case,
   // conversion can't fail.
   static void LoadNumbersAsIntegers(MacroAssembler* masm);
+
+  // Tries to convert two values to smis losslessly.
+  // This fails if an argument is neither a Smi nor a HeapNumber,
+  // or if it's a HeapNumber with a value that can't be converted
+  // losslessly to a Smi. In that case, control transitions to the
+  // on_not_smis label.
+  // On success, either control goes to the on_success label (if one is
+  // provided), or it falls through at the end of the code (if on_success
+  // is NULL).
+  // On success, both first and second hold Smi tagged values.
+  // At least one of first and second must be a non-Smi when entering.
+  static void NumbersToSmis(MacroAssembler* masm,
+                            Register first,
+                            Register second,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            Label* on_success,
+                            Label* on_not_smis);
 };
 
 
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
-  // dividend in rax and rdx free for the division.  Use rax, rbx for those.
-  Comment load_comment(masm, "-- Load arguments");
-  Register left = rdx;
-  Register right = rax;
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    left = rax;
-    right = rbx;
-    if (HasArgsInRegisters()) {
-      __ movq(rbx, rax);
-      __ movq(rax, rdx);
-    }
-  }
-  if (!HasArgsInRegisters()) {
-    __ movq(right, Operand(rsp, 1 * kPointerSize));
-    __ movq(left, Operand(rsp, 2 * kPointerSize));
-  }
-
-  Label not_smis;
-  // 2. Smi check both operands.
-  if (static_operands_type_.IsSmi()) {
-    // Skip smi check if we know that both arguments are smis.
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    if (op_ == Token::BIT_OR) {
-      // Handle OR here, since we do extra smi-checking in the or code below.
-      __ SmiOr(right, right, left);
-      GenerateReturn(masm);
-      return;
-    }
-  } else {
-    if (op_ != Token::BIT_OR) {
-      // Skip the check for OR as it is better combined with the
-      // actual operation.
-      Comment smi_check_comment(masm, "-- Smi check arguments");
-      __ JumpIfNotBothSmi(left, right, &not_smis);
-    }
-  }
-
-  // 3. Operands are both smis (except for OR), perform the operation leaving
-  // the result in rax and check the result if necessary.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  Label use_fp_on_smis;
-  switch (op_) {
-    case Token::ADD: {
-      ASSERT(right.is(rax));
-      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
-      break;
-    }
-
-    case Token::SUB: {
-      __ SmiSub(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-    }
-
-    case Token::MUL:
-      ASSERT(right.is(rax));
-      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
-      break;
-
-    case Token::DIV:
-      ASSERT(left.is(rax));
-      __ SmiDiv(left, left, right, &use_fp_on_smis);
-      break;
-
-    case Token::MOD:
-      ASSERT(left.is(rax));
-      __ SmiMod(left, left, right, slow);
-      break;
-
-    case Token::BIT_OR:
-      ASSERT(right.is(rax));
-      __ movq(rcx, right);  // Save the right operand.
-      __ SmiOr(right, right, left);  // BIT_OR is commutative.
-      __ testb(right, Immediate(kSmiTagMask));
-      __ j(not_zero, &not_smis);
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(rax));
-      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
-      break;
-
-    case Token::BIT_XOR:
-      ASSERT(right.is(rax));
-      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
-      break;
-
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      switch (op_) {
-        case Token::SAR:
-          __ SmiShiftArithmeticRight(left, left, right);
-          break;
-        case Token::SHR:
-          __ SmiShiftLogicalRight(left, left, right, slow);
-          break;
-        case Token::SHL:
-          __ SmiShiftLeft(left, left, right);
-          break;
-        default:
-          UNREACHABLE();
-      }
-      __ movq(rax, left);
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // 4. Emit return of result in rax.
-  GenerateReturn(masm);
-
-  // 5. For some operations emit inline code to perform floating point
-  // operations on known smis (e.g., if the result of the operation
-  // overflowed the smi range).
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      ASSERT(use_fp_on_smis.is_linked());
-      __ bind(&use_fp_on_smis);
-      if (op_ == Token::DIV) {
-        __ movq(rdx, rax);
-        __ movq(rax, rbx);
-      }
-      // left is rdx, right is rax.
-      __ AllocateHeapNumber(rbx, rcx, slow);
-      FloatingPointHelper::LoadSSE2SmiOperands(masm);
-      switch (op_) {
-        case Token::ADD: __ addsd(xmm0, xmm1); break;
-        case Token::SUB: __ subsd(xmm0, xmm1); break;
-        case Token::MUL: __ mulsd(xmm0, xmm1); break;
-        case Token::DIV: __ divsd(xmm0, xmm1); break;
-        default: UNREACHABLE();
-      }
-      __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-      __ movq(rax, rbx);
-      GenerateReturn(masm);
-    }
-    default:
-      break;
-  }
-
-  // 6. Non-smi operands, fall out to the non-smi code with the operands in
-  // rdx and rax.
-  Comment done_comment(masm, "-- Enter non-smi code");
-  __ bind(&not_smis);
-
-  switch (op_) {
-    case Token::DIV:
-    case Token::MOD:
-      // Operands are in rax, rbx at this point.
-      __ movq(rdx, rax);
-      __ movq(rax, rbx);
-      break;
-
-    case Token::BIT_OR:
-      // Right operand is saved in rcx and rax was destroyed by the smi
-      // operation.
-      __ movq(rax, rcx);
-      break;
-
-    default:
-      break;
-  }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (ShouldGenerateSmiCode()) {
-    GenerateSmiCode(masm, &call_runtime);
-  } else if (op_ != Token::MOD) {
-    if (!HasArgsInRegisters()) {
-      GenerateLoadArguments(masm);
-    }
-  }
-  // Floating point case.
-  if (ShouldGenerateFPCode()) {
-    switch (op_) {
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV: {
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-smi argument occurs
-          // (and only if smi code is generated). This is the right moment to
-          // patch to HEAP_NUMBERS state. The transition is attempted only for
-          // the four basic operations. The stub stays in the DEFAULT state
-          // forever for all other operations (also if smi code is skipped).
-          GenerateTypeTransition(masm);
-          break;
-        }
-
-        Label not_floats;
-        // rax: y
-        // rdx: x
-        if (static_operands_type_.IsNumber()) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(rdx);
-            __ AbortIfNotNumber(rax);
-          }
-          FloatingPointHelper::LoadSSE2NumberOperands(masm);
-        } else {
-          FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
-        }
-
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        // Allocate a heap number, if needed.
-        Label skip_allocation;
-        OverwriteMode mode = mode_;
-        if (HasArgsReversed()) {
-          if (mode == OVERWRITE_RIGHT) {
-            mode = OVERWRITE_LEFT;
-          } else if (mode == OVERWRITE_LEFT) {
-            mode = OVERWRITE_RIGHT;
-          }
-        }
-        switch (mode) {
-          case OVERWRITE_LEFT:
-            __ JumpIfNotSmi(rdx, &skip_allocation);
-            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-            __ movq(rdx, rbx);
-            __ bind(&skip_allocation);
-            __ movq(rax, rdx);
-            break;
-          case OVERWRITE_RIGHT:
-            // If the argument in rax is already an object, we skip the
-            // allocation of a heap number.
-            __ JumpIfNotSmi(rax, &skip_allocation);
-            // Fall through!
-          case NO_OVERWRITE:
-            // Allocate a heap number for the result. Keep rax and rdx intact
-            // for the possible runtime call.
-            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-            __ movq(rax, rbx);
-            __ bind(&skip_allocation);
-            break;
-          default: UNREACHABLE();
-        }
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-        GenerateReturn(masm);
-        __ bind(&not_floats);
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            !HasSmiCodeInStub()) {
-            // Execution reaches this point when the first non-number argument
-            // occurs (and only if smi code is skipped from the stub, otherwise
-            // the patching has already been done earlier in this case branch).
-            // A perfect moment to try patching to STRINGS for ADD operation.
-            if (op_ == Token::ADD) {
-              GenerateTypeTransition(masm);
-            }
-        }
-        break;
-      }
-      case Token::MOD: {
-        // For MOD we go directly to runtime in the non-smi case.
-        break;
-      }
-      case Token::BIT_OR:
-      case Token::BIT_AND:
-      case Token::BIT_XOR:
-      case Token::SAR:
-      case Token::SHL:
-      case Token::SHR: {
-        Label skip_allocation, non_smi_shr_result;
-        Register heap_number_map = r9;
-        __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-        if (static_operands_type_.IsNumber()) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(rdx);
-            __ AbortIfNotNumber(rax);
-          }
-          FloatingPointHelper::LoadNumbersAsIntegers(masm);
-        } else {
-          FloatingPointHelper::LoadAsIntegers(masm,
-                                              &call_runtime,
-                                              heap_number_map);
-        }
-        switch (op_) {
-          case Token::BIT_OR:  __ orl(rax, rcx); break;
-          case Token::BIT_AND: __ andl(rax, rcx); break;
-          case Token::BIT_XOR: __ xorl(rax, rcx); break;
-          case Token::SAR: __ sarl_cl(rax); break;
-          case Token::SHL: __ shll_cl(rax); break;
-          case Token::SHR: {
-            __ shrl_cl(rax);
-            // Check if result is negative. This can only happen for a shift
-            // by zero.
-            __ testl(rax, rax);
-            __ j(negative, &non_smi_shr_result);
-            break;
-          }
-          default: UNREACHABLE();
-        }
-
-        STATIC_ASSERT(kSmiValueSize == 32);
-        // Tag smi result and return.
-        __ Integer32ToSmi(rax, rax);
-        GenerateReturn(masm);
-
-        // All bit-ops except SHR return a signed int32 that can be
-        // returned immediately as a smi.
-        // We might need to allocate a HeapNumber if we shift a negative
-        // number right by zero (i.e., convert to UInt32).
-        if (op_ == Token::SHR) {
-          ASSERT(non_smi_shr_result.is_linked());
-          __ bind(&non_smi_shr_result);
-          // Allocate a heap number if needed.
-          __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
-          switch (mode_) {
-            case OVERWRITE_LEFT:
-            case OVERWRITE_RIGHT:
-              // If the operand was an object, we skip the
-              // allocation of a heap number.
-              __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
-                                   1 * kPointerSize : 2 * kPointerSize));
-              __ JumpIfNotSmi(rax, &skip_allocation);
-              // Fall through!
-            case NO_OVERWRITE:
-              // Allocate heap number in new space.
-              // Not using AllocateHeapNumber macro in order to reuse
-              // already loaded heap_number_map.
-              __ AllocateInNewSpace(HeapNumber::kSize,
-                                    rax,
-                                    rcx,
-                                    no_reg,
-                                    &call_runtime,
-                                    TAG_OBJECT);
-              // Set the map.
-              if (FLAG_debug_code) {
-                __ AbortIfNotRootValue(heap_number_map,
-                                       Heap::kHeapNumberMapRootIndex,
-                                       "HeapNumberMap register clobbered.");
-              }
-              __ movq(FieldOperand(rax, HeapObject::kMapOffset),
-                      heap_number_map);
-              __ bind(&skip_allocation);
-              break;
-            default: UNREACHABLE();
-          }
-          // Store the result in the HeapNumber and return.
-          __ cvtqsi2sd(xmm0, rbx);
-          __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-          GenerateReturn(masm);
-        }
-
-        break;
-      }
-      default: UNREACHABLE(); break;
-    }
-  }
-
-  // If all else fails, use the runtime system to get the correct
-  // result. If arguments was passed in registers now place them on the
-  // stack in the correct order below the return address.
-  __ bind(&call_runtime);
-
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-
-      if (HasArgsReversed()) {
-        lhs = rax;
-        rhs = rdx;
-      } else {
-        lhs = rdx;
-        rhs = rax;
-      }
-
-      // Test for string arguments before calling runtime.
-      Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in rdx and rax.
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
-      Condition is_smi;
-      is_smi = masm->CheckSmi(lhs);
-      __ j(is_smi, &not_string1);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
-      __ j(above_equal, &not_string1);
-
-      // First argument is a a string, test second.
-      is_smi = masm->CheckSmi(rhs);
-      __ j(is_smi, &string1_smi2);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
-      __ j(above_equal, &string1);
-
-      // First and second argument are strings.
-      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&string_add_stub);
-
-      __ bind(&string1_smi2);
-      // First argument is a string, second is a smi. Try to lookup the number
-      // string for the smi in the number string cache.
-      NumberToStringStub::GenerateLookupNumberStringCache(
-          masm, rhs, rbx, rcx, r8, true, &string1);
-
-      // Replace second argument on stack and tailcall string add stub to make
-      // the result.
-      __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-      __ TailCallStub(&string_add_stub);
-
-      // Only first argument is a string.
-      __ bind(&string1);
-      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
-      // First argument was not a string, test second.
-      __ bind(&not_string1);
-      is_smi = masm->CheckSmi(rhs);
-      __ j(is_smi, &not_strings);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
-      __ j(above_equal, &not_strings);
-
-      // Only second argument is a string.
-      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
-      __ bind(&not_strings);
-      // Neither argument is a string.
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    }
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
-    case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
-    case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
-    case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  ASSERT(!HasArgsInRegisters());
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If arguments are not passed in registers remove them from the stack before
-  // returning.
-  if (!HasArgsInRegisters()) {
-    __ ret(2 * kPointerSize);  // Remove both operands
-  } else {
-    __ ret(0);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  ASSERT(HasArgsInRegisters());
-  __ pop(rcx);
-  if (HasArgsReversed()) {
-    __ push(rax);
-    __ push(rdx);
-  } else {
-    __ push(rdx);
-    __ push(rax);
-  }
-  __ push(rcx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  // Ensure the operands are on the stack.
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  // Left and right arguments are already on stack.
-  __ pop(rcx);  // Save the return address.
-
-  // Push this stub's key.
-  __ Push(Smi::FromInt(MinorKey()));
-
-  // Although the operation and the type info are encoded into the key,
-  // the encoding is opaque, so push them too.
-  __ Push(Smi::FromInt(op_));
-
-  __ Push(Smi::FromInt(runtime_operands_type_));
-
-  __ push(rcx);  // The return address.
-
-  // Perform patching to an appropriate fast case and return the result.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
@@ -1065,6 +372,9 @@
     case TRBinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
+    case TRBinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
     case TRBinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
@@ -1105,29 +415,30 @@
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
 
-  // We only generate heapnumber answers for overflowing calculations
-  // for the four basic arithmetic operations.
-  bool generate_inline_heapnumber_results =
-      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
-      (op_ == Token::ADD || op_ == Token::SUB ||
-       op_ == Token::MUL || op_ == Token::DIV);
-
   // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
   Register left = rdx;
   Register right = rax;
 
+  // We only generate heapnumber answers for overflowing calculations
+  // for the four basic arithmetic operations and logical right shift by 0.
+  bool generate_inline_heapnumber_results =
+      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+      (op_ == Token::ADD || op_ == Token::SUB ||
+       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
 
   // Smi check of both operands.  If op is BIT_OR, the check is delayed
   // until after the OR operation.
   Label not_smis;
   Label use_fp_on_smis;
-  Label restore_MOD_registers;  // Only used if op_ == Token::MOD.
+  Label fail;
 
   if (op_ != Token::BIT_OR) {
     Comment smi_check_comment(masm, "-- Smi check arguments");
     __ JumpIfNotBothSmi(left, right, &not_smis);
   }
 
+  Label smi_values;
+  __ bind(&smi_values);
   // Perform the operation.
   Comment perform_smi(masm, "-- Perform smi operation");
   switch (op_) {
@@ -1166,9 +477,7 @@
 
     case Token::BIT_OR: {
       ASSERT(right.is(rax));
-      __ movq(rcx, right);  // Save the right operand.
-      __ SmiOr(right, right, left);  // BIT_OR is commutative.
-      __ JumpIfNotSmi(right, &not_smis);  // Test delayed until after BIT_OR.
+      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
       break;
       }
     case Token::BIT_XOR:
@@ -1192,7 +501,7 @@
       break;
 
     case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &not_smis);
+      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
       __ movq(rax, left);
       break;
 
@@ -1203,41 +512,52 @@
   // 5. Emit return of result in rax.  Some operations have registers pushed.
   __ ret(0);
 
-  // 6. For some operations emit inline code to perform floating point
-  //    operations on known smis (e.g., if the result of the operation
-  //    overflowed the smi range).
-  __ bind(&use_fp_on_smis);
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    // Restore left and right to rdx and rax.
-    __ movq(rdx, rcx);
-    __ movq(rax, rbx);
-  }
-
-
-  if (generate_inline_heapnumber_results) {
-    __ AllocateHeapNumber(rcx, rbx, slow);
-    Comment perform_float(masm, "-- Perform float operation on smis");
-    FloatingPointHelper::LoadSSE2SmiOperands(masm);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
+  if (use_fp_on_smis.is_linked()) {
+    // 6. For some operations emit inline code to perform floating point
+    //    operations on known smis (e.g., if the result of the operation
+    //    overflowed the smi range).
+    __ bind(&use_fp_on_smis);
+    if (op_ == Token::DIV || op_ == Token::MOD) {
+      // Restore left and right to rdx and rax.
+      __ movq(rdx, rcx);
+      __ movq(rax, rbx);
     }
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
+
+    if (generate_inline_heapnumber_results) {
+      __ AllocateHeapNumber(rcx, rbx, slow);
+      Comment perform_float(masm, "-- Perform float operation on smis");
+      if (op_ == Token::SHR) {
+        __ SmiToInteger32(left, left);
+        __ cvtqsi2sd(xmm0, left);
+      } else {
+        FloatingPointHelper::LoadSSE2SmiOperands(masm);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+      }
+      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+      __ movq(rax, rcx);
+      __ ret(0);
+    } else {
+      __ jmp(&fail);
+    }
   }
 
   // 7. Non-smi operands reach the end of the code generated by
   //    GenerateSmiCode, and fall through to subsequent code,
   //    with the operands in rdx and rax.
-  Comment done_comment(masm, "-- Enter non-smi code");
+  //    But first we check whether the non-smi values are HeapNumbers
+  //    holding values that could be represented as smis.
   __ bind(&not_smis);
-  if (op_ == Token::BIT_OR) {
-    __ movq(right, rcx);
-  }
+  Comment done_comment(masm, "-- Enter non-smi code");
+  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
+                                     &smi_values, &fail);
+  __ jmp(&smi_values);
+  __ bind(&fail);
 }
 
 
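
The SHR case deserves a concrete example: JavaScript's >>> produces an
unsigned 32-bit result, so even with 32-bit smi payloads on x64 the shifted
value can exceed the smi range and must be boxed as a HeapNumber.

  // Host-side sketch of why logical right shift can overflow the smi range:
  int32_t x = -1;
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // 4294967295
  // 4294967295 > kSmiMaxValue (2^31 - 1), so use_fp_on_smis boxes it.
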
@@ -1422,12 +742,25 @@
 
 
 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label not_smi;
+  Label call_runtime;
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
 
-  GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
-
-  __ bind(&not_smi);
+  // Code falls through here if the result was returned as neither a smi nor
+  // a heap number.
   GenerateTypeTransition(masm);
+
+  if (call_runtime.is_linked()) {
+    __ bind(&call_runtime);
+    GenerateCallRuntimeCode(masm);
+  }
 }
 
 
@@ -1441,6 +774,36 @@
 }
 
 
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
 void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   Label call_runtime;
 
@@ -1951,7 +1314,7 @@
   __ bind(&check_undefined_arg1);
   __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, conversion_failure);
-  __ movl(r8, Immediate(0));
+  __ Set(r8, 0);
   __ jmp(&load_arg2);
 
   __ bind(&arg1_is_object);
@@ -1971,7 +1334,7 @@
   __ bind(&check_undefined_arg2);
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, conversion_failure);
-  __ movl(rcx, Immediate(0));
+  __ Set(rcx, 0);
   __ jmp(&done);
 
   __ bind(&arg2_is_object);
@@ -2046,6 +1409,62 @@
 }
 
 
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
+                                        Register first,
+                                        Register second,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Label* on_success,
+                            Label* on_not_smis) {
+  Register heap_number_map = scratch3;
+  Register smi_result = scratch1;
+  Label done;
+
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  NearLabel first_smi, check_second;
+  __ JumpIfSmi(first, &first_smi);
+  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
+  __ j(not_equal, on_not_smis);
+  // Convert HeapNumber to smi if possible.
+  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+  __ movq(scratch2, xmm0);
+  __ cvttsd2siq(smi_result, xmm0);
+  // Check if conversion was successful by converting back and
+  // comparing to the original double's bits.
+  __ cvtlsi2sd(xmm1, smi_result);
+  __ movq(kScratchRegister, xmm1);
+  __ cmpq(scratch2, kScratchRegister);
+  __ j(not_equal, on_not_smis);
+  __ Integer32ToSmi(first, smi_result);
+
+  __ bind(&check_second);
+  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
+  __ bind(&first_smi);
+  if (FLAG_debug_code) {
+    // Second should be non-smi if we get here.
+    __ AbortIfSmi(second);
+  }
+  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
+  __ j(not_equal, on_not_smis);
+  // Convert second to smi, if possible.
+  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+  __ movq(scratch2, xmm0);
+  __ cvttsd2siq(smi_result, xmm0);
+  __ cvtlsi2sd(xmm1, smi_result);
+  __ movq(kScratchRegister, xmm1);
+  __ cmpq(scratch2, kScratchRegister);
+  __ j(not_equal, on_not_smis);
+  __ Integer32ToSmi(second, smi_result);
+  if (on_success != NULL) {
+    __ jmp(on_success);
+  } else {
+    __ bind(&done);
+  }
+}
+
+
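
As a concrete reading of the conversion check above, a host-side sketch of
the predicate the cvttsd2siq/cvtlsi2sd round trip implements (BitCast is
assumed to be V8's bit-reinterpreting helper; this is illustrative, not the
stub itself):

  static bool ConvertsLosslesslyToSmi(double d) {
    int64_t truncated = static_cast<int64_t>(d);  // cvttsd2siq
    double roundtrip =
        static_cast<double>(static_cast<int32_t>(truncated));  // cvtlsi2sd
    return BitCast<int64_t>(roundtrip) == BitCast<int64_t>(d);
  }
  // 5.0 -> true (becomes Smi(5)); 5.5, -0.0, and 2^31 -> false, so control
  // reaches on_not_smis. -0.0 fails because its sign bit has no smi analogue.
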
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done;
 
@@ -2072,7 +1491,7 @@
     __ j(not_equal, &slow);
     // Operand is a float, negate its value by flipping sign bit.
     __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ movq(kScratchRegister, Immediate(0x01));
+    __ Set(kScratchRegister, 0x01);
     __ shl(kScratchRegister, Immediate(63));
     __ xor_(rdx, kScratchRegister);  // Flip sign.
     // rdx is value to store.
@@ -2144,7 +1563,7 @@
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
   // Save 1 in xmm3 - we need this several times later on.
-  __ movl(rcx, Immediate(1));
+  __ Set(rcx, 1);
   __ cvtlsi2sd(xmm3, rcx);
 
   Label exponent_nonsmi;
@@ -2183,7 +1602,7 @@
   __ bind(&no_neg);
 
   // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
+  __ movaps(xmm1, xmm3);
   NearLabel while_true;
   NearLabel no_multiply;
 
@@ -2201,8 +1620,8 @@
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ xorpd(xmm0, xmm0);
+  __ movaps(xmm1, xmm3);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
 
@@ -2250,11 +1669,11 @@
 
   // Calculates reciprocal of square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
+  __ movaps(xmm1, xmm3);
   __ jmp(&allocate_return);
 
   // Test for 0.5.
@@ -2267,8 +1686,8 @@
   __ j(not_equal, &call_runtime);
   // Calculates square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
   __ sqrtsd(xmm1, xmm1);
 
   __ bind(&allocate_return);
@@ -2944,9 +2363,10 @@
   // Heap::GetNumberStringCache.
   Label is_smi;
   Label load_result_from_cache;
+  Factory* factory = masm->isolate()->factory();
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
+    __ CheckMap(object, factory->heap_number_map(), not_found, true);
 
     STATIC_ASSERT(8 == kDoubleSize);
     __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
@@ -2961,8 +2381,6 @@
                          times_1,
                          FixedArray::kHeaderSize));
     __ JumpIfSmi(probe, not_found);
-    ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
-    CpuFeatures::Scope fscope(SSE2);
     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
     __ ucomisd(xmm0, xmm1);
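
The FACTORY -> masm->isolate()->factory() substitutions here and below share
one rationale: look the isolate up once through the assembler instead of via
the FACTORY macro, which performs an Isolate::Current() thread-local lookup
on every use. Schematically:

  Factory* factory = masm->isolate()->factory();  // One lookup, reused.
  __ CheckMap(object, factory->heap_number_map(), not_found, true);
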
@@ -3035,6 +2453,7 @@
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
   Label check_unequal_objects, done;
+  Factory* factory = masm->isolate()->factory();
 
   // Compare two smis if required.
   if (include_smi_compare_) {
@@ -3082,7 +2501,6 @@
     // Note: if cc_ != equal, never_nan_nan_ is not used.
     // We cannot set rax to EQUAL until just before return because
     // rax must be unchanged on jump to not_identical.
-
     if (never_nan_nan_ && (cc_ == equal)) {
       __ Set(rax, EQUAL);
       __ ret(0);
@@ -3090,7 +2508,7 @@
       NearLabel heap_number;
       // If it's not a heap number, then return equal for (in)equality operator.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-             FACTORY->heap_number_map());
+             factory->heap_number_map());
       __ j(equal, &heap_number);
       if (cc_ != equal) {
         // Call runtime on identical JSObjects.  Otherwise return equal.
@@ -3135,7 +2553,7 @@
 
         // Check if the non-smi operand is a heap number.
         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
+               factory->heap_number_map());
         // If heap number, handle it in the slow case.
         __ j(equal, &slow);
         // Return non-equal.  ebx (the lower half of rbx) is not zero.
@@ -3761,10 +3179,10 @@
   // is and instance of the function and anything else to
   // indicate that the value is not an instance.
 
-  static const int kOffsetToMapCheckValue = 5;
-  static const int kOffsetToResultValue = 21;
+  static const int kOffsetToMapCheckValue = 2;
+  static const int kOffsetToResultValue = 18;
   // The last 4 bytes of the instruction sequence
-  //   movq(rax, FieldOperand(rdi, HeapObject::kMapOffset)
+  //   movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
   //   Move(kScratchRegister, FACTORY->the_hole_value())
   // in front of the hole value address.
   static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
@@ -3830,7 +3248,7 @@
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
-      __ Assert(equal, "InstanceofStub unexpected call site cache.");
+      __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
   }
 
@@ -3867,9 +3285,9 @@
     if (FLAG_debug_code) {
       __ movl(rax, Immediate(kWordBeforeResultValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
-      __ Assert(equal, "InstanceofStub unexpected call site cache.");
+      __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
     }
-    __ xorl(rax, rax);
+    __ Set(rax, 0);
   }
   __ ret(2 * kPointerSize + extra_stack_space);
 
@@ -4066,10 +3484,11 @@
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
+  Factory* factory = masm->isolate()->factory();
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
-  __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
+  __ CheckMap(index_, factory->heap_number_map(), index_not_number_, true);
   call_helper.BeforeCall(masm);
   __ push(object_);
   __ push(index_);
@@ -4728,7 +4147,7 @@
   // if (hash == 0) hash = 27;
   Label hash_not_zero;
   __ j(not_zero, &hash_not_zero);
-  __ movl(hash, Immediate(27));
+  __ Set(hash, 27);
   __ bind(&hash_not_zero);
 }
 
@@ -4924,7 +4343,7 @@
     // Use scratch3 as loop index, min_length as limit and scratch2
     // for computation.
     const Register index = scratch3;
-    __ movl(index, Immediate(0));  // Index into strings.
+    __ Set(index, 0);  // Index into strings.
     __ bind(&loop);
     // Compare characters.
     // TODO(lrn): Could we load more than one character at a time?
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 246650a..3b40280 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -71,145 +71,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
-  NO_GENERIC_BINARY_FLAGS = 0,
-  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      GenericBinaryFlags flags,
-                      TypeInfo operands_type = TypeInfo::Unknown())
-      : op_(op),
-        mode_(mode),
-        flags_(flags),
-        args_in_registers_(false),
-        args_reversed_(false),
-        static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
-        name_(NULL) {
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        flags_(FlagBits::decode(key)),
-        args_in_registers_(ArgsInRegistersBits::decode(key)),
-        args_reversed_(ArgsReversedBits::decode(key)),
-        static_operands_type_(TypeInfo::ExpandedRepresentation(
-            StaticTypeInfoBits::decode(key))),
-        runtime_operands_type_(runtime_operands_type),
-        name_(NULL) {
-  }
-
-  // Generate code to call the stub with the supplied arguments. This will add
-  // code at the call site to prepare arguments either in registers or on the
-  // stack together with the actual call.
-  void GenerateCall(MacroAssembler* masm, Register left, Register right);
-  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
-  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
-  bool ArgsInRegistersSupported() {
-    return (op_ == Token::ADD) || (op_ == Token::SUB)
-        || (op_ == Token::MUL) || (op_ == Token::DIV);
-  }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  GenericBinaryFlags flags_;
-  bool args_in_registers_;  // Arguments passed in registers not on the stack.
-  bool args_reversed_;  // Left and right argument are swapped.
-
-  // Number type information of operands, determined by code generator.
-  TypeInfo static_operands_type_;
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           static_cast<int>(flags_),
-           static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_),
-           static_operands_type_.ToString());
-  }
-#endif
-
-  // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
-  class ArgsReversedBits: public BitField<bool, 10, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
-  class StaticTypeInfoBits: public BitField<int, 12, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    // Encode the parameters in a unique 18 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | FlagBits::encode(flags_)
-           | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_)
-           | StaticTypeInfoBits::encode(
-               static_operands_type_.ThreeBitRepresentation())
-           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
-  }
-
-  void Generate(MacroAssembler* masm);
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  bool IsOperationCommutative() {
-    return (op_ == Token::ADD) || (op_ == Token::MUL);
-  }
-
-  void SetArgsInRegisters() { args_in_registers_ = true; }
-  void SetArgsReversed() { args_reversed_ = true; }
-  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
-  bool HasArgsInRegisters() { return args_in_registers_; }
-  bool HasArgsReversed() { return args_reversed_; }
-
-  bool ShouldGenerateSmiCode() {
-    return HasSmiCodeInStub() &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  friend class CodeGenerator;
-  friend class LCodeGen;
-};
-
-
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -291,6 +152,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
 
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
deleted file mode 100644
index 53caf91..0000000
--- a/src/x64/codegen-x64-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_X64_CODEGEN_X64_INL_H_
-#define V8_X64_CODEGEN_X64_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_CODEGEN_X64_INL_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 8c338fe..f8f2d6e 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -29,81 +29,14 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ push(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
-      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
-    }
-  }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
-  // Restore registers in reverse order due to the stack.
-  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ pop(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore) {
-      action &= ~kSyncedFlag;
-      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
-    }
-  }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
-  frame_state_.Restore(masm_);
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  frame_state_->Restore(masm);
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -114,8639 +47,6 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      destination_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
-                           ControlDestination* destination)
-    : owner_(owner),
-      destination_(destination),
-      previous_(owner->state()) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      state_(NULL),
-      loop_nesting_(0),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false) {
-}
-
-
-// Calling conventions:
-// rbp: caller's frame pointer
-// rsp: stack pointer
-// rdi: called JS function
-// rsi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  set_in_spilled_code(false);
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  Isolate::Current()->set_jump_target_compiling_deferred_code(false);
-
-  {
-    CodeGenState state(this);
-    // Entry:
-    // Stack: receiver, arguments, return address.
-    // rbp: caller's frame pointer
-    // rsp: stack pointer
-    // rdi: called JS function
-    // rsi: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ int3();
-    }
-#endif
-
-    frame_->Enter();
-
-    // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots();
-
-    // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      Comment cmnt(masm_, "[ allocate local context");
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      frame_->PushFunction();
-      Result context;
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        context = frame_->CallStub(&stub, 1);
-      } else {
-        context = frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-      // Update context local.
-      frame_->SaveContextRegister();
-
-      // Verify that the runtime call result and rsi agree.
-      if (FLAG_debug_code) {
-        __ cmpq(context.reg(), rsi);
-        __ Assert(equal, "Runtime::NewContext should end up in rsi");
-      }
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, it must be the last argument
-      // passed to the parameter that needs to be copied. This is a rare
-      // case so we don't check for it, instead we rely on the copying
-      // order: such a parameter is copied repeatedly into the same
-      // context location and thus the last value is what is seen inside
-      // the function.
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          // The use of SlotOperand below is safe in unspilled code
-          // because the slot is guaranteed to be a context slot.
-          //
-          // There are no parameters in the global scope.
-          ASSERT(!scope()->is_global_scope());
-          frame_->PushParameterAt(i);
-          Result value = frame_->Pop();
-          value.ToRegister();
-
-          // SlotOperand loads context.reg() with the context object
-          // stored to, used below in RecordWrite.
-          Result context = allocator_->Allocate();
-          ASSERT(context.is_valid());
-          __ movq(SlotOperand(slot, context.reg()), value.reg());
-          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          Result scratch = allocator_->Allocate();
-          ASSERT(scratch.is_valid());
-          frame_->Spill(context.reg());
-          frame_->Spill(value.reg());
-          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->Push(FACTORY->the_hole_value());
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-    CheckStack();
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-
-      // Handle the return from the function.
-      if (has_valid_frame()) {
-        // If there is a valid frame, control flow can fall off the end of
-        // the body.  In that case there is an implicit return statement.
-        ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(info->function());
-        frame_->PrepareForReturn();
-        Result undefined(FACTORY->undefined_value());
-        if (function_return_.is_bound()) {
-          function_return_.Jump(&undefined);
-        } else {
-          function_return_.Bind(&undefined);
-          GenerateReturnSequence(&undefined);
-        }
-      } else if (function_return_.is_linked()) {
-        // If the return target has dangling jumps to it, then we have not
-        // yet generated the return sequence.  This can happen when (a)
-        // control does not flow off the end of the body so we did not
-        // compile an artificial return statement just above, and (b) there
-        // are return statements in the body but (c) they are all shadowed.
-        Result return_value;
-        function_return_.Bind(&return_value);
-        GenerateReturnSequence(&return_value);
-      }
-    }
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(state_ == NULL);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    info->isolate()->set_jump_target_compiling_deferred_code(true);
-    ProcessDeferred();
-    info->isolate()->set_jump_target_compiling_deferred_code(false);
-  }
-
-  // There is no need to delete the register allocator, it is a
-  // stack-allocated local.
-  allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(rsi));  // do not overwrite context register
-      Register context = rsi;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we
-      // are always at a function context. However it is safe to
-      // dereference, because the function context of a function context
-      // is itself. Before deleting this mov we should try to create a
-      // counter-example first, though...)
-      __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return Operand(rsp, 0);
-  }
-}
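
The CONTEXT case above is a pointer walk: each hop loads the current context's
closure and then that closure's defining context. A minimal C++ sketch of the
same traversal, with hypothetical field names standing in for
Context::CLOSURE_INDEX, JSFunction::kContextOffset and Context::FCONTEXT_INDEX:

struct JSFunction;

struct Context {
  JSFunction* closure;   // ContextOperand(ctx, Context::CLOSURE_INDEX)
  Context* fcontext;     // ContextOperand(ctx, Context::FCONTEXT_INDEX)
};

struct JSFunction {
  Context* context;      // FieldOperand(fn, JSFunction::kContextOffset)
};

// Hop chain_length scope levels outward, then normalize to the function
// context. A function context's fcontext is itself, so the final load is
// safe even when no 'with' context is present.
Context* WalkContextChain(Context* ctx, int chain_length) {
  for (int i = 0; i < chain_length; i++) {
    ctx = ctx->closure->context;
  }
  return ctx->fcontext;
}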
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                         Result tmp,
-                                                         JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  ASSERT(tmp.is_register());
-  Register context = rsi;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
-                Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-  }
-  // Check that last extension is NULL.
-  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  slow->Branch(not_equal, not_taken);
-  __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
-                                  ControlDestination* dest,
-                                  bool force_control) {
-  ASSERT(!in_spilled_code());
-  int original_height = frame_->height();
-
-  { CodeGenState new_state(this, dest);
-    Visit(expr);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (e.g., a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        !dest->is_used() &&
-        frame_->height() == original_height) {
-      dest->Goto(true);
-    }
-  }
-
-  if (force_control && !dest->is_used()) {
-    // Convert the TOS value into flow to the control destination.
-    ToBoolean(dest);
-  }
-
-  ASSERT(!(force_control && !dest->is_used()));
-  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Load(expression);
-  frame_->SpillAll();
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  JumpTarget true_target;
-  JumpTarget false_target;
-  ControlDestination dest(&true_target, &false_target, true);
-  LoadCondition(expr, &dest, false);
-
-  if (dest.false_was_fall_through()) {
-    // The false target was just bound.
-    JumpTarget loaded;
-    frame_->Push(FACTORY->false_value());
-    // There may be dangling jumps to the true target.
-    if (true_target.is_linked()) {
-      loaded.Jump();
-      true_target.Bind();
-      frame_->Push(FACTORY->true_value());
-      loaded.Bind();
-    }
-
-  } else if (dest.is_used()) {
-    // There is true, and possibly false, control flow (with true as
-    // the fall through).
-    JumpTarget loaded;
-    frame_->Push(FACTORY->true_value());
-    if (false_target.is_linked()) {
-      loaded.Jump();
-      false_target.Bind();
-      frame_->Push(FACTORY->false_value());
-      loaded.Bind();
-    }
-
-  } else {
-    // We have a valid value on top of the frame, but we still may
-    // have dangling jumps to the true and false targets from nested
-    // subexpressions (e.g., the left subexpressions of the
-    // short-circuited boolean operators).
-    ASSERT(has_valid_frame());
-    if (true_target.is_linked() || false_target.is_linked()) {
-      JumpTarget loaded;
-      loaded.Jump();  // Don't lose the current TOS.
-      if (true_target.is_linked()) {
-        true_target.Bind();
-        frame_->Push(FACTORY->true_value());
-        if (false_target.is_linked()) {
-          loaded.Jump();
-        }
-      }
-      if (false_target.is_linked()) {
-        false_target.Bind();
-        frame_->Push(FACTORY->false_value());
-      }
-      loaded.Bind();
-    }
-  }
-
-  ASSERT(has_valid_frame());
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  if (in_spilled_code()) {
-    frame_->EmitPush(GlobalObjectOperand());
-  } else {
-    Result temp = allocator_->Allocate();
-    __ movq(temp.reg(), GlobalObjectOperand());
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
-  Result temp = allocator_->Allocate();
-  Register reg = temp.reg();
-  __ movq(reg, GlobalObjectOperand());
-  __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the arguments marker value
-    // as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->Push(FACTORY->arguments_marker());
-  } else {
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    frame_->PushFunction();
-    frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope()->num_parameters()));
-    Result result = frame_->CallStub(&stub, 3);
-    frame_->Push(&result);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  bool skip_arguments = false;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
-    if (probe.is_constant()) {
-      // We have to skip updating the arguments object if it has
-      // been assigned a proper value.
-      skip_arguments = !probe.handle()->IsArgumentsMarker();
-    } else {
-      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
-      probe.Unuse();
-      done.Branch(not_equal);
-    }
-  }
-  if (!skip_arguments) {
-    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  }
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-  return frame_->Pop();
-}
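
The lazy path above is a sentinel pattern: a marker is stored at function
entry and only replaced by a real arguments object if nothing has assigned
the slot in the meantime. A sketch of the idea under assumed names (the real
code uses FACTORY->arguments_marker() as the sentinel):

#include <cstdint>

struct ArgumentsObject {};

// Hypothetical sentinel address; any value that cannot be a real object works.
static ArgumentsObject* const kArgumentsMarker =
    reinterpret_cast<ArgumentsObject*>(UINTPTR_MAX);

// Store a real arguments object only if the slot still holds the marker,
// i.e. the function body has not already assigned 'arguments' itself.
void StoreArgumentsLazily(ArgumentsObject** slot, ArgumentsObject* real) {
  if (*slot == kArgumentsMarker) *slot = real;
}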
-
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  // References are loaded from both spilled and unspilled code.  Set the
-  // state to unspilled to allow that (and explicitly spill after
-  // construction at the construction sites).
-  bool was_in_spilled_code = in_spilled_code_;
-  in_spilled_code_ = false;
-
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      // If rax is free, the register allocator prefers it.  Thus the code
-      // generator will load the global object into rax, which is where
-      // LoadIC wants it.  Most uses of Reference call LoadIC directly
-      // after the reference is created.
-      frame_->Spill(rax);
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-
-  in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  // Pop a reference from the stack while preserving TOS.
-  Comment cmnt(masm_, "[ UnloadReference");
-  frame_->Nip(ref->size());
-  ref->set_unloaded();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
-  Comment cmnt(masm_, "[ ToBoolean");
-
-  // The value to convert should be popped from the frame.
-  Result value = frame_->Pop();
-  value.ToRegister();
-
-  if (value.is_number()) {
-    // Fast case if TypeInfo indicates only numbers.
-    if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg());
-    }
-    // Smi => false iff zero.
-    __ Cmp(value.reg(), Smi::FromInt(0));
-    if (value.is_smi()) {
-      value.Unuse();
-      dest->Split(not_zero);
-    } else {
-      dest->false_target()->Branch(equal);
-      Condition is_smi = masm_->CheckSmi(value.reg());
-      dest->true_target()->Branch(is_smi);
-      __ xorpd(xmm0, xmm0);
-      __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
-      value.Unuse();
-      dest->Split(not_zero);
-    }
-  } else {
-    // Fast case checks.
-    // 'false' => false.
-    __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
-    dest->false_target()->Branch(equal);
-
-    // 'true' => true.
-    __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
-    dest->true_target()->Branch(equal);
-
-    // 'undefined' => false.
-    __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
-    dest->false_target()->Branch(equal);
-
-    // Smi => false iff zero.
-    __ Cmp(value.reg(), Smi::FromInt(0));
-    dest->false_target()->Branch(equal);
-    Condition is_smi = masm_->CheckSmi(value.reg());
-    dest->true_target()->Branch(is_smi);
-
-    // Call the stub for all other cases.
-    frame_->Push(&value);  // Undo the Pop() from above.
-    ToBooleanStub stub;
-    Result temp = frame_->CallStub(&stub, 1);
-    // Convert the result to a condition code.
-    __ testq(temp.reg(), temp.reg());
-    temp.Unuse();
-    dest->Split(not_equal);
-  }
-}
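
The fast paths above implement the ECMA-262 section 9.2 truth table directly;
strings and anything else fall through to ToBooleanStub. A host-code sketch of
the cases the inline code handles (Kind/Value are hypothetical stand-ins for
V8's tagged values, not V8 API):

#include <cmath>

enum class Kind { kFalse, kTrue, kUndefined, kSmi, kHeapNumber, kObject };

struct Value {
  Kind kind;
  double number;  // meaningful for kSmi and kHeapNumber only
};

bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Kind::kFalse:
    case Kind::kUndefined:  return false;
    case Kind::kTrue:       return true;
    case Kind::kSmi:        return v.number != 0;  // smi => false iff zero
    // Heap numbers are also false for -0 and NaN; the ucomisd/not_zero
    // split above yields the same answers, since unordered sets ZF.
    case Kind::kHeapNumber: return v.number != 0 && !std::isnan(v.number);
    default:                return true;  // objects are always true
  }
}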
-
-
-// Perform a binary operation, inlining the heap-number cases and falling
-// back to the specialized stub for everything else.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
-  DeferredInlineBinaryOperation(Token::Value op,
-                                Register dst,
-                                Register left,
-                                Register right,
-                                OverwriteMode mode)
-      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
-    set_comment("[ DeferredInlineBinaryOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register left_;
-  Register right_;
-  OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
-  Label done;
-  if ((op_ == Token::ADD)
-      || (op_ == Token::SUB)
-      || (op_ == Token::MUL)
-      || (op_ == Token::DIV)) {
-    Label call_runtime;
-    Label left_smi, right_smi, load_right, do_op;
-    __ JumpIfSmi(left_, &left_smi);
-    __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_LEFT) {
-      __ movq(dst_, left_);
-    }
-    __ jmp(&load_right);
-
-    __ bind(&left_smi);
-    __ SmiToInteger32(left_, left_);
-    __ cvtlsi2sd(xmm0, left_);
-    __ Integer32ToSmi(left_, left_);
-    if (mode_ == OVERWRITE_LEFT) {
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-
-    __ bind(&load_right);
-    __ JumpIfSmi(right_, &right_smi);
-    __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_RIGHT) {
-      __ movq(dst_, right_);
-    } else if (mode_ == NO_OVERWRITE) {
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-    __ jmp(&do_op);
-
-    __ bind(&right_smi);
-    __ SmiToInteger32(right_, right_);
-    __ cvtlsi2sd(xmm1, right_);
-    __ Integer32ToSmi(right_, right_);
-    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-
-    __ bind(&do_op);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
-    }
-    __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    __ jmp(&done);
-
-    __ bind(&call_runtime);
-  }
-  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, left_, right_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-  __ bind(&done);
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
-                                  Token::Value op,
-                                  const Result& right,
-                                  const Result& left) {
-  // Set TypeInfo of result according to the operation performed.
-  // We rely on the fact that smis have a 32 bit payload on x64.
-  STATIC_ASSERT(kSmiValueSize == 32);
-  switch (op) {
-    case Token::COMMA:
-      return right.type_info();
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      return operands_type;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      // Result is always a smi.
-      return TypeInfo::Smi();
-    case Token::SAR:
-    case Token::SHL:
-      // Result is always a smi.
-      return TypeInfo::Smi();
-    case Token::SHR:
-      // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
-      return (right.is_constant() && right.handle()->IsSmi()
-                     && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
-          ? TypeInfo::Smi()
-          : TypeInfo::Number();
-    case Token::ADD:
-      if (operands_type.IsNumber()) {
-        return TypeInfo::Number();
-      } else if (left.type_info().IsString() || right.type_info().IsString()) {
-        return TypeInfo::String();
-      } else {
-        return TypeInfo::Unknown();
-      }
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      return TypeInfo::Number();
-    default:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return TypeInfo::Unknown();
-}
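
A quick check of the Token::SHR reasoning above: once the shift count is
masked to five bits, any count of at least one forces the unsigned 32-bit
result below 2^31, which always fits the 32-bit smi payload asserted by the
STATIC_ASSERT; only a zero count can exceed Smi::kMaxValue. A self-contained
sketch (kSmiMaxValue is a stand-in for Smi::kMaxValue on x64):

#include <cassert>
#include <cstdint>

const int64_t kSmiMaxValue = (INT64_C(1) << 31) - 1;

// JavaScript x >>> y: unsigned 32-bit shift with the count masked to 5 bits.
uint32_t ShrJS(uint32_t x, uint32_t y) { return x >> (y & 0x1F); }

int main() {
  // Masked count >= 1: result < 2^31, so the result type is Smi.
  assert(ShrJS(0xFFFFFFFFu, 1) <= kSmiMaxValue);
  // Masked count == 0: the result may exceed Smi::kMaxValue, hence Number.
  assert(ShrJS(0xFFFFFFFFu, 0) > kSmiMaxValue);
  return 0;
}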
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
-                                           OverwriteMode overwrite_mode) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = expr->op();
-  Comment cmnt_token(masm_, Token::String(op));
-
-  if (op == Token::COMMA) {
-    // Simply discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  if (op == Token::ADD) {
-    const bool left_is_string = left.type_info().IsString();
-    const bool right_is_string = right.type_info().IsString();
-    // Make sure constant strings have string type info.
-    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
-           left_is_string);
-    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
-           right_is_string);
-    if (left_is_string || right_is_string) {
-      frame_->Push(&left);
-      frame_->Push(&right);
-      Result answer;
-      if (left_is_string) {
-        if (right_is_string) {
-          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        } else {
-          answer =
-            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
-        }
-      } else if (right_is_string) {
-        answer =
-          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
-      }
-      answer.set_type_info(TypeInfo::String());
-      frame_->Push(&answer);
-      return;
-    }
-    // Neither operand is known to be a string.
-  }
-
-  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi_constant =
-      right.is_constant() && !right.handle()->IsSmi();
-
-  if (left_is_smi_constant && right_is_smi_constant) {
-    // Compute the constant result at compile time, and leave it on the frame.
-    int left_int = Smi::cast(*left.handle())->value();
-    int right_int = Smi::cast(*right.handle())->value();
-    if (FoldConstantSmis(op, left_int, right_int)) return;
-  }
-
-  // Get number type of left and right sub-expressions.
-  TypeInfo operands_type =
-      TypeInfo::Combine(left.type_info(), right.type_info());
-
-  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
-  Result answer;
-  if (left_is_non_smi_constant || right_is_non_smi_constant) {
-    // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op,
-                             overwrite_mode,
-                             NO_SMI_CODE_IN_STUB,
-                             operands_type);
-    answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-  } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
-                                        false, overwrite_mode);
-  } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
-                                        true, overwrite_mode);
-  } else {
-    // Set the flags based on the operation, type and loop nesting level.
-    // Bit operations always assume they likely operate on smis. Still only
-    // generate the inline Smi check code if this operation is part of a loop.
-    // For all other operations only inline the Smi check code for likely smis
-    // if the operation is part of a loop.
-    if (loop_nesting() > 0 &&
-        (Token::IsBitOp(op) ||
-         operands_type.IsInteger32() ||
-         expr->type()->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
-    } else {
-      GenericBinaryOpStub stub(op,
-                               overwrite_mode,
-                               NO_GENERIC_BINARY_FLAGS,
-                               operands_type);
-      answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-    }
-  }
-
-  answer.set_type_info(result_type);
-  frame_->Push(&answer);
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
-  Object* answer_object = HEAP->undefined_value();
-  switch (op) {
-    case Token::ADD:
-      // Use intptr_t to detect overflow of 32-bit int.
-      if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
-        answer_object = Smi::FromInt(left + right);
-      }
-      break;
-    case Token::SUB:
-      // Use intptr_t to detect overflow of 32-bit int.
-      if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
-        answer_object = Smi::FromInt(left - right);
-      }
-      break;
-    case Token::MUL: {
-        double answer = static_cast<double>(left) * right;
-        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
-          // If the product is zero and the non-zero factor is negative,
-          // the spec requires us to return floating point negative zero.
-          if (answer != 0 || (left >= 0 && right >= 0)) {
-            answer_object = Smi::FromInt(static_cast<int>(answer));
-          }
-        }
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD:
-      break;
-    case Token::BIT_OR:
-      answer_object = Smi::FromInt(left | right);
-      break;
-    case Token::BIT_AND:
-      answer_object = Smi::FromInt(left & right);
-      break;
-    case Token::BIT_XOR:
-      answer_object = Smi::FromInt(left ^ right);
-      break;
-
-    case Token::SHL: {
-        int shift_amount = right & 0x1F;
-        if (Smi::IsValid(left << shift_amount)) {
-          answer_object = Smi::FromInt(left << shift_amount);
-        }
-        break;
-      }
-    case Token::SHR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        unsigned_left >>= shift_amount;
-        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
-          answer_object = Smi::FromInt(unsigned_left);
-        }
-        break;
-      }
-    case Token::SAR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        if (left < 0) {
-          // Perform arithmetic shift of a negative number by
-          // complementing number, logical shifting, complementing again.
-          unsigned_left = ~unsigned_left;
-          unsigned_left >>= shift_amount;
-          unsigned_left = ~unsigned_left;
-        } else {
-          unsigned_left >>= shift_amount;
-        }
-        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
-        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
-        break;
-      }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  if (answer_object->IsUndefined()) {
-    return false;
-  }
-  frame_->Push(Handle<Object>(answer_object));
-  return true;
-}
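
Two details of the folding above deserve emphasis: addition and subtraction
are widened to 64 bits before the range check so the fold itself cannot
overflow, and a zero product with a negative factor is deliberately not
folded, because the spec requires floating-point -0 there. A standalone
sketch of both rules (SmiIsValid and the limits are assumed stand-ins for
Smi::IsValid and the x64 smi range):

#include <cstdint>

const int64_t kSmiMin = -(INT64_C(1) << 31);
const int64_t kSmiMax = (INT64_C(1) << 31) - 1;

bool SmiIsValid(int64_t v) { return v >= kSmiMin && v <= kSmiMax; }

// Fold left + right only if the mathematical result fits; widening first
// means the addition itself cannot wrap around 32 bits.
bool FoldAdd(int32_t left, int32_t right, int32_t* out) {
  int64_t sum = static_cast<int64_t>(left) + right;
  if (!SmiIsValid(sum)) return false;
  *out = static_cast<int32_t>(sum);
  return true;
}

// Fold left * right, refusing the case the spec maps to -0: a zero product
// where one factor is negative must stay a heap number, not smi 0.
bool FoldMul(int32_t left, int32_t right, int32_t* out) {
  double product = static_cast<double>(left) * right;
  if (product < kSmiMin || product > kSmiMax) return false;
  if (product == 0 && (left < 0 || right < 0)) return false;
  *out = static_cast<int32_t>(product);
  return true;
}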
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
-                                               Result* right,
-                                               JumpTarget* both_smi) {
-  TypeInfo left_info = left->type_info();
-  TypeInfo right_info = right->type_info();
-  if (left_info.IsDouble() || left_info.IsString() ||
-      right_info.IsDouble() || right_info.IsString()) {
-    // We know that left and right are not both smi.  Don't do any tests.
-    return;
-  }
-
-  if (left->reg().is(right->reg())) {
-    if (!left_info.IsSmi()) {
-      Condition is_smi = masm()->CheckSmi(left->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      Condition is_smi = masm()->CheckSmi(left->reg());
-      both_smi->Branch(is_smi);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    if (!right_info.IsSmi()) {
-      Condition is_smi = masm()->CheckSmi(right->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  }
-}
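
The CheckBothSmi call above leans on the tagging scheme: smis have a clear
low tag bit, so or-ing two tagged words leaves that bit clear exactly when
both are smis, and a single test covers both registers. A sketch of the
invariant, assuming the usual kSmiTag == 0 / kSmiTagMask == 1 layout (the
encodings below are illustrative, not V8's exact x64 representation):

#include <cassert>
#include <cstdint>

const uintptr_t kSmiTagMask = 1;

bool IsSmi(uintptr_t word) { return (word & kSmiTagMask) == 0; }

// One test instead of two: the OR of two words has a clear tag bit
// exactly when both inputs do.
bool AreBothSmi(uintptr_t a, uintptr_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  const uintptr_t smi = 42u << 1;             // tag bit clear
  const uintptr_t heap_obj = (42u << 1) | 1;  // tag bit set
  assert(AreBothSmi(smi, smi));
  assert(!AreBothSmi(smi, heap_obj));
  assert(!AreBothSmi(heap_obj, heap_obj));
  return 0;
}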
-
-
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
-                                              TypeInfo type,
-                                              DeferredCode* deferred) {
-  if (!type.IsSmi()) {
-    __ JumpIfNotSmi(reg, deferred->entry_label());
-  }
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(reg);
-  }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  DeferredCode* deferred) {
-  if (!left_info.IsSmi() && !right_info.IsSmi()) {
-    __ JumpIfNotBothSmi(left, right, deferred->entry_label());
-  } else if (!left_info.IsSmi()) {
-    __ JumpIfNotSmi(left, deferred->entry_label());
-  } else if (!right_info.IsSmi()) {
-    __ JumpIfNotSmi(right, deferred->entry_label());
-  }
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(left);
-    __ AbortIfNotSmi(right);
-  }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
-                                               Result* left,
-                                               Result* right,
-                                               OverwriteMode overwrite_mode) {
-  // Copy the type info because left and right may be overwritten.
-  TypeInfo left_type_info = left->type_info();
-  TypeInfo right_type_info = right->type_info();
-  Token::Value op = expr->op();
-  Result answer;
-  // Special handling of div and mod because they use fixed registers.
-  if (op == Token::DIV || op == Token::MOD) {
-    // We need rax as the quotient register, rdx as the remainder
-    // register, neither left nor right in rax or rdx, and left copied
-    // to rax.
-    Result quotient;
-    Result remainder;
-    bool left_is_in_rax = false;
-    // Step 1: get rax for quotient.
-    if ((left->is_register() && left->reg().is(rax)) ||
-        (right->is_register() && right->reg().is(rax))) {
-      // One or both are in rax.  Use a fresh non-rdx register for
-      // them.
-      Result fresh = allocator_->Allocate();
-      ASSERT(fresh.is_valid());
-      if (fresh.reg().is(rdx)) {
-        remainder = fresh;
-        fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-      }
-      if (left->is_register() && left->reg().is(rax)) {
-        quotient = *left;
-        *left = fresh;
-        left_is_in_rax = true;
-      }
-      if (right->is_register() && right->reg().is(rax)) {
-        quotient = *right;
-        *right = fresh;
-      }
-      __ movq(fresh.reg(), rax);
-    } else {
-      // Neither left nor right is in rax.
-      quotient = allocator_->Allocate(rax);
-    }
-    ASSERT(quotient.is_register() && quotient.reg().is(rax));
-    ASSERT(!(left->is_register() && left->reg().is(rax)));
-    ASSERT(!(right->is_register() && right->reg().is(rax)));
-
-    // Step 2: get rdx for remainder if necessary.
-    if (!remainder.is_valid()) {
-      if ((left->is_register() && left->reg().is(rdx)) ||
-          (right->is_register() && right->reg().is(rdx))) {
-        Result fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-        if (left->is_register() && left->reg().is(rdx)) {
-          remainder = *left;
-          *left = fresh;
-        }
-        if (right->is_register() && right->reg().is(rdx)) {
-          remainder = *right;
-          *right = fresh;
-        }
-        __ movq(fresh.reg(), rdx);
-      } else {
-        // Neither left nor right is in rdx.
-        remainder = allocator_->Allocate(rdx);
-      }
-    }
-    ASSERT(remainder.is_register() && remainder.reg().is(rdx));
-    ASSERT(!(left->is_register() && left->reg().is(rdx)));
-    ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
-    left->ToRegister();
-    right->ToRegister();
-    frame_->Spill(rax);
-    frame_->Spill(rdx);
-
-    // Check that left and right are smi tagged.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          (op == Token::DIV) ? rax : rdx,
-                                          left->reg(),
-                                          right->reg(),
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
-                                  left_type_info, right_type_info, deferred);
-
-    if (op == Token::DIV) {
-      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = quotient;
-    } else {
-      ASSERT(op == Token::MOD);
-      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = remainder;
-    }
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Special handling of shift operations because they use fixed
-  // registers.
-  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
-    // Move left out of rcx if necessary.
-    if (left->is_register() && left->reg().is(rcx)) {
-      *left = allocator_->Allocate();
-      ASSERT(left->is_valid());
-      __ movq(left->reg(), rcx);
-    }
-    right->ToRegister(rcx);
-    left->ToRegister();
-    ASSERT(left->is_register() && !left->reg().is(rcx));
-    ASSERT(right->is_register() && right->reg().is(rcx));
-
-    // We will modify right; it must be spilled.
-    frame_->Spill(rcx);
-
-    // Use a fresh answer register to avoid spilling the left operand.
-    answer = allocator_->Allocate();
-    ASSERT(answer.is_valid());
-    // Check that both operands are smis using the answer register as a
-    // temporary.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          answer.reg(),
-                                          left->reg(),
-                                          rcx,
-                                          overwrite_mode);
-
-    Label do_op;
-    // Left operand must be unchanged in left->reg() for deferred code.
-    // Left operand is in answer.reg(), possibly converted to int32, for
-    // inline code.
-    __ movq(answer.reg(), left->reg());
-    if (right_type_info.IsSmi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(right->reg());
-      }
-      // If left is not known to be a smi, check if it is.
-      // If left is not known to be a number, and it isn't a smi, check if
-      // it is a HeapNumber.
-      if (!left_type_info.IsSmi()) {
-        __ JumpIfSmi(answer.reg(), &do_op);
-        if (!left_type_info.IsNumber()) {
-          // Branch if not a heapnumber.
-          __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
-                 FACTORY->heap_number_map());
-          deferred->Branch(not_equal);
-        }
-        // Load integer value into answer register using truncation.
-        __ cvttsd2si(answer.reg(),
-                     FieldOperand(answer.reg(), HeapNumber::kValueOffset));
-        // Branch if we might have overflowed.
-        // (False negative for Smi::kMinValue)
-        __ cmpl(answer.reg(), Immediate(0x80000000));
-        deferred->Branch(equal);
-        // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
-        __ Integer32ToSmi(answer.reg(), answer.reg());
-      } else {
-        // Fast case - both are actually smis.
-        if (FLAG_debug_code) {
-          __ AbortIfNotSmi(left->reg());
-        }
-      }
-    } else {
-      JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
-                                    left_type_info, right_type_info, deferred);
-    }
-    __ bind(&do_op);
-
-    // Perform the operation.
-    switch (op) {
-      case Token::SAR:
-        __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
-        break;
-      case Token::SHR: {
-        __ SmiShiftLogicalRight(answer.reg(),
-                                answer.reg(),
-                                rcx,
-                                deferred->entry_label());
-        break;
-      }
-      case Token::SHL: {
-        __ SmiShiftLeft(answer.reg(),
-                        answer.reg(),
-                        rcx);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-    deferred->BindExit();
-    left->Unuse();
-    right->Unuse();
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Handle the other binary operations.
-  left->ToRegister();
-  right->ToRegister();
-  // A newly allocated register answer is used to hold the answer.  The
-  // registers containing left and right are not modified so they don't
-  // need to be spilled in the fast case.
-  answer = allocator_->Allocate();
-  ASSERT(answer.is_valid());
-
-  // Perform the smi tag check.
-  DeferredInlineBinaryOperation* deferred =
-      new DeferredInlineBinaryOperation(op,
-                                        answer.reg(),
-                                        left->reg(),
-                                        right->reg(),
-                                        overwrite_mode);
-  JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
-                                left_type_info, right_type_info, deferred);
-
-  switch (op) {
-    case Token::ADD:
-      __ SmiAdd(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-
-    case Token::SUB:
-      __ SmiSub(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-
-    case Token::MUL: {
-      __ SmiMul(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-    }
-
-    case Token::BIT_OR:
-      __ SmiOr(answer.reg(), left->reg(), right->reg());
-      break;
-
-    case Token::BIT_AND:
-      __ SmiAnd(answer.reg(), left->reg(), right->reg());
-      break;
-
-    case Token::BIT_XOR:
-      __ SmiXor(answer.reg(), left->reg(), right->reg());
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-  deferred->BindExit();
-  left->Unuse();
-  right->Unuse();
-  ASSERT(answer.is_valid());
-  return answer;
-}
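
The register shuffling at the top of this function is dictated by hardware:
x86-64 idiv takes its dividend in rdx:rax and always writes the quotient to
rax and the remainder to rdx, so neither operand may live in those registers
when the instruction runs. The portable equivalent of what SmiDiv/SmiMod
compute is just:

#include <cstdint>

struct DivMod { int32_t quotient; int32_t remainder; };

// A compiler will typically emit a single idiv for this pair, quotient in
// rax and remainder in rdx, which is why the codegen reserves exactly those.
DivMod SmiDivMod(int32_t left, int32_t right) {
  return DivMod{ left / right, left % right };
}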
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             Register dst,
-                             Register src,
-                             Smi* value,
-                             OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        src_(src),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register src_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
-  // For mod we don't generate all the Smi code inline.
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, src_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
-  DeferredInlineSmiOperationReversed(Token::Value op,
-                                     Register dst,
-                                     Smi* value,
-                                     Register src,
-                                     OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        value_(value),
-        src_(src),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperationReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Smi* value_;
-  Register src_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, value_, src_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
-  DeferredInlineSmiAdd(Register dst,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAdd");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The result of value + src is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
-  DeferredInlineSmiAddReversed(Register dst,
-                               Smi* value,
-                               OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAddReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, value_, dst_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
-  DeferredInlineSmiSub(Register dst,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiSub");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                                 Result* operand,
-                                                 Handle<Object> value,
-                                                 bool reversed,
-                                                 OverwriteMode overwrite_mode) {
-  // Generate inline code for a binary operation when one of the
-  // operands is a constant smi.  Consumes the argument "operand".
-  if (IsUnsafeSmi(value)) {
-    Result unsafe_operand(value);
-    if (reversed) {
-      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
-                                      overwrite_mode);
-    } else {
-      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
-                                      overwrite_mode);
-    }
-  }
-
-  // Get the literal value.
-  Smi* smi_value = Smi::cast(*value);
-  int int_value = smi_value->value();
-
-  Token::Value op = expr->op();
-  Result answer;
-  switch (op) {
-    case Token::ADD: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
-                                                    smi_value,
-                                                    overwrite_mode);
-      } else {
-        deferred = new DeferredInlineSmiAdd(operand->reg(),
-                                            smi_value,
-                                            overwrite_mode);
-      }
-      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                deferred);
-      __ SmiAddConstant(operand->reg(),
-                        operand->reg(),
-                        smi_value,
-                        deferred->entry_label());
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::SUB: {
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        answer = *operand;
-        DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
-                                                          smi_value,
-                                                          overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        // A smi currently fits in a 32-bit Immediate.
-        __ SmiSubConstant(operand->reg(),
-                          operand->reg(),
-                          smi_value,
-                          deferred->entry_label());
-        deferred->BindExit();
-        operand->Unuse();
-      }
-      break;
-    }
-
-    case Token::SAR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        __ SmiShiftArithmeticRightConstant(operand->reg(),
-                                           operand->reg(),
-                                           shift_value);
-        deferred->BindExit();
-        answer = *operand;
-      }
-      break;
-
-    case Token::SHR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           answer.reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        __ SmiShiftLogicalRightConstant(answer.reg(),
-                                        operand->reg(),
-                                        shift_value,
-                                        deferred->entry_label());
-        deferred->BindExit();
-        operand->Unuse();
-      }
-      break;
-
-    case Token::SHL:
-      if (reversed) {
-        operand->ToRegister();
-
-        // We need rcx to be available to hold operand, and to be spilled.
-        // SmiShiftLeft implicitly modifies rcx.
-        if (operand->reg().is(rcx)) {
-          frame_->Spill(operand->reg());
-          answer = allocator()->Allocate();
-        } else {
-          Result rcx_reg = allocator()->Allocate(rcx);
-          // answer must not be rcx.
-          answer = allocator()->Allocate();
-          // rcx_reg goes out of scope.
-        }
-
-        DeferredInlineSmiOperationReversed* deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-
-        __ Move(answer.reg(), smi_value);
-        __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
-        operand->Unuse();
-
-        deferred->BindExit();
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        if (shift_value == 0) {
-          // Spill operand so it can be overwritten in the slow case.
-          frame_->Spill(operand->reg());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             smi_value,
-                                             overwrite_mode);
-          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                    deferred);
-          deferred->BindExit();
-          answer = *operand;
-        } else {
-          // Use a fresh temporary for nonzero shift values.
-          answer = allocator()->Allocate();
-          ASSERT(answer.is_valid());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             answer.reg(),
-                                             operand->reg(),
-                                             smi_value,
-                                             overwrite_mode);
-          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                    deferred);
-          __ SmiShiftLeftConstant(answer.reg(),
-                                  operand->reg(),
-                                  shift_value);
-          deferred->BindExit();
-          operand->Unuse();
-        }
-      }
-      break;
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-      if (reversed) {
-        // Bit operations with a constant smi are commutative.
-        // We can swap left and right operands with no problem.
-        // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
-        overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
-      }
-      DeferredCode* deferred =  new DeferredInlineSmiOperation(op,
-                                                               operand->reg(),
-                                                               operand->reg(),
-                                                               smi_value,
-                                                               overwrite_mode);
-      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                deferred);
-      if (op == Token::BIT_AND) {
-        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
-      } else if (op == Token::BIT_XOR) {
-        if (int_value != 0) {
-          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
-        }
-      } else {
-        ASSERT(op == Token::BIT_OR);
-        if (int_value != 0) {
-          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
-        }
-      }
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    // Generate inline code for mod of powers of 2 and negative powers of 2.
-    case Token::MOD:
-      if (!reversed &&
-          int_value != 0 &&
-          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
-        if (int_value < 0) int_value = -int_value;
-        if (int_value == 1) {
-          __ Move(operand->reg(), Smi::FromInt(0));
-        } else {
-          __ SmiAndConstant(operand->reg(),
-                            operand->reg(),
-                            Smi::FromInt(int_value - 1));
-        }
-        deferred->BindExit();
-        answer = *operand;
-        break;  // This break only applies if we generated code for MOD.
-      }
-      // Fall through if we did not find a power of 2 on the right hand side!
-      // The next case must be the default.
-
-    default: {
-      Result constant_operand(value);
-      if (reversed) {
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                          overwrite_mode);
-      }
-      break;
-    }
-  }
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
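All three constant-shift cases above mask the shift count with 0x1f before emitting the inline code; ECMA-262 defines shift counts modulo 32, so the fast path agrees with the masking the runtime call performs in the slow case. A minimal standalone sketch of that semantics (the names are ours, not V8's):

    #include <cassert>
    #include <cstdint>

    // JS shift semantics: only the low five bits of the count are used.
    int32_t JsShl(int32_t value, int32_t count) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
    }

    int main() {
      assert(JsShl(1, 32) == 1);  // 32 & 0x1f == 0: no shift at all
      assert(JsShl(1, 33) == 2);  // 33 & 0x1f == 1
      return 0;
    }

Relatedly, the commutative bit-operation case above swaps its operands and remaps the overwrite mode with (2 * overwrite_mode) % 3, which is exactly the permutation 0->0, 1->2, 2->1 named in the comment: "no overwrite" stays put, and overwrite-left and overwrite-right trade places.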
-
-static bool CouldBeNaN(const Result& result) {
-  if (result.type_info().IsSmi()) return false;
-  if (result.type_info().IsInteger32()) return false;
-  if (!result.is_constant()) return true;
-  if (!result.handle()->IsHeapNumber()) return false;
-  return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
-  switch (cc) {
-    case less:          return below;
-    case equal:         return equal;
-    case less_equal:    return below_equal;
-    case greater:       return above;
-    case greater_equal: return above_equal;
-    default:            UNREACHABLE();
-  }
-  UNREACHABLE();
-  return equal;
-}
-
-
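DoubleCondition is needed because ucomisd sets EFLAGS the way an unsigned integer compare does: ZF for equal, CF for "below", and PF when the operands are unordered (a NaN is involved). Signed conditions therefore have to be remapped before branching on a floating-point compare. The same decision in plain C++, assuming IEEE doubles:

    #include <cassert>
    #include <cmath>

    // After ucomisd, "a < b" is the *below* condition (CF set), and a NaN
    // operand makes every ordered test fail -- the parity_even case that the
    // comparison code routes to the stub.
    bool DoubleLess(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return false;  // unordered
      return a < b;                                      // below, not signed less
    }

    int main() {
      assert(DoubleLess(1.0, 2.0));
      assert(!DoubleLess(NAN, 2.0));
      return 0;
    }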
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
-                                        bool inline_number_compare) {
-  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
-  if (nan_info == kCantBothBeNaN) {
-    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
-  }
-  if (inline_number_compare) {
-    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
-  }
-  return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
-                               Condition cc,
-                               bool strict,
-                               ControlDestination* dest) {
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == equal);
-
-  Result left_side;
-  Result right_side;
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == greater || cc == less_equal) {
-    cc = ReverseCondition(cc);
-    left_side = frame_->Pop();
-    right_side = frame_->Pop();
-  } else {
-    right_side = frame_->Pop();
-    left_side = frame_->Pop();
-  }
-  ASSERT(cc == less || cc == equal || cc == greater_equal);
-
-  // If either side is a constant smi, optimize the comparison.
-  bool left_side_constant_smi = false;
-  bool left_side_constant_null = false;
-  bool left_side_constant_1_char_string = false;
-  if (left_side.is_constant()) {
-    left_side_constant_smi = left_side.handle()->IsSmi();
-    left_side_constant_null = left_side.handle()->IsNull();
-    left_side_constant_1_char_string =
-        (left_side.handle()->IsString() &&
-         String::cast(*left_side.handle())->length() == 1 &&
-         String::cast(*left_side.handle())->IsAsciiRepresentation());
-  }
-  bool right_side_constant_smi = false;
-  bool right_side_constant_null = false;
-  bool right_side_constant_1_char_string = false;
-  if (right_side.is_constant()) {
-    right_side_constant_smi = right_side.handle()->IsSmi();
-    right_side_constant_null = right_side.handle()->IsNull();
-    right_side_constant_1_char_string =
-        (right_side.handle()->IsString() &&
-         String::cast(*right_side.handle())->length() == 1 &&
-         String::cast(*right_side.handle())->IsAsciiRepresentation());
-  }
-
-  if (left_side_constant_smi || right_side_constant_smi) {
-    bool is_loop_condition = (node->AsExpression() != NULL) &&
-        node->AsExpression()->is_loop_condition();
-    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
-                          left_side_constant_smi, right_side_constant_smi,
-                          is_loop_condition);
-  } else if (left_side_constant_1_char_string ||
-             right_side_constant_1_char_string) {
-    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
-      // Trivial case, comparing two constants.
-      int left_value = String::cast(*left_side.handle())->Get(0);
-      int right_value = String::cast(*right_side.handle())->Get(0);
-      switch (cc) {
-        case less:
-          dest->Goto(left_value < right_value);
-          break;
-        case equal:
-          dest->Goto(left_value == right_value);
-          break;
-        case greater_equal:
-          dest->Goto(left_value >= right_value);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    } else {
-      // Only one side is a constant 1 character string.
-      // If left side is a constant 1-character string, reverse the operands.
-      // Since one side is a constant string, conversion order does not matter.
-      if (left_side_constant_1_char_string) {
-        Result temp = left_side;
-        left_side = right_side;
-        right_side = temp;
-        cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
-        // CompareStub and the inline code both support all values of cc.
-      }
-      // Implement comparison against a constant string, inlining the case
-      // where both sides are strings.
-      left_side.ToRegister();
-
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_not_string, is_string;
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
-      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
-      Condition is_smi = masm()->CheckSmi(left_reg);
-      is_not_string.Branch(is_smi, &left_side);
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ movq(temp.reg(),
-              FieldOperand(left_reg, HeapObject::kMapOffset));
-      __ movzxbl(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-      // If we are testing for equality then make use of the symbol shortcut.
-      // Check if the left hand side has the same type as the right hand
-      // side (which is always a symbol).
-      if (cc == equal) {
-        Label not_a_symbol;
-        STATIC_ASSERT(kSymbolTag != 0);
-        // Ensure that no non-strings have the symbol bit set.
-        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
-        __ testb(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
-        __ j(zero, &not_a_symbol);
-        // They are symbols, so do identity compare.
-        __ Cmp(left_reg, right_side.handle());
-        dest->true_target()->Branch(equal);
-        dest->false_target()->Branch(not_equal);
-        __ bind(&not_a_symbol);
-      }
-      // Call the compare stub if the left side is not a flat ascii string.
-      __ andb(temp.reg(),
-              Immediate(kIsNotStringMask |
-                        kStringRepresentationMask |
-                        kStringEncodingMask));
-      __ cmpb(temp.reg(),
-              Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-      temp.Unuse();
-      is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
-      is_not_string.Bind(&left_side);
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, &left_side, &right_side);
-      result.ToRegister();
-      __ testq(result.reg(), result.reg());
-      result.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_string.Bind(&left_side);
-      // left_side is a sequential ASCII string.
-      ASSERT(left_side.reg().is(left_reg));
-      right_side = Result(right_val);
-      Result temp2 = allocator_->Allocate();
-      ASSERT(temp2.is_valid());
-      // Test string equality and comparison.
-      if (cc == equal) {
-        Label comparison_done;
-        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                      Smi::FromInt(1));
-        __ j(not_equal, &comparison_done);
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                Immediate(char_value));
-        __ bind(&comparison_done);
-      } else {
-        __ movq(temp2.reg(),
-                FieldOperand(left_side.reg(), String::kLengthOffset));
-        __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
-        Label comparison;
-        // If the length is 0 then the subtraction gave -1 which compares less
-        // than any character.
-        __ j(negative, &comparison);
-        // Otherwise load the first character.
-        __ movzxbl(temp2.reg(),
-                   FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
-        __ bind(&comparison);
-        // Compare the first character of the string with the
-        // constant 1-character string.
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
-        __ cmpb(temp2.reg(), Immediate(char_value));
-        Label characters_were_different;
-        __ j(not_equal, &characters_were_different);
-        // If the first character is the same then the long string sorts after
-        // the short one.
-        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                      Smi::FromInt(1));
-        __ bind(&characters_were_different);
-      }
-      temp2.Unuse();
-      left_side.Unuse();
-      right_side.Unuse();
-      dest->Split(cc);
-    }
-  } else {
-    // Neither side is a constant Smi, constant 1-char string, or constant null.
-    // If either side is a non-smi constant, or known to be a heap number,
-    // skip the smi check.
-    bool known_non_smi =
-        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
-        left_side.type_info().IsDouble() ||
-        right_side.type_info().IsDouble();
-
-    NaNInformation nan_info =
-        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
-        kBothCouldBeNaN :
-        kCantBothBeNaN;
-
-    // Inline number comparison, handling any combination of smis and heap
-    // numbers, if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a for-loop comparison.
-    // The reason for excluding equal is that equality will most likely be
-    // tested on smis (not heap numbers), and the code for comparing smis is
-    // inlined separately. The same reasoning applies to for-loop comparisons,
-    // which will also most likely be smi comparisons.
-    bool is_loop_condition = (node->AsExpression() != NULL)
-        && node->AsExpression()->is_loop_condition();
-    bool inline_number_compare =
-        loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
-    // Left and right needed in registers for the following code.
-    left_side.ToRegister();
-    right_side.ToRegister();
-
-    if (known_non_smi) {
-      // Inlined equality check:
-      // If at least one of the objects is not NaN, then if the objects
-      // are identical, they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmpq(left_side.reg(), right_side.reg());
-        dest->true_target()->Branch(equal);
-      }
-
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-      }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
-      CompareStub stub(cc, strict, flags);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flag.
-      answer.Unuse();
-      dest->Split(cc);
-    } else {
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_smi;
-      Register left_reg = left_side.reg();
-      Register right_reg = right_side.reg();
-
-      // In-line check for comparing two smis.
-      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
-      if (has_valid_frame()) {
-        // Inline the equality check if both operands can't be a NaN. If both
-        // objects are the same they are equal.
-        if (nan_info == kCantBothBeNaN && cc == equal) {
-          __ cmpq(left_side.reg(), right_side.reg());
-          dest->true_target()->Branch(equal);
-        }
-
-        // Inlined number comparison:
-        if (inline_number_compare) {
-          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-        }
-
-        // End of in-line compare, call out to the compare stub. Don't include
-        // number comparison in the stub if it was inlined.
-        CompareFlags flags =
-            ComputeCompareFlags(nan_info, inline_number_compare);
-        CompareStub stub(cc, strict, flags);
-        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-        __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
-        answer.Unuse();
-        if (is_smi.is_linked()) {
-          dest->true_target()->Branch(cc);
-          dest->false_target()->Jump();
-        } else {
-          dest->Split(cc);
-        }
-      }
-
-      if (is_smi.is_linked()) {
-        is_smi.Bind();
-        left_side = Result(left_reg);
-        right_side = Result(right_reg);
-        __ SmiCompare(left_side.reg(), right_side.reg());
-        right_side.Unuse();
-        left_side.Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
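The inlined one-character string comparison in Comparison above relies on two lexicographic facts: strings order by their first differing character, and when the first characters match, the longer string sorts after the shorter one (the final SmiCompare against Smi::FromInt(1)). A sketch of that logic under those assumptions, using standard strings rather than V8 heap strings:

    #include <cassert>
    #include <string>

    // Compare a string against a one-character constant: negative, zero, or
    // positive, like strcmp.
    int CompareToOneChar(const std::string& left, char right) {
      if (left.empty()) return -1;       // length-1 == -1 compares below any char
      if (left[0] != right) return left[0] < right ? -1 : 1;
      return left.size() == 1 ? 0 : 1;   // first chars equal: longer sorts after
    }

    int main() {
      assert(CompareToOneChar("a", 'b') < 0);
      assert(CompareToOneChar("ab", 'a') > 0);  // "ab" > "a"
      assert(CompareToOneChar("", 'a') < 0);
      assert(CompareToOneChar("a", 'a') == 0);
      return 0;
    }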
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
-                                          bool strict,
-                                          ControlDestination* dest,
-                                          Result* left_side,
-                                          Result* right_side,
-                                          bool left_side_constant_smi,
-                                          bool right_side_constant_smi,
-                                          bool is_loop_condition) {
-  if (left_side_constant_smi && right_side_constant_smi) {
-    // Trivial case, comparing two constants.
-    int left_value = Smi::cast(*left_side->handle())->value();
-    int right_value = Smi::cast(*right_side->handle())->value();
-    switch (cc) {
-      case less:
-        dest->Goto(left_value < right_value);
-        break;
-      case equal:
-        dest->Goto(left_value == right_value);
-        break;
-      case greater_equal:
-        dest->Goto(left_value >= right_value);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    // Only one side is a constant Smi.
-    // If left side is a constant Smi, reverse the operands.
-    // Since one side is a constant Smi, conversion order does not matter.
-    if (left_side_constant_smi) {
-      Result* temp = left_side;
-      left_side = right_side;
-      right_side = temp;
-      cc = ReverseCondition(cc);
-      // This may re-introduce greater or less_equal as the value of cc.
-      // CompareStub and the inline code both support all values of cc.
-    }
-    // Implement comparison against a constant Smi, inlining the case
-    // where both sides are smis.
-    left_side->ToRegister();
-    Register left_reg = left_side->reg();
-    Smi* constant_smi = Smi::cast(*right_side->handle());
-
-    if (left_side->is_smi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(left_reg);
-      }
-      // Test smi equality and ordering by signed integer comparison.
-      __ SmiCompare(left_reg, constant_smi);
-      left_side->Unuse();
-      right_side->Unuse();
-      dest->Split(cc);
-    } else {
-      // Only the case where the left side could possibly be a non-smi is left.
-      JumpTarget is_smi;
-      if (cc == equal) {
-        // We can do the equality comparison before the smi check.
-        __ Cmp(left_reg, constant_smi);
-        dest->true_target()->Branch(equal);
-        Condition left_is_smi = masm_->CheckSmi(left_reg);
-        dest->false_target()->Branch(left_is_smi);
-      } else {
-        // Do the smi check, then the comparison.
-        Condition left_is_smi = masm_->CheckSmi(left_reg);
-        is_smi.Branch(left_is_smi, left_side, right_side);
-      }
-
-      // Jump or fall through to here if we are comparing a non-smi to a
-      // constant smi.  If the non-smi is a heap number and this is not
-      // a loop condition, inline the floating point code.
-      if (!is_loop_condition) {
-        // Right side is a constant smi and left side has been checked
-        // not to be a smi.
-        JumpTarget not_number;
-        __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        not_number.Branch(not_equal, left_side);
-        __ movsd(xmm1,
-                 FieldOperand(left_reg, HeapNumber::kValueOffset));
-        int value = constant_smi->value();
-        if (value == 0) {
-          __ xorpd(xmm0, xmm0);
-        } else {
-          Result temp = allocator()->Allocate();
-          __ movl(temp.reg(), Immediate(value));
-          __ cvtlsi2sd(xmm0, temp.reg());
-          temp.Unuse();
-        }
-        __ ucomisd(xmm1, xmm0);
-        // Jump to builtin for NaN.
-        not_number.Branch(parity_even, left_side);
-        left_side->Unuse();
-        dest->true_target()->Branch(DoubleCondition(cc));
-        dest->false_target()->Jump();
-        not_number.Bind(left_side);
-      }
-
-      // Set up and call the compare stub.
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, left_side, right_side);
-      result.ToRegister();
-      __ testq(result.reg(), result.reg());
-      result.Unuse();
-      if (cc == equal) {
-        dest->Split(cc);
-      } else {
-        dest->true_target()->Branch(cc);
-        dest->false_target()->Jump();
-
-        // For performance it is important that this case comes at the end.
-        is_smi.Bind(left_side, right_side);
-        __ SmiCompare(left_reg, constant_smi);
-        left_side->Unuse();
-        right_side->Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
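A detail worth noting in ConstantSmiComparison above: for cc == equal the tagged compare runs before the smi check. That order is sound because equality with a specific tagged smi value already implies the operand is that smi, and a smi that compares not-equal can be rejected without looking at its map. A sketch with a simplified one-bit smi tag (x64 V8 actually keeps the payload in the upper 32 bits, but the reasoning is the same):

    #include <cassert>
    #include <cstdint>

    // Simplified tagging: smis have a clear low bit, payload shifted left once.
    inline intptr_t SmiTag(intptr_t n) { return n << 1; }
    inline bool IsSmi(intptr_t v) { return (v & 1) == 0; }

    // Three-way outcome of the fast path for "tagged == constant smi".
    enum Outcome { kEqual, kNotEqual, kNeedSlowPath };

    Outcome EqualsConstantSmi(intptr_t tagged, intptr_t constant) {
      if (tagged == SmiTag(constant)) return kEqual;  // whole-word equality
      if (IsSmi(tagged)) return kNotEqual;            // a different smi
      return kNeedSlowPath;                           // heap object: check map
    }

    int main() {
      assert(EqualsConstantSmi(SmiTag(7), 7) == kEqual);
      assert(EqualsConstantSmi(SmiTag(8), 7) == kNotEqual);
      return 0;
    }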
-
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right result, if the operand is not a
-// number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
-                                  Result* operand,
-                                  XMMRegister xmm_reg,
-                                  Result* left_side,
-                                  Result* right_side,
-                                  JumpTarget* not_numbers) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiToInteger32(kScratchRegister, operand->reg());
-    __ cvtlsi2sd(xmm_reg, kScratchRegister);
-  } else {
-    // Operand type not known, check for smi or heap number.
-    Label smi;
-    __ JumpIfSmi(operand->reg(), &smi);
-    if (!operand->type_info().IsNumber()) {
-      __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-      __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-              kScratchRegister);
-      not_numbers->Branch(not_equal, left_side, right_side, taken);
-    }
-    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-
-    __ bind(&smi);
-    // Convert the smi to a double and keep the original smi.
-    __ SmiToInteger32(kScratchRegister, operand->reg());
-    __ cvtlsi2sd(xmm_reg, kScratchRegister);
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
-                                                   Result* right_side,
-                                                   Condition cc,
-                                                   ControlDestination* dest) {
-  ASSERT(left_side->is_register());
-  ASSERT(right_side->is_register());
-
-  JumpTarget not_numbers;
-  // Load left and right operand into registers xmm0 and xmm1 and compare.
-  LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
-                        &not_numbers);
-  LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
-                        &not_numbers);
-  __ ucomisd(xmm0, xmm1);
-  // Bail out if a NaN is involved.
-  not_numbers.Branch(parity_even, left_side, right_side);
-
-  // Split to destination targets based on comparison.
-  left_side->Unuse();
-  right_side->Unuse();
-  dest->true_target()->Branch(DoubleCondition(cc));
-  dest->false_target()->Jump();
-
-  not_numbers.Bind(left_side, right_side);
-}
-
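LoadComparisonOperand converts smi operands with cvtlsi2sd. The conversion is exact: a double carries a 53-bit mantissa, so every 32-bit integer (and hence every smi payload) survives the round trip, which is why mixing smi and heap-number operands cannot change the result of the comparison. A quick check:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Every 32-bit integer fits exactly in a double's 53-bit mantissa.
      int32_t smi_like = (1 << 30) - 1;  // near the top of the smi range
      double d = static_cast<double>(smi_like);
      assert(static_cast<int32_t>(d) == smi_like);
      return 0;
    }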
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-    frame_->SpillTop();
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  Result answer = frame_->CallStub(&call_function, arg_count + 1);
-  // Restore context and replace function on the stack with the
-  // result of the stub invocation.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  frame()->Dup();
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame()->Push(name);
-  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
-  __ nop();
-  frame()->Push(&answer);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of frame at this point:
-  // Frame[0]: arguments object of the current function or the hole.
-  // Frame[1]: receiver
-  // Frame[2]: applicand.apply
-  // Frame[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  frame_->Dup();
-  Result probe = frame_->Pop();
-  { VirtualFrame::SpilledScope spilled_scope;
-    Label slow, done;
-    bool try_lazy = true;
-    if (probe.is_constant()) {
-      try_lazy = probe.handle()->IsArgumentsMarker();
-    } else {
-      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
-      probe.Unuse();
-      __ j(not_equal, &slow);
-    }
-
-    if (try_lazy) {
-      Label build_args;
-      // Get rid of the arguments object probe.
-      frame_->Drop();  // Can be called on a spilled frame.
-      // Stack now has 3 elements on it.
-      // Contents of stack at this point:
-      // rsp[0]: receiver
-      // rsp[1]: applicand.apply
-      // rsp[2]: applicand.
-
-      // Check that the receiver really is a JavaScript object.
-      __ movq(rax, Operand(rsp, 0));
-      Condition is_smi = masm_->CheckSmi(rax);
-      __ j(is_smi, &build_args);
-      // We allow all JSObjects including JSFunctions.  As long as
-      // JS_FUNCTION_TYPE is the last instance type and it is right
-      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-      // bound.
-      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-      __ j(below, &build_args);
-
-      // Check that applicand.apply is Function.prototype.apply.
-      __ movq(rax, Operand(rsp, kPointerSize));
-      is_smi = masm_->CheckSmi(rax);
-      __ j(is_smi, &build_args);
-      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
-      __ j(not_equal, &build_args);
-      __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
-      __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      Handle<Code> apply_code = Isolate::Current()->builtins()->FunctionApply();
-      __ Cmp(rcx, apply_code);
-      __ j(not_equal, &build_args);
-
-      // Check that applicand is a function.
-      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
-      is_smi = masm_->CheckSmi(rdi);
-      __ j(is_smi, &build_args);
-      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-      __ j(not_equal, &build_args);
-
-      // Copy the arguments to this function possibly from the
-      // adaptor frame below it.
-      Label invoke, adapted;
-      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-      __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
-             Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-      __ j(equal, &adapted);
-
-      // No arguments adaptor frame. Copy fixed number of arguments.
-      __ Set(rax, scope()->num_parameters());
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        __ push(frame_->ParameterAt(i));
-      }
-      __ jmp(&invoke);
-
-      // Arguments adaptor frame present. Copy arguments from there, but
-      // avoid copying too many arguments to avoid stack overflows.
-      __ bind(&adapted);
-      static const uint32_t kArgumentsLimit = 1 * KB;
-      __ SmiToInteger32(rax,
-                        Operand(rdx,
-                                ArgumentsAdaptorFrameConstants::kLengthOffset));
-      __ movl(rcx, rax);
-      __ cmpl(rax, Immediate(kArgumentsLimit));
-      __ j(above, &build_args);
-
-      // Loop through the arguments pushing them onto the execution
-      // stack. We don't inform the virtual frame of the push, so we don't
-      // have to worry about getting rid of the elements from the virtual
-      // frame.
-      Label loop;
-      // rcx is a small non-negative integer, due to the test above.
-      __ testl(rcx, rcx);
-      __ j(zero, &invoke);
-      __ bind(&loop);
-      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
-      __ decl(rcx);
-      __ j(not_zero, &loop);
-
-      // Invoke the function.
-      __ bind(&invoke);
-      ParameterCount actual(rax);
-      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-      // Drop applicand.apply and applicand from the stack, and push
-      // the result of the function call, but leave the spilled frame
-      // unchanged, with 3 elements, so it is correct when we compile the
-      // slow-case code.
-      __ addq(rsp, Immediate(2 * kPointerSize));
-      __ push(rax);
-      // Stack now has 1 element:
-      //   rsp[0]: result
-      __ jmp(&done);
-
-      // Slow case: Allocate the arguments object since we know it isn't
-      // there, and fall through to the slow path where we call
-      // applicand.apply.
-      __ bind(&build_args);
-      // Stack now has 3 elements, because we jumped here from a point where:
-      // rsp[0]: receiver
-      // rsp[1]: applicand.apply
-      // rsp[2]: applicand.
-
-      // StoreArgumentsObject requires a correct frame, and may modify it.
-      Result arguments_object = StoreArgumentsObject(false);
-      frame_->SpillAll();
-      arguments_object.ToRegister();
-      frame_->EmitPush(arguments_object.reg());
-      arguments_object.Unuse();
-      // Stack and frame now have 4 elements.
-      __ bind(&slow);
-    }
-
-    // Generic computation of x.apply(y, args) with no special optimization.
-    // Flip applicand.apply and applicand on the stack, so
-    // applicand looks like the receiver of the applicand.apply call.
-    // Then process it as a normal function call.
-    __ movq(rax, Operand(rsp, 3 * kPointerSize));
-    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-    __ movq(Operand(rsp, 2 * kPointerSize), rax);
-    __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
-    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-    Result res = frame_->CallStub(&call_function, 3);
-    // The function and its two arguments have been dropped.
-    frame_->Drop(1);  // Drop the receiver as well.
-    res.ToRegister();
-    frame_->EmitPush(res.reg());
-    // Stack now has 1 element:
-    //   rsp[0]: result
-    if (try_lazy) __ bind(&done);
-  }  // End of spilled scope.
-  // Restore the context register after a call.
-  frame_->RestoreContextRegister();
-}
-
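In the adapted-arguments path of CallApplyLazy above, the copied count is capped at kArgumentsLimit and the loop pushes arguments from the highest index down, matching the decrementing rcx. A hedged sketch with stand-in types (a std::vector plays the machine stack; none of these names are V8's):

    #include <cstdint>
    #include <vector>

    static const uint32_t kArgumentsLimit = 1 * 1024;

    // Returns false when the caller must take the slow path (&build_args) and
    // allocate a real arguments object instead of copying from the adaptor
    // frame.
    bool CopyAdaptedArguments(std::vector<intptr_t>* stack,
                              const intptr_t* args, uint32_t count) {
      if (count > kArgumentsLimit) return false;
      for (uint32_t i = count; i > 0; --i) {
        stack->push_back(args[i - 1]);  // push the last argument first
      }
      return true;
    }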
-
-class DeferredStackCheck: public DeferredCode {
- public:
-  DeferredStackCheck() {
-    set_comment("[ DeferredStackCheck");
-  }
-
-  virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
-  StackCheckStub stub;
-  __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
-  DeferredStackCheck* deferred = new DeferredStackCheck;
-  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  deferred->Branch(below);
-  deferred->BindExit();
-}
-
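CheckStack is the smallest instance of the deferred-code pattern used throughout this file: the cheap test is emitted inline and the rare slow path is moved out of line, with execution rejoining the fast path at BindExit. The control flow in plain C++ (names ours):

    #include <cstdint>

    static void CallStackCheckStub() { /* V8 handles interrupts/growth here */ }

    void CheckStackSketch(uintptr_t rsp, uintptr_t stack_limit) {
      if (rsp < stack_limit) {   // the inline CompareRoot + j(below)
        CallStackCheckStub();    // deferred slow path, out of line in real code
      }
      // deferred->BindExit(): the fast path continues here either way.
    }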
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Visit(statement);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  VisitStatements(statements);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  // Call the runtime to declare the globals.  The inevitable call
-  // will sync frame elements to memory anyway, so we do it eagerly to
-  // allow us to push the arguments directly into place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
-  frame_->EmitPush(rsi);  // The context is the first argument.
-  frame_->EmitPush(kScratchRegister);
-  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
-  frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
-  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.  Sync the virtual frame eagerly
-    // so we can simply push the arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
-    // Declaration nodes are always introduced in one of two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Smi::FromInt(attr));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-  if (val != NULL) {
-    {
-      // Set the initial value.
-      Reference target(this, node->proxy());
-      Load(val);
-      target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
-    }
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  // Remove the lingering expression result from the top of stack.
-  frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present or not.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    JumpTarget then;
-    JumpTarget else_;
-    ControlDestination dest(&then, &else_, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The else target was bound, so we compile the else part first.
-      Visit(node->else_statement());
-
-      // We may have dangling jumps to the then part.
-      if (then.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then target was bound, so we compile the then part first.
-      Visit(node->then_statement());
-
-      if (else_.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    }
-
-  } else if (has_then_stm) {
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    ControlDestination dest(&then, &exit, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // then part.
-      if (then.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then label was bound.
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    ControlDestination dest(&exit, &else_, false);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.true_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // else part.
-      if (else_.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    } else {
-      // The else label was bound.
-      Visit(node->else_statement());
-    }
-
-  } else {
-    ASSERT(!has_then_stm && !has_else_stm);
-    // We only care about the condition's side effects (not its value
-    // or control flow effect).  LoadCondition is called without
-    // forcing control flow.
-    ControlDestination dest(&exit, &exit, true);
-    LoadCondition(node->condition(), &dest, false);
-    if (!dest.is_used()) {
-      // We got a value on the frame rather than (or in addition to)
-      // control flow.
-      frame_->Drop();
-    }
-  }
-
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result return_value = frame_->Pop();
-  masm()->positions_recorder()->WriteRecordedPositions();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump(&return_value);
-  } else {
-    frame_->PrepareForReturn();
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump(&return_value);
-    } else {
-      function_return_.Bind(&return_value);
-      GenerateReturnSequence(&return_value);
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
-  // The return value is a live (but not currently reference counted)
-  // reference to rax.  This is safe because the current frame does not
-  // contain a reference to rax (it is prepared for the return by spilling
-  // all registers).
-  if (FLAG_trace) {
-    frame_->Push(return_value);
-    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-  return_value->ToRegister(rax);
-
-  // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-
-  // Leave the frame and return popping the arguments and the
-  // receiver.
-  frame_->Exit();
-  int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
-  __ Ret(arguments_bytes, rcx);
-  DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Add padding that will be overwritten by a debugger breakpoint.
-  // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
-  // with length 7 (3 + 1 + 3).
-  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
-  for (int i = 0; i < kPadding; ++i) {
-    masm_->int3();
-  }
-  // Check that the size of the code used for returning is large enough
-  // for the debugger's requirements.
-  ASSERT(Assembler::kJSReturnSequenceLength <=
-         masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
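The int3 padding above exists so the debugger can later overwrite the whole return site with a breakpoint call sequence of Assembler::kJSReturnSequenceLength bytes. The arithmetic, with the epilogue length 7 taken from the comment above and the 13-byte sequence length being our assumption about the x64 patch size rather than a quoted constant:

    #include <cassert>

    int main() {
      const int kJSReturnSequenceLength = 13;  // assumed x64 patch-site size
      const int kEpilogueLength = 7;           // movq rsp,rbp; pop rbp; ret k
      const int kPadding = kJSReturnSequenceLength - kEpilogueLength;
      assert(kPadding == 6);  // six int3 bytes keep the site patchable
      return 0;
    }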
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result context;
-  if (node->is_catch_block()) {
-    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    context = frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-
-  // Update context local.
-  frame_->SaveContextRegister();
-
-  // Verify that the runtime call result and rsi agree.
-  if (FLAG_debug_code) {
-    __ cmpq(context.reg(), rsi);
-    __ Assert(equal, "Runtime::NewContext should end up in rsi");
-  }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
-  // Update context local.
-  frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  // Compile the switch value.
-  Load(node->tag());
-
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  JumpTarget next_test;
-  // Compile the case label expressions and comparisons.  Exit early
-  // if a comparison is unconditionally true.  The target next_test is
-  // bound before the loop in order to indicate control flow to the
-  // first comparison.
-  next_test.Bind();
-  for (int i = 0; i < length && !next_test.is_unused(); i++) {
-    CaseClause* clause = cases->at(i);
-    // The default is not a test, but remember it for later.
-    if (clause->is_default()) {
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case comparison");
-    // We recycle the same target next_test for each test.  Bind it if
-    // the previous test has not done so and then unuse it for the
-    // loop.
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    next_test.Unuse();
-
-    // Duplicate the switch value.
-    frame_->Dup();
-
-    // Compile the label expression.
-    Load(clause->label());
-
-    // Compare and branch to the body if true or the next test if
-    // false.  Prefer the next test as a fall through.
-    ControlDestination dest(clause->body_target(), &next_test, false);
-    Comparison(node, equal, true, &dest);
-
-    // If the comparison fell through to the true target, jump to the
-    // actual body.
-    if (dest.true_was_fall_through()) {
-      clause->body_target()->Unuse();
-      clause->body_target()->Jump();
-    }
-  }
-
-  // If there was control flow to a next test from the last one
-  // compiled, compile a jump to the default or break target.
-  if (!next_test.is_unused()) {
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    // Drop the switch value.
-    frame_->Drop();
-    if (default_clause != NULL) {
-      default_clause->body_target()->Jump();
-    } else {
-      node->break_target()->Jump();
-    }
-  }
-
-  // The last instruction emitted was a jump, either to the default
-  // clause or the break target, or else to a case body from the loop
-  // that compiles the tests.
-  ASSERT(!has_valid_frame());
-  // Compile case bodies as needed.
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-
-    // There are two ways to reach the body: from the corresponding
-    // test or as the fall through of the previous body.
-    if (clause->body_target()->is_linked() || has_valid_frame()) {
-      if (clause->body_target()->is_linked()) {
-        if (has_valid_frame()) {
-          // If we have both a jump to the test and a fall through, put
-          // a jump on the fall through path to avoid the dropping of
-          // the switch value on the test path.  The exception is the
-          // default which has already had the switch value dropped.
-          if (clause->is_default()) {
-            clause->body_target()->Bind();
-          } else {
-            JumpTarget body;
-            body.Jump();
-            clause->body_target()->Bind();
-            frame_->Drop();
-            body.Bind();
-          }
-        } else {
-          // No fall through to worry about.
-          clause->body_target()->Bind();
-          if (!clause->is_default()) {
-            frame_->Drop();
-          }
-        }
-      } else {
-        // Otherwise, we have only fall through.
-        ASSERT(has_valid_frame());
-      }
-
-      // We are now prepared to compile the body.
-      Comment cmnt(masm_, "[ Case body");
-      VisitStatements(clause->statements());
-    }
-    clause->body_target()->Unuse();
-  }
-
-  // We may not have a valid frame here so bind the break target only
-  // if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
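VisitSwitchStatement compiles the clause labels into a linear chain of tests that each duplicate the switch value, compare, and fall through to the next test; only when every test fails is the value dropped and control sent to the default or break target. The shape of the emitted control flow, approximated in C++ (the bodies are compiled separately after the tests, which the gotos mimic):

    #include <cstdio>

    void SwitchShapeSketch(int tag) {
      if (tag == 1) goto body_1;   // dup tag; Comparison(equal); branch
      if (tag == 2) goto body_2;
      goto default_body;           // drop tag; jump to default or break

     body_1:
      std::printf("case 1\n");
      return;
     body_2:
      std::printf("case 2\n");
      return;
     default_body:
      std::printf("default\n");
    }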
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  // Label the top of the loop for the backward jump if necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // Use the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      // No need to label it.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      break;
-    case DONT_KNOW:
-      // Continue is the test, so use the backward body target.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control flow can fall off the end of the body, jump back
-      // to the top and bind the break target at the exit.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // We may have had continues or breaks in the body.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        ControlDestination dest(&body, node->break_target(), false);
-        LoadCondition(node->cond(), &dest, true);
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-  }
-
-  DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
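Both loop visitors lean on AnalyzeCondition to fold literal conditions at compile time: ALWAYS_FALSE lets VisitWhileStatement emit nothing at all, and ALWAYS_TRUE drops the test and keeps only the back edge. A sketch of what such an analysis decides (how V8's AnalyzeCondition detects literals is not shown in this section, so the trigger here is an assumption):

    enum ConditionAnalysis { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW };

    // Only conditions known to be boolean literals are decided statically;
    // anything else compiles a real runtime test.
    ConditionAnalysis AnalyzeConditionSketch(bool is_literal,
                                             bool literal_value) {
      if (!is_literal) return DONT_KNOW;
      return literal_value ? ALWAYS_TRUE : ALWAYS_FALSE;
    }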
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop with the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is the test at the bottom, no need to label the test
-        // at the top.  The body is a backward target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Label the test at the top as the continue target.  The body
-        // is a forward-only target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      }
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // The loop body has been labeled with the continue target.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        // If we have chosen to recompile the test at the bottom,
-        // then it is the continue target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here and thus an invalid fall-through).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // If we have chosen not to recompile the test at the bottom,
-        // jump back to the one at the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
-  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
-  if (slot->type() == Slot::LOCAL) {
-    frame_->SetTypeForLocalAt(slot->index(), info);
-  } else {
-    frame_->SetTypeForParamAt(slot->index(), info);
-  }
-  if (FLAG_debug_code && info.IsSmi()) {
-    if (slot->type() == Slot::LOCAL) {
-      frame_->PushLocalAt(slot->index());
-    } else {
-      frame_->PushParameterAt(slot->index());
-    }
-    Result var = frame_->Pop();
-    var.ToRegister();
-    __ AbortIfNotSmi(var.reg());
-  }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
-  // A fast smi loop is a for loop with an initializer
-  // that is a simple assignment of a smi to a stack variable,
-  // a condition that is a simple comparison of that variable against
-  // a smi constant, and a step that is an increment or decrement of
-  // the variable, where the variable isn't modified in the loop body.
-  // This guarantees that the variable is always a smi.
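-  //
-  // For example (hypothetical JavaScript source):
-  //
-  //   for (var i = 0; i < 100; i++) { sum = sum + i; }
-  //
-  // Here the initializer stores the smi 0, the condition compares the
-  // variable against the smi constant 100, and the step is Token::INC.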
-
-  Variable* loop_var = node->loop_variable();
-  Smi* initial_value = *Handle<Smi>::cast(node->init()
-      ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
-  Smi* limit_value = *Handle<Smi>::cast(
-      node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
-  Token::Value compare_op =
-      node->cond()->AsCompareOperation()->op();
-  bool increments =
-      node->next()->StatementAsCountOperation()->op() == Token::INC;
-
-  // Check that the condition isn't initially false.
-  bool initially_false = false;
-  int initial_int_value = initial_value->value();
-  int limit_int_value = limit_value->value();
-  switch (compare_op) {
-    case Token::LT:
-      initially_false = initial_int_value >= limit_int_value;
-      break;
-    case Token::LTE:
-      initially_false = initial_int_value > limit_int_value;
-      break;
-    case Token::GT:
-      initially_false = initial_int_value <= limit_int_value;
-      break;
-    case Token::GTE:
-      initially_false = initial_int_value < limit_int_value;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  if (initially_false) return;
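-  // (For example, a hypothetical loop 'for (var i = 10; i < 5; i++)'
-  // is initially false, so nothing is emitted for the loop.)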
-
-  // Only check the loop condition at the end.
-
-  Visit(node->init());
-
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  // Set the directions of the break and continue targets.
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  IncrementLoopNesting();
-  loop.Bind();
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  // Set the number type of the loop variable to smi.
-  SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
-  Visit(node->body());
-
-  if (node->continue_target()->is_linked()) {
-    node->continue_target()->Bind();
-  }
-
-  if (has_valid_frame()) {
-    CodeForStatementPosition(node);
-    Slot* loop_var_slot = loop_var->AsSlot();
-    if (loop_var_slot->type() == Slot::LOCAL) {
-      frame_->TakeLocalAt(loop_var_slot->index());
-    } else {
-      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
-      frame_->TakeParameterAt(loop_var_slot->index());
-    }
-    Result loop_var_result = frame_->Pop();
-    if (!loop_var_result.is_register()) {
-      loop_var_result.ToRegister();
-    }
-    Register loop_var_reg = loop_var_result.reg();
-    frame_->Spill(loop_var_reg);
-    if (increments) {
-      __ SmiAddConstant(loop_var_reg,
-                        loop_var_reg,
-                        Smi::FromInt(1));
-    } else {
-      __ SmiSubConstant(loop_var_reg,
-                        loop_var_reg,
-                        Smi::FromInt(1));
-    }
-
-    frame_->Push(&loop_var_result);
-    if (loop_var_slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(loop_var_slot->index());
-    } else {
-      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
-      frame_->StoreToParameterAt(loop_var_slot->index());
-    }
-    frame_->Drop();
-
-    __ SmiCompare(loop_var_reg, limit_value);
-    Condition condition;
-    switch (compare_op) {
-      case Token::LT:
-        condition = less;
-        break;
-      case Token::LTE:
-        condition = less_equal;
-        break;
-      case Token::GT:
-        condition = greater;
-        break;
-      case Token::GTE:
-        condition = greater_equal;
-        break;
-      default:
-        condition = never;
-        UNREACHABLE();
-    }
-    loop.Branch(condition);
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-
-  if (node->is_fast_smi_loop()) {
-    GenerateFastSmiLoop(node);
-    return;
-  }
-
-  // Compile the init expression if present.
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything else.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-
-  // Target for backward edge if no test at the bottom, otherwise
-  // unused.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-  // Target for backward edge if there is a test at the bottom,
-  // otherwise used as target for test at the top.
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop.
-      if (node->next() == NULL) {
-        // Use the continue target if there is no update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // Otherwise use the backward loop target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is either the update expression or the test at the
-        // bottom, no need to label the test at the top.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else if (node->next() == NULL) {
-        // We are not recompiling the test at the bottom and there is no
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // We are not recompiling the test at the bottom and there is an
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  Visit(node->body());
-
-  // If there is an update expression, compile it if necessary.
-  if (node->next() != NULL) {
-    if (node->continue_target()->is_linked()) {
-      node->continue_target()->Bind();
-    }
-
-    // Control can reach the update by falling out of the body or by a
-    // continue.
-    if (has_valid_frame()) {
-      // Record the source position of the statement as this code which
-      // is after the code for the body actually belongs to the loop
-      // statement and not the body.
-      CodeForStatementPosition(node);
-      Visit(node->next());
-    }
-  }
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      if (has_valid_frame()) {
-        if (node->next() == NULL) {
-          node->continue_target()->Jump();
-        } else {
-          loop.Jump();
-        }
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        if (node->continue_target()->is_linked()) {
-          // We can have dangling jumps to the continue target if there
-          // was no update expression.
-          node->continue_target()->Bind();
-        }
-        // Control can reach the test at the bottom by falling out of
-        // the body, by a continue in the body, or from the update
-        // expression.
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // Otherwise, jump back to the test at the top.
-        if (has_valid_frame()) {
-          if (node->next() == NULL) {
-            node->continue_target()->Jump();
-          } else {
-            loop.Jump();
-          }
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  LoadAndSpill(node->enumerable());
-
-  // Both SpiderMonkey and kjs ignore null and undefined in contrast
-  // to the specification; section 12.6.4 mandates a call to ToObject.
-  frame_->EmitPop(rax);
-
-  // rax: value to be iterated over
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  exit.Branch(equal);
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  exit.Branch(equal);
-
-  // Stack layout in body:
-  // [iteration counter (smi)] <- slot 0
-  // [length of array]         <- slot 1
-  // [FixedArray]              <- slot 2
-  // [Map or 0]                <- slot 3
-  // [Object]                  <- slot 4
-
-  // Check if enumerable is already a JSObject
-  // rax: value to be iterated over
-  Condition is_smi = masm_->CheckSmi(rax);
-  primitive.Branch(is_smi);
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-  jsobject.Branch(above_equal);
-
-  primitive.Bind();
-  frame_->EmitPush(rax);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
-  // The function call returns the value in rax, which is where we
-  // want it below.
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // rax: value to be iterated over
-  frame_->EmitPush(rax);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
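-  // For example (hypothetical JavaScript), iterating over a plain
-  // object such as {a: 1, b: 2} can use an existing enum cache, while
-  // an object with elements (e.g. obj[0] = 1) anywhere on its
-  // prototype chain fails the empty-elements check below and takes
-  // the runtime path.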
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ movq(rcx, rax);
-  loop.Bind();
-  // Check that there are no elements.
-  __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
-  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
-  call_runtime.Branch(not_equal);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in rbx for the subsequent
-  // prototype load.
-  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
-  __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
-  call_runtime.Branch(equal);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
-  is_smi = masm_->CheckSmi(rdx);
-  call_runtime.Branch(is_smi);
-  // For all objects but the receiver, check that the cache is empty.
-  __ cmpq(rcx, rax);
-  check_prototype.Branch(equal);
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
-  call_runtime.Branch(not_equal);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  __ CompareRoot(rcx, Heap::kNullValueRootIndex);
-  loop.Branch(not_equal);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a Map, we can do a fast modification check.
-  // Otherwise, we got a FixedArray, and we have to do a slow check.
-  // rax: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ movq(rdx, rax);
-  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
-  fixed_array.Branch(not_equal);
-
-  use_cache.Bind();
-  // Get enum cache
-  // rax: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ movq(rcx, rax);
-  __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
-  // Get the bridge array held in the enumeration index field.
-  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
-  // Get the cache from the bridge array.
-  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(rax);  // <- slot 3
-  frame_->EmitPush(rdx);  // <- slot 2
-  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
-  frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
-  entry.Jump();
-
-  fixed_array.Bind();
-  // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
-  frame_->EmitPush(rax);  // <- slot 2
-
-  // Push the length of the array and the initial index onto the stack.
-  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
-  frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
-
-  // Condition.
-  entry.Bind();
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  __ movq(rax, frame_->ElementAt(0));  // load the current count
-  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
-  node->break_target()->Branch(below_equal);
-
-  // Get the i'th entry of the array.
-  __ movq(rdx, frame_->ElementAt(2));
-  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx,
-          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
-  // Get the expected map from the stack or a zero map in the
-  // permanent slow case.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
-  __ movq(rdx, frame_->ElementAt(3));
-  // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
-  // rdx: expected map value
-  __ movq(rcx, frame_->ElementAt(4));
-  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ cmpq(rcx, rdx);
-  end_del_check.Branch(equal);
-
-  // Convert the entry to a string (or null if it isn't a property anymore).
-  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(rbx);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
-  __ movq(rbx, rax);
-
-  // If the property has been removed while iterating, we just skip it.
-  __ Cmp(rbx, Smi::FromInt(0));
-  node->continue_target()->Branch(equal);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.  rbx: i'th entry of the enum cache (or string thereof)
-  frame_->EmitPush(rbx);
-  { Reference each(this, node->each());
-    // Loading a reference may leave the frame in an unspilled state.
-    frame_->SpillAll();
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        frame_->EmitPush(frame_->ElementAt(each.size()));
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop(2);  // Drop the original and the copy of the element.
-      } else {
-        // If the reference has size zero then we can use the value below
-        // the reference as if it were above it, instead of pushing a new
-        // copy of it above the reference.
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop();  // Drop the original of the element.
-      }
-    }
-  }
-  // Unloading a reference may leave the frame in an unspilled state.
-  frame_->SpillAll();
-
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  VisitAndSpill(node->body());
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(rax);
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  frame_->EmitPush(rax);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
-  frame_->EmitPush(rax);
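-  // (This point is reached via the try handler: the thrown value
-  // arrives in rax and is pushed here so it can be stored into the
-  // catch variable below.)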
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  VisitStatementsAndSpill(node->catch_block()->statements());
-  if (has_valid_frame()) {
-    exit.Jump();
-  }
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
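-  //
-  // For example (hypothetical JavaScript), in
-  //   try { if (c) return f(); } catch (e) { ... }
-  // the return escapes the try block, so the function return target is
-  // shadowed and the unlink code below runs before actually returning.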
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // Make sure that there's nothing left on the stack above the
-  // handler structure.
-  if (FLAG_debug_code) {
-    __ movq(kScratchRegister, handler_address);
-    __ cmpq(rsp, Operand(kScratchRegister, 0));
-    __ Assert(equal, "stack pointer should point to top handler");
-  }
-
-  // If we can fall off the end of the try block, unlink from try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    __ movq(kScratchRegister, handler_address);
-    frame_->EmitPop(Operand(kScratchRegister, 0));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing targets that
-  // have been jumped to.  Deallocate each shadow target.
-  Result return_value;
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from try chain; be careful not to destroy the TOS if
-      // there is one.
-      if (i == kReturnShadowIndex) {
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(rax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (e.g., for...in) may have left stuff on the stack.
-      __ movq(kScratchRegister, handler_address);
-      __ movq(rsp, Operand(kScratchRegister, 0));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      __ movq(kScratchRegister, handler_address);
-      frame_->EmitPop(Operand(kScratchRegister, 0));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
-        shadows[i]->other_target()->Jump(&return_value);
-      } else {
-        shadows[i]->other_target()->Jump();
-      }
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: Used to keep track of reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
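-  // FALLING means control fell off the end of the try block, THROWING
-  // means an exception was thrown, and JUMPING + i encodes a jump to
-  // the i'th shadowing target (see the Smi::FromInt(JUMPING + i)
-  // stores below).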
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(rax);
-  // In case of thrown exceptions, this is where we continue.
-  __ Move(rcx, Smi::FromInt(THROWING));
-  finally_block.Jump();
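-  // (The three instructions above are reached only via the try
-  // handler: the thrown value arrives in rax, is pushed as the TOS
-  // value, and the state is set to THROWING before entering the
-  // finally block.)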
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    __ movq(kScratchRegister, handler_address);
-    frame_->EmitPop(Operand(kScratchRegister, 0));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in rcx, then jump around the unlink blocks if any.
-    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-    __ Move(rcx, Smi::FromInt(FALLING));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // on the virtual frame.  We must preserve it until it is
-      // pushed.
-      if (i == kReturnShadowIndex) {
-        Result return_value;
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(rax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (e.g., for...in) may have left stuff on the
-      // stack.
-      __ movq(kScratchRegister, handler_address);
-      __ movq(rsp, Operand(kScratchRegister, 0));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      __ movq(kScratchRegister, handler_address);
-      frame_->EmitPop(Operand(kScratchRegister, 0));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this target shadowed the function return, materialize
-        // the return value on the stack.
-        frame_->EmitPush(rax);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-      }
-      __ Move(rcx, Smi::FromInt(JUMPING + i));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(rcx);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  VisitStatementsAndSpill(node->finally_block()->statements());
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(rcx);
-    frame_->EmitPop(rax);
-  }
-
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      BreakTarget* original = shadows[i]->other_target();
-      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
-      if (i == kReturnShadowIndex) {
-        // The return value is (already) in rax.
-        Result return_value = allocator_->Allocate(rax);
-        ASSERT(return_value.is_valid());
-        if (function_return_is_shadowed_) {
-          original->Branch(equal, &return_value);
-        } else {
-          // Branch around the preparation for return which may emit
-          // code.
-          JumpTarget skip;
-          skip.Branch(not_equal);
-          frame_->PrepareForReturn();
-          original->Jump(&return_value);
-          skip.Bind();
-        }
-      } else {
-        original->Branch(equal);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ SmiCompare(rcx, Smi::FromInt(THROWING));
-    exit.Branch(not_equal);
-
-    // Rethrow exception.
-    frame_->EmitPush(rax);  // undo pop from above
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Spill everything, even constants, to the frame.
-  frame_->SpillAll();
-
-  frame_->DebugBreak();
-  // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // The inevitable call will sync frame elements to memory anyway, so
-  // we do it eagerly to allow us to push the arguments directly into
-  // place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame_->Push(function_info);
-    Result answer = frame_->CallStub(&stub, 1);
-    frame_->Push(&answer);
-  } else {
-    // Call the runtime to instantiate the function based on the
-    // shared function info.
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(function_info);
-    frame_->EmitPush(pretenure
-                     ? FACTORY->true_value()
-                     : FACTORY->false_value());
-    Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
-    frame_->Push(&result);
-  }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
-
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  // Check for stack-overflow exception.
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    return;
-  }
-  InstantiateFunction(function_info, node->pretenure());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  InstantiateFunction(node->shared_function_info(), false);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then;
-  JumpTarget else_;
-  JumpTarget exit;
-  ControlDestination dest(&then, &else_, true);
-  LoadCondition(node->condition(), &dest, true);
-
-  if (dest.false_was_fall_through()) {
-    // The else target was bound, so we compile the else part first.
-    Load(node->else_expression());
-
-    if (then.is_linked()) {
-      exit.Jump();
-      then.Bind();
-      Load(node->then_expression());
-    }
-  } else {
-    // The then target was bound, so we compile the then part first.
-    Load(node->then_expression());
-
-    if (else_.is_linked()) {
-      exit.Jump();
-      else_.Bind();
-      Load(node->else_expression());
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    JumpTarget slow;
-    JumpTarget done;
-    Result value;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &value,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // A runtime call is inevitable.  We eagerly sync frame elements
-    // to memory so that we can push the arguments directly into place
-    // on top of the frame.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
-    if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind(&value);
-    frame_->Push(&value);
-
-  } else if (slot->var()->mode() == Variable::CONST) {
-    // Const slots may contain 'the hole' value (the constant hasn't been
-    // initialized yet) which needs to be converted into the 'undefined'
-    // value.
-    //
-    // We currently spill the virtual frame because constants use the
-    // potentially unsafe direct-frame access of SlotOperand.
-    VirtualFrame::SpilledScope spilled_scope;
-    Comment cmnt(masm_, "[ Load const");
-    JumpTarget exit;
-    __ movq(rcx, SlotOperand(slot, rcx));
-    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
-    exit.Branch(not_equal);
-    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
-    exit.Bind();
-    frame_->EmitPush(rcx);
-
-  } else if (slot->type() == Slot::PARAMETER) {
-    frame_->PushParameterAt(slot->index());
-
-  } else if (slot->type() == Slot::LOCAL) {
-    frame_->PushLocalAt(slot->index());
-
-  } else {
-    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
-    // here.
-    //
-    // The use of SlotOperand below is safe for an unspilled frame
-    // because it will always be a context slot.
-    ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Pop the loaded value from the stack.
-  Result value = frame_->Pop();
-
-  // If the loaded value is a constant, we know whether the arguments
-  // object has been lazily allocated yet.
-  if (value.is_constant()) {
-    if (value.handle()->IsArgumentsMarker()) {
-      Result arguments = StoreArgumentsObject(false);
-      frame_->Push(&arguments);
-    } else {
-      frame_->Push(&value);
-    }
-    return;
-  }
-
-  // The loaded value is in a register. If it is the sentinel that
-  // indicates that we haven't loaded the arguments object yet, we
-  // need to do it now.
-  JumpTarget exit;
-  __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
-  frame_->Push(&value);
-  exit.Branch(not_equal);
-  Result arguments = StoreArgumentsObject(false);
-  frame_->SetElementAt(0, &arguments);
-  exit.Bind();
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    JumpTarget* slow) {
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
-  Register context = rsi;
-  Result tmp = allocator_->Allocate();
-  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      // Load next context in chain.
-      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.  If we have reached an eval scope, we check
-    // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s->is_eval_scope()) {
-    // Loop up the context chain.  There is no frame effect so it is
-    // safe to use raw labels here.
-    Label next, fast;
-    if (!context.is(tmp.reg())) {
-      __ movq(tmp.reg(), context);
-    }
-    // Load map for comparison into register, outside loop.
-    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
-    __ bind(&next);
-    // Terminate at global context.
-    __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
-    __ j(equal, &fast);
-    // Check that extension is NULL.
-    __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
-    slow->Branch(not_equal);
-    // Load next context in chain.
-    __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
-    __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-    __ jmp(&next);
-    __ bind(&fast);
-  }
-  tmp.Unuse();
-
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  LoadGlobal();
-  frame_->Push(slot->var()->name());
-  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT;
-  Result answer = frame_->CallLoadIC(mode);
-  // A test rax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test rax
-  // instruction here.
-  masm_->nop();
-  return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    Result* result,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is often used without
-  // introducing any variables; in those cases, we do not want to
-  // perform a runtime call for every variable in the scope
-  // containing the eval.
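-  //
-  // For example (hypothetical JavaScript), in
-  //   function f(s) { var x = 1; eval(s); return x; }
-  // x is dynamic: it can be read from its stack slot unless the eval
-  // introduces its own 'x', in which case the slow-case runtime
-  // lookup is required.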
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    done->Jump(result);
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      // Allocate a fresh register to use as a temp in
-      // ContextSlotOperandCheckExtensions and to hold the result
-      // value.
-      *result = allocator_->Allocate();
-      ASSERT(result->is_valid());
-      __ movq(result->reg(),
-              ContextSlotOperandCheckExtensions(potential_slot,
-                                                *result,
-                                                slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
-        done->Branch(not_equal, result);
-        __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
-      }
-      done->Jump(result);
-    } else if (rewrite != NULL) {
-      // Generate fast case for argument loads.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          Result arguments = allocator()->Allocate();
-          ASSERT(arguments.is_valid());
-          __ movq(arguments.reg(),
-                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                    arguments,
-                                                    slow));
-          frame_->Push(&arguments);
-          frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad();
-          done->Jump(result);
-        }
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.  Since the call is inevitable,
-    // we eagerly sync the virtual frame so we can directly push the
-    // arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(slot->var()->name());
-
-    Result value;
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores the attributes
-      // (e.g. READ_ONLY) of the context slot so that we can initialize const
-      // properties (introduced via eval("const foo = (some expr);")). Also,
-      // uses the current function context instead of the top context.
-      //
-      // Note that we must declare foo upon entry of eval(), via a
-      // context slot declaration, but we cannot initialize it at the same
-      // time, because the const declaration may be at the end of the eval
-      // code (sigh...) and the const variable may have been used before
-      // (where its value is 'undefined'). Thus, we can only do the
-      // initialization when we actually encounter the expression and when
-      // the expression operands are defined and valid, and thus we need the
-      // split into 2 operations: declaration of the context slot followed
-      // by initialization.
-      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling chained assignment
-    // expressions.
-    frame_->Push(&value);
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-
-    JumpTarget exit;
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is executed,
-      // the code is identical to a normal store (see below).
-      //
-      // We spill the frame in the code below because the direct-frame
-      // access of SlotOperand is potentially unsafe with an unspilled
-      // frame.
-      VirtualFrame::SpilledScope spilled_scope;
-      Comment cmnt(masm_, "[ Init const");
-      __ movq(rcx, SlotOperand(slot, rcx));
-      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
-      exit.Branch(not_equal);
-    }
-
-    // We must execute the store.  Storing a variable must keep the (new)
-    // value on the stack. This is necessary for compiling assignment
-    // expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because of const declarations which will initialize
-    // consts to 'the hole' value and by doing so, end up calling this code.
-    if (slot->type() == Slot::PARAMETER) {
-      frame_->StoreToParameterAt(slot->index());
-    } else if (slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(slot->index());
-    } else {
-      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
-      //
-      // The use of SlotOperand below is safe for an unspilled frame
-      // because the slot is a context slot.
-      ASSERT(slot->type() == Slot::CONTEXT);
-      frame_->Dup();
-      Result value = frame_->Pop();
-      value.ToRegister();
-      Result start = allocator_->Allocate();
-      ASSERT(start.is_valid());
-      __ movq(SlotOperand(slot, start.reg()), value.reg());
-      // RecordWrite may destroy the value registers.
-      //
-      // TODO(204): Avoid actually spilling when the value is not
-      // needed (probably the common case).
-      frame_->Spill(value.reg());
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
-      // The results start, value, and temp are unused by going out of
-      // scope.
-    }
-
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-  Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  Comment cmnt(masm_, "[ VariableProxy");
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  Comment cmnt(masm_, "[ Literal");
-  frame_->Push(node->handle());
-}
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
-  UNIMPLEMENTED();
-  // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
-  return false;
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function.  Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
-  DeferredRegExpLiteral(Register boilerplate,
-                        Register literals,
-                        RegExpLiteral* node)
-      : boilerplate_(boilerplate), literals_(literals), node_(node) {
-    set_comment("[ DeferredRegExpLiteral");
-  }
-
-  void Generate();
-
- private:
-  Register boilerplate_;
-  Register literals_;
-  RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
-  // Since the entry is undefined, we call the runtime system to
-  // compute the literal.
-  // Literal array (0).
-  __ push(literals_);
-  // Literal index (1).
-  __ Push(Smi::FromInt(node_->literal_index()));
-  // RegExp pattern (2).
-  __ Push(node_->pattern());
-  // RegExp flags (3).
-  __ Push(node_->flags());
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
-  DeferredAllocateInNewSpace(int size,
-                             Register target,
-                             int registers_to_save = 0)
-    : size_(size), target_(target), registers_to_save_(registers_to_save) {
-    ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
-    set_comment("[ DeferredAllocateInNewSpace");
-  }
-  void Generate();
-
- private:
-  int size_;
-  Register target_;
-  int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
-  for (int i = 0; i < kNumRegs; i++) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ push(save_register);
-    }
-  }
-  __ Push(Smi::FromInt(size_));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  if (!target_.is(rax)) {
-    __ movq(target_, rax);
-  }
-  for (int i = kNumRegs - 1; i >= 0; i--) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ pop(save_register);
-    }
-  }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  Comment cmnt(masm_, "[ RegExp Literal");
-
-  // Retrieve the literals array and check the allocated entry.  Begin
-  // with a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  // Load the literal at the index saved in the AST node.
-  Result boilerplate = allocator_->Allocate();
-  ASSERT(boilerplate.is_valid());
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
-  // Check whether we need to materialize the RegExp object.  If so,
-  // jump to the deferred code passing the literals array.
-  DeferredRegExpLiteral* deferred =
-      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
-  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
-  deferred->Branch(equal);
-  deferred->BindExit();
-
-  // The boilerplate register now contains the RegExp object.
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
-  DeferredAllocateInNewSpace* allocate_fallback =
-      new DeferredAllocateInNewSpace(size, literals.reg());
-  frame_->Push(&boilerplate);
-  frame_->SpillTop();
-  __ AllocateInNewSpace(size,
-                        literals.reg(),
-                        tmp.reg(),
-                        no_reg,
-                        allocate_fallback->entry_label(),
-                        TAG_OBJECT);
-  allocate_fallback->BindExit();
-  boilerplate = frame_->Pop();
-
-  // Copy from the boilerplate to the clone and return the clone.
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
-    __ movq(FieldOperand(literals.reg(), i), tmp.reg());
-  }
-  frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-  // Literal array.
-  frame_->Push(&literals);
-  // Literal index.
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  // Constant properties.
-  frame_->Push(node->constant_properties());
-  // Should the object literal have fast elements?
-  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
-  Result clone;
-  if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->Push(&clone);
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
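-  // (For example, in a hypothetical literal {x: f(), x: g()} the first
-  // property is marked: f() is still evaluated for its side effects,
-  // but only the value of g() is stored.)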
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through.
-      case ObjectLiteral::Property::COMPUTED: {
-        Handle<Object> key(property->key()->handle());
-        if (key->IsSymbol()) {
-          // Duplicate the object as the IC receiver.
-          frame_->Dup();
-          Load(property->value());
-          if (property->emit_store()) {
-            Result ignored =
-                frame_->CallStoreIC(Handle<String>::cast(key), false,
-                                    strict_mode_flag());
-            // A test rax instruction following the store IC call would
-            // indicate the presence of an inlined version of the
-            // store. Add a nop to indicate that there is no such
-            // inlined version.
-            __ nop();
-          } else {
-            frame_->Drop(2);
-          }
-          break;
-        }
-        // Fall through
-      }
-      case ObjectLiteral::Property::PROTOTYPE: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        Load(property->value());
-        if (property->emit_store()) {
-          frame_->Push(Smi::FromInt(NONE));   // PropertyAttributes
-          // Ignore the result.
-          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(1));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(0));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      default: UNREACHABLE();
-    }
-  }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  frame_->Push(&literals);
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  frame_->Push(node->constant_elements());
-  int length = node->values()->length();
-  Result clone;
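-  // Pick a cloning strategy: copy-on-write boilerplates share their
-  // elements array, deep or long literals go through the runtime, and
-  // small shallow literals use the fast stub.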
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
-  } else if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-  }
-  frame_->Push(&clone);
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < length; i++) {
-    Expression* value = node->values()->at(i);
-
-    if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
-      continue;
-    }
-
-    // The property must be set by generated code.
-    Load(value);
-
-    // Get the property value off the stack.
-    Result prop_value = frame_->Pop();
-    prop_value.ToRegister();
-
-    // Fetch the array literal while leaving a copy on the stack and
-    // use it to get the elements array.
-    frame_->Dup();
-    Result elements = frame_->Pop();
-    elements.ToRegister();
-    frame_->Spill(elements.reg());
-    // Get the elements FixedArray.
-    __ movq(elements.reg(),
-            FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
-    // Update the write barrier for the array address.
-    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_valid());
-    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
-  }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  ASSERT(!in_spilled_code());
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  Result result =
-      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
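-    // (For 'x += y' this loads x, loads y, then applies '+'.)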
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so this is never an assignment to a variable and
-    // there must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      frame()->Dup();
-    }
-    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame()->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else if (var != NULL) {
-      // The LoadIC stub expects the object in rax.
-      // Freeing rax causes the code generator to load the global into it.
-      frame_->Spill(rax);
-      LoadGlobal();
-    } else {
-      frame()->Dup();
-    }
-    Result value = EmitNamedLoad(name, var != NULL);
-    frame()->Push(&value);
-    Load(node->value());
-
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    Result value = frame()->Pop();
-    frame()->Push(prop->obj());
-    frame()->Push(&value);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  Result answer = EmitNamedStore(name, is_contextual);
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment.  Swap
-      // the receiver and the value of the assignment expression.
-      Result result = frame()->Pop();
-      Result receiver = frame()->Pop();
-      frame()->Push(&result);
-      frame()->Push(&receiver);
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame()->PushElementAt(1);
-    frame()->PushElementAt(1);
-    Result value = EmitKeyedLoad();
-    frame()->Push(&value);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  Result answer = EmitKeyedStore(prop->key()->type());
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Result result = frame()->Pop();
-    Result receiver = frame()->Pop();
-    frame()->Push(&result);
-    frame()->Push(&receiver);
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call doesn't actually return, but the code generator
-    // still emits code after it and expects a certain frame height.
-    frame()->Push(&result);
-  }
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  Comment cmnt(masm_, "[ Throw");
-  Load(node->exception());
-  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  Comment cmnt(masm_, "[ Property");
-  Reference property(this, node);
-  property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  Comment cmnt(masm_, "[ Call");
-
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Check if the function is a variable or a property.
-  Expression* function = node->expression();
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved in cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare the stack for the call to the resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->Push(FACTORY->undefined_value());
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Result to hold the result of the function resolution and the
-    // final result of the eval call.
-    Result result;
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                                     NOT_INSIDE_TYPEOF,
-                                                     &slow);
-      frame_->Push(&fun);
-      if (arg_count > 0) {
-        frame_->PushElementAt(arg_count);
-      } else {
-        frame_->Push(FACTORY->undefined_value());
-      }
-      frame_->PushParameterAt(-1);
-
-      // Push the strict mode flag.
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-      // Resolve the call.
-      result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump(&result);
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    frame_->PushElementAt(arg_count + 1);
-    if (arg_count > 0) {
-      frame_->PushElementAt(arg_count);
-    } else {
-      frame_->Push(FACTORY->undefined_value());
-    }
-    frame_->PushParameterAt(-1);
-
-    // Push the strict mode flag.
-    frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-    // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind(&result);
-
-    // The runtime call returns a pair of values in rax (function) and
-    // rdx (receiver). Touch up the stack with the right values.
-    Result receiver = allocator_->Allocate(rdx);
-    frame_->SetElementAt(arg_count + 1, &result);
-    frame_->SetElementAt(arg_count, &receiver);
-    receiver.Unuse();
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    result = frame_->CallStub(&call_function, arg_count + 1);
-
-    // Restore the context and overwrite the function on the stack with
-    // the result.
-    frame_->RestoreContextRegister();
-    frame_->SetElementAt(0, &result);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Push the name of the function on the frame.
-    frame_->Push(var->name());
-
-    // Call the IC initialization code.
-    CodeForSourcePosition(node->position());
-    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
-                                       arg_count,
-                                       loop_nesting());
-    frame_->RestoreContextRegister();
-    // Replace the function on the stack with the result.
-    frame_->Push(&result);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-    Result function;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &function,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Load the function from the context.  Sync the frame so we can
-    // push the arguments directly into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(var->name());
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // The runtime call returns a pair of values in rax and rdx.  The
-    // looked-up function is in rax and the receiver is in rdx.  These
-    // register references are not ref counted here.  We spill them
-    // eagerly since they are arguments to an inevitable call (and are
-    // not sharable by the arguments).
-    ASSERT(!allocator()->is_used(rax));
-    frame_->EmitPush(rax);
-
-    // Load the receiver.
-    ASSERT(!allocator()->is_used(rdx));
-    frame_->EmitPush(rdx);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind(&function);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      call.Bind();
-    }
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
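-        // This matches calls of the form 'f.apply(receiver, arguments)'.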
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Push the name of the function onto the frame.
-        frame_->Push(name);
-
-        // Call the IC initialization code.
-        CodeForSourcePosition(node->position());
-        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                           arg_count,
-                                           loop_nesting());
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the function to call from the property through a reference.
-      if (property->is_synthetic()) {
-        Reference ref(this, property, false);
-        ref.GetValue();
-        // Use global object as receiver.
-        LoadGlobalReceiver();
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the name of the function.
-        Load(property->key());
-
-        // Swap the name of the function and the receiver on the stack to follow
-        // the calling convention for call ICs.
-        Result key = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&key);
-        frame_->Push(&receiver);
-        key.Unuse();
-        receiver.Unuse();
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Place the key on top of stack and call the IC initialization code.
-        frame_->PushElementAt(arg_count + 1);
-        CodeForSourcePosition(node->position());
-        Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
-                                                arg_count,
-                                                loop_nesting());
-        frame_->Drop();  // Drop the key still on the stack.
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-    }
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver();
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-  }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Result result = frame_->CallConstructor(arg_count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  value.Unuse();
-  destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  // Finally, we're expected to leave a value on the top of the stack.
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
-  value.Unuse();
-  destination()->Split(non_negative_smi);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ LoadRoot(result_, Heap::kNanValueRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need two extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object.reg(),
-                                   index.reg(),
-                                   scratch.reg(),
-                                   result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Result code = frame_->Pop();
-  code.ToRegister();
-  ASSERT(code.is_valid());
-
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code.reg(), result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ Move(result_, Smi::FromInt(0));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need three extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch1 = allocator()->Allocate();
-  ASSERT(scratch1.is_valid());
-  Result scratch2 = allocator()->Allocate();
-  ASSERT(scratch2.is_valid());
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object.reg(),
-                               index.reg(),
-                               scratch1.reg(),
-                               scratch2.reg(),
-                               result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // It is a heap object - get map.
-  // Check if the object is a JS array or not.
-  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // It is a heap object - get map.
-  // Check if the object is a regexp.
-  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-
-  __ Move(kScratchRegister, FACTORY->null_value());
-  __ cmpq(obj.reg(), kScratchRegister);
-  destination()->true_target()->Branch(equal);
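-  // (typeof null is 'object', so null was handled explicitly above.)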
-
-  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsUndetectable));
-  destination()->false_target()->Branch(not_zero);
-  __ movzxbq(kScratchRegister,
-             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
-  __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
-  destination()->false_target()->Branch(below);
-  __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
-  obj.Unuse();
-  destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  // typeof(arg) === 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // Check that this is an object.
-  __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether a String wrapper object is safe to use
-// with the default valueOf behavior.  This code is called after the bit
-// caching this information in the map has already been checked, with the
-// object's map in the map_result_ register.  On return map_result_
-// contains 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-      __ Assert(equal, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
-    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
-    __ j(equal, &false_result);
-
-    // Look for valueOf symbol in the descriptor array, and indicate false if
-    // found. The type is not checked, so if it is a transition it is a false
-    // negative.
-    __ movq(map_result_,
-            FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch1_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
-    __ lea(scratch1_,
-           Operand(
-               map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
-    // Calculate location of the first key name.
-    __ addq(map_result_,
-            Immediate(FixedArray::kHeaderSize +
-                      DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array.  If one of them
-    // is the valueOf symbol, the result is false.
-    Label entry, loop;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(scratch2_, FieldOperand(map_result_, 0));
-    __ Cmp(scratch2_, FACTORY->value_of_symbol());
-    __ j(equal, &false_result);
-    __ addq(map_result_, Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmpq(map_result_, scratch1_);
-    __ j(not_equal, &loop);
-
-    // Reload map as register map_result_ was used as temporary above.
-    __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If no valueOf property was found on the object, check that its
-    // prototype is the unmodified String prototype.  If not, the result
-    // is false.
-    __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
-    __ testq(scratch1_, Immediate(kSmiTagMask));
-    __ j(zero, &false_result);
-    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ movq(scratch2_,
-            Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ movq(scratch2_,
-            FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ cmpq(scratch1_,
-            ContextOperand(
-                scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ j(not_equal, &false_result);
-    // Set the bit in the map to indicate that it has been checked as safe
-    // for the default valueOf, and set the result to true.
-    __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ Set(map_result_, 1);
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ Set(map_result_, 0);
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();  // Pop the string wrapper.
-  obj.ToRegister();
-  ASSERT(obj.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj.reg());
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Result map_result = allocator()->Allocate();
-  ASSERT(map_result.is_valid());
-  __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-  destination()->true_target()->Branch(not_zero);
-
-  // We need an additional two scratch registers for the deferred code.
-  Result temp1 = allocator()->Allocate();
-  ASSERT(temp1.is_valid());
-  Result temp2 = allocator()->Allocate();
-  ASSERT(temp2.is_valid());
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
-  deferred->Branch(zero);
-  deferred->BindExit();
-  __ testq(map_result.reg(), map_result.reg());
-  obj.Unuse();
-  map_result.Unuse();
-  temp1.Unuse();
-  temp2.Unuse();
-  destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
-  obj.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ movzxbl(kScratchRegister,
-             FieldOperand(kScratchRegister, Map::kBitFieldOffset));
-  __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
-  obj.Unuse();
-  destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
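-  // Implements %_IsConstructCall(): true iff the current function was
-  // invoked with 'new'.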
-
-  // Get the frame pointer for the calling frame.
-  Result fp = allocator()->Allocate();
-  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &check_frame_marker);
-  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-         Smi::FromInt(StackFrame::CONSTRUCT));
-  fp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
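-  // Implements %_ArgumentsLength().  The actual argument count differs
-  // from the formal parameter count only when an arguments adaptor
-  // frame is present.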
-
-  Result fp = allocator_->Allocate();
-  Result result = allocator_->Allocate();
-  ASSERT(fp.is_valid() && result.is_valid());
-
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ movq(result.reg(),
-          Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  result.set_type_info(TypeInfo::Smi());
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(result.reg());
-  }
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave, null, function, non_function_constructor;
-  Load(args->at(0));  // Load the object.
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  frame_->Spill(obj.reg());
-
-  // If the object is a smi, we return null.
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  null.Branch(is_smi);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-
-  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
-  null.Branch(below);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
-  function.Branch(equal);
-
-  // Check if the constructor in the map is a function.
-  __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
-  non_function_constructor.Branch(not_equal);
-
-  // The obj register now contains the constructor function. Grab the
-  // instance class name from there.
-  __ movq(obj.reg(),
-          FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
-  __ movq(obj.reg(),
-          FieldOperand(obj.reg(),
-                       SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->Push(&obj);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  frame_->Push(FACTORY->function_class_symbol());
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  frame_->Push(FACTORY->Object_symbol());
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  frame_->Push(FACTORY->null_value());
-
-  // All done.
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  frame_->Dup();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  ASSERT(object.is_valid());
-  // if (object->IsSmi()) return object.
-  Condition is_smi = masm_->CheckSmi(object.reg());
-  leave.Branch(is_smi);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // if (!object->IsJSValue()) return object.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
-  leave.Branch(not_equal);
-  __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
-  object.Unuse();
-  frame_->SetElementAt(0, &temp);
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  Load(args->at(1));  // Load the value.
-  Result value = frame_->Pop();
-  Result object = frame_->Pop();
-  value.ToRegister();
-  object.ToRegister();
-
-  // if (object->IsSmi()) return value.
-  Condition is_smi = masm_->CheckSmi(object.reg());
-  leave.Branch(is_smi, &value);
-
-  // It is a heap object - get its map.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  // if (!object->IsJSValue()) return value.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
-  leave.Branch(not_equal, &value);
-
-  // Store the value.
-  __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  Result duplicate_value = allocator_->Allocate();
-  ASSERT(duplicate_value.is_valid());
-  __ movq(duplicate_value.reg(), value.reg());
-  // The object register is also overwritten by the write barrier and
-  // possibly aliased in the frame.
-  frame_->Spill(object.reg());
-  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
-                 scratch.reg());
-  object.Unuse();
-  scratch.Unuse();
-  duplicate_value.Unuse();
-
-  // Leave.
-  leave.Bind(&value);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in rdx and the formal
-  // parameter count in rax.
-  Load(args->at(0));
-  Result key = frame_->Pop();
-  // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  Result result = frame_->CallStub(&stub, &key, &count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-  right.ToRegister();
-  left.ToRegister();
-  __ cmpq(right.reg(), left.reg());
-  right.Unuse();
-  left.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  // The RBP value is aligned, so its low bits are zero and it looks like a
-  // tagged smi.  It is not padded like a smi, though, so it must not
-  // actually be treated as one.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  Result rbp_as_smi = allocator_->Allocate();
-  ASSERT(rbp_as_smi.is_valid());
-  __ movq(rbp_as_smi.reg(), rbp);
-  frame_->Push(&rbp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  frame_->SpillAll();
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-  __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ movq(rbx, rax);
-
-  __ bind(&heapnumber_allocated);
-
-  // Return a random uint32 number in rax.
-  // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(0);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
-
-  // Convert 32 random bits in rax to 0.(32 random bits) in a double
-  // by computing:
-  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
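-  // After the xor, xmm0 holds 2^20 + r * 2^-32 where r is the 32 random
-  // bits; subtracting 2^20 leaves 0.(32 random bits) in [0, 1).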
-  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-  __ movd(xmm1, rcx);
-  __ movd(xmm0, rax);
-  __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
-  __ subsd(xmm0, xmm1);
-  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
-  __ movq(rax, rbx);
-  Result result = allocator_->Allocate(rax);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  Result answer = frame_->CallStub(&stub, 3);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 4);
-
-  // Load the arguments on the stack and call the runtime system.
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-  RegExpExecStub stub;
-  Result result = frame_->CallStub(&stub, 4);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-  RegExpConstructResultStub stub;
-  Result result = frame_->CallStub(&stub, 3);
-  frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst,
-                      Register cache,
-                      Register key,
-                      Register scratch)
-      : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;    // On invocation holds the finger index (as int32); on
-                    // exit holds the value that was looked up.
-  Register cache_;  // instance of JSFunctionResultCache.
-  Register key_;    // key being looked up.
-  Register scratch_;
-};
-
-
-// Returns the operand for the element at |index| + |additional_offset| in
-// the FixedArray whose pointer is held in |array|.  |index| is an int32.
-static Operand ArrayElement(Register array,
-                            Register index,
-                            int additional_offset = 0) {
-  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
-  return FieldOperand(array, index, times_pointer_size, offset);
-}
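
Concretely, the operand resolves to the address below (a sketch with assumed
x64 constants: 8-byte pointers, a heap-object tag of 1, and a two-word
FixedArray header; ArrayElementAddress is an illustrative name):

#include <cstdint>

// Untagged address of element |index| + |additional_offset| in a
// FixedArray given its tagged pointer, mirroring the FieldOperand above.
uint8_t* ArrayElementAddress(uint8_t* tagged_array, int32_t index,
                             int additional_offset = 0) {
  const int kHeapObjectTag = 1;                        // assumed tag
  const int kPointerSize = 8;                          // x64
  const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  int offset = kFixedArrayHeaderSize + additional_offset * kPointerSize;
  return tagged_array - kHeapObjectTag + offset + index * kPointerSize;
}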
-
-
-void DeferredSearchCache::Generate() {
-  Label first_loop, search_further, second_loop, cache_miss;
-
-  Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
-  Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
-  // Check the cache from the finger back to the start of the cache.
-  __ bind(&first_loop);
-  __ subl(dst_, kEntrySizeImm);
-  __ cmpl(dst_, kEntriesIndexImm);
-  __ j(less, &search_further);
-
-  __ cmpq(ArrayElement(cache_, dst_), key_);
-  __ j(not_equal, &first_loop);
-
-  __ Integer32ToSmiField(
-      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ movq(dst_, ArrayElement(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&search_further);
-
-  // Check the cache from the end of the cache down to the finger.
-  __ SmiToInteger32(dst_,
-                    FieldOperand(cache_,
-                                 JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiToInteger32(scratch_,
-                    FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
-  __ bind(&second_loop);
-  __ subl(dst_, kEntrySizeImm);
-  __ cmpl(dst_, scratch_);
-  __ j(less_equal, &cache_miss);
-
-  __ cmpq(ArrayElement(cache_, dst_), key_);
-  __ j(not_equal, &second_loop);
-
-  __ Integer32ToSmiField(
-      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ movq(dst_, ArrayElement(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&cache_miss);
-  __ push(cache_);  // Store a reference to the cache.
-  __ push(key_);  // Store the key.
-  __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ push(key_);
-  // On x64 the function must be in rdi.
-  __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
-  ParameterCount expected(1);
-  __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
-  // Find a place to put the new cached value.
-  Label add_new_entry, update_cache;
-  __ movq(rcx, Operand(rsp, kPointerSize));  // Restore the cache.
-  // Possible optimization: the cache size is constant for a given cache,
-  // so technically we could use a constant here.  However, on a cache
-  // miss this optimization would hardly matter.
-
-  // Check if we can add a new entry to the cache.
-  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(r9,
-                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ cmpl(rbx, r9);
-  __ j(greater, &add_new_entry);
-
-  // Check if we can evict the entry after the finger.
-  __ SmiToInteger32(rdx,
-                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ addl(rdx, kEntrySizeImm);
-  Label forward;
-  __ cmpl(rbx, rdx);
-  __ j(greater, &forward);
-  // We need to wrap around to the start of the cache.
-  __ movl(rdx, kEntriesIndexImm);
-  __ bind(&forward);
-  __ movl(r9, rdx);
-  __ jmp(&update_cache);
-
-  __ bind(&add_new_entry);
-  // r9 holds cache size as int32.
-  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
-  __ Integer32ToSmiField(
-      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
-  // Update the cache itself.
-  // r9 holds the index as int32.
-  __ bind(&update_cache);
-  __ pop(rbx);  // Restore the key.
-  __ Integer32ToSmiField(
-      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
-  // Store key.
-  __ movq(ArrayElement(rcx, r9), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
-
-  // Store value.
-  __ pop(rcx);  // Restore the cache.
-  __ SmiToInteger32(rdx,
-                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ incl(rdx);
-  // Back up rax, because the RecordWrite macro clobbers its arguments.
-  __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rax);
-  __ RecordWrite(rcx, 0, rbx, rdx);
-
-  if (!dst_.is(rax)) {
-    __ movq(dst_, rax);
-  }
-}
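
Stripped of register allocation and deferred-code plumbing, the probe
implemented above reduces to the following finger-based scan (an illustrative
sketch; ResultCache and Probe are made-up names, and the miss path omits the
factory call and cache update that the deferred code performs):

#include <cstddef>

// A cache is a flat array whose first kEntriesIndex slots hold
// bookkeeping (the factory function, the finger, and the cache size in
// the real layout); [key, value] pairs follow.  |finger| indexes the
// most recently hit key.
struct ResultCache {
  static const size_t kEntriesIndex = 4;  // assumed header size
  static const size_t kEntrySize = 2;     // key + value
  void** slots;
  size_t size;    // one past the last used slot
  size_t finger;  // index of the last hit key

  void* Probe(void* key) {
    // First loop: from the finger back towards the first entry.
    for (size_t i = finger; i > kEntriesIndex; ) {
      i -= kEntrySize;
      if (slots[i] == key) { finger = i; return slots[i + 1]; }
    }
    // Second loop: from the end of the cache back down to the finger.
    for (size_t i = size; i > finger + kEntrySize; ) {
      i -= kEntrySize;
      if (slots[i] == key) { finger = i; return slots[i + 1]; }
    }
    return 0;  // Miss: call the factory, store the pair, move the finger.
  }
};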
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      Isolate::Current()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->Push(FACTORY->undefined_value());
-    return;
-  }
-
-  Load(args->at(1));
-  Result key = frame_->Pop();
-  key.ToRegister();
-
-  Result cache = allocator()->Allocate();
-  ASSERT(cache.is_valid());
-  __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
-  __ movq(cache.reg(),
-          FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
-  __ movq(cache.reg(),
-          ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ movq(cache.reg(),
-          FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
-                                                          cache.reg(),
-                                                          key.reg(),
-                                                          scratch.reg());
-
-  const int kFingerOffset =
-      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  // The finger is stored as a smi; load it into tmp.reg() as an int32.
-  __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
-  __ cmpq(key.reg(), FieldOperand(cache.reg(),
-                                  tmp.reg(), times_pointer_size,
-                                  FixedArray::kHeaderSize));
-  deferred->Branch(not_equal);
-  __ movq(tmp.reg(), FieldOperand(cache.reg(),
-                                  tmp.reg(), times_pointer_size,
-                                  FixedArray::kHeaderSize + kPointerSize));
-
-  deferred->BindExit();
-  frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and jump to the runtime.
-  Load(args->at(0));
-
-  NumberToStringStub stub;
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  Result index2 = frame_->Pop();
-  index2.ToRegister();
-
-  Result index1 = frame_->Pop();
-  index1.ToRegister();
-
-  Result object = frame_->Pop();
-  object.ToRegister();
-
-  Result tmp1 = allocator()->Allocate();
-  tmp1.ToRegister();
-  Result tmp2 = allocator()->Allocate();
-  tmp2.ToRegister();
-
-  frame_->Spill(object.reg());
-  frame_->Spill(index1.reg());
-  frame_->Spill(index2.reg());
-
-  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
-                                                            index1.reg(),
-                                                            index2.reg());
-
-  // Fetch the map and check if the array is in the fast case.
-  // Check that the object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg());
-  deferred->Branch(not_equal);
-  __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
-           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(not_zero);
-
-  // Check that the object's elements are in the fast case and writable.
-  __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  deferred->Branch(not_equal);
-
-  // Check that both indices are smis.
-  Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
-  deferred->Branch(NegateCondition(both_smi));
-
-  // Check that both indices are valid.
-  __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
-  __ SmiCompare(tmp2.reg(), index1.reg());
-  deferred->Branch(below_equal);
-  __ SmiCompare(tmp2.reg(), index2.reg());
-  deferred->Branch(below_equal);
-
-  // Compute the element addresses into index1 and index2.
-  __ SmiToInteger32(index1.reg(), index1.reg());
-  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
-                                    index1.reg(),
-                                    times_pointer_size,
-                                    FixedArray::kHeaderSize));
-  __ SmiToInteger32(index2.reg(), index2.reg());
-  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
-                                    index2.reg(),
-                                    times_pointer_size,
-                                    FixedArray::kHeaderSize));
-
-  // Swap elements.
-  __ movq(object.reg(), Operand(index1.reg(), 0));
-  __ movq(tmp2.reg(), Operand(index2.reg(), 0));
-  __ movq(Operand(index2.reg(), 0), object.reg());
-  __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
-  Label done;
-  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: check that both values are smis
-  // (OR them together and test against the smi tag mask).
-
-  __ movq(tmp2.reg(), tmp1.reg());
-  __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
-  __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
-  __ bind(&done);
-
-  deferred->BindExit();
-  frame_->Push(FACTORY->undefined_value());
-}
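
Once every guard has passed, the fast path is just three moves through the
backing store; in plain C++ terms (an illustrative sketch, not V8 API):

#include <cstddef>

// Both indices are in-bounds smis and the elements array is a fast,
// writable FixedArray, so swapping is three pointer moves.  The
// generated code then records both stores with RecordWriteHelper
// unless the backing store is in new space, where no barrier is needed.
void SwapElements(void** elements, size_t index1, size_t index2) {
  void* tmp = elements[index1];
  elements[index1] = elements[index2];
  elements[index2] = tmp;
}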
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // Exclude the receiver and the function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  Result result = frame_->CallJSFunction(n_args);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Label allocate_return;
-  // Load the two operands while leaving the values on the frame.
-  frame()->Dup();
-  Result exponent = frame()->Pop();
-  exponent.ToRegister();
-  frame()->Spill(exponent.reg());
-  frame()->PushElementAt(1);
-  Result base = frame()->Pop();
-  base.ToRegister();
-  frame()->Spill(base.reg());
-
-  Result answer = allocator()->Allocate();
-  ASSERT(answer.is_valid());
-  ASSERT(!exponent.reg().is(base.reg()));
-  JumpTarget call_runtime;
-
-  // Save 1.0 in xmm3; we need it several times later on.
-  __ movl(answer.reg(), Immediate(1));
-  __ cvtlsi2sd(xmm3, answer.reg());
-
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
-  __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
-  // Optimized version when y is an integer.
-  Label powi;
-  __ SmiToInteger32(base.reg(), base.reg());
-  __ cvtlsi2sd(xmm0, base.reg());
-  __ jmp(&powi);
-  // The exponent is a smi and the base is a heap number.
-  __ bind(&base_nonsmi);
-  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-
-  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-  // Optimized version of pow if y is an integer.
-  __ bind(&powi);
-  __ SmiToInteger32(exponent.reg(), exponent.reg());
-
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ movl(base.reg(), exponent.reg());
-
-  // Get absolute value of exponent.
-  Label no_neg;
-  __ cmpl(exponent.reg(), Immediate(0));
-  __ j(greater_equal, &no_neg);
-  __ negl(exponent.reg());
-  __ bind(&no_neg);
-
-  // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
-  __ bind(&while_true);
-  __ shrl(exponent.reg(), Immediate(1));
-  __ j(not_carry, &no_multiply);
-  __ mulsd(xmm1, xmm0);
-  __ bind(&no_multiply);
-  __ testl(exponent.reg(), exponent.reg());
-  __ mulsd(xmm0, xmm0);
-  __ j(not_zero, &while_true);
-
-  // base.reg() holds the original exponent; if it is negative, return 1/result.
-  __ testl(base.reg(), base.reg());
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ movl(answer.reg(), Immediate(0x7FB00000));
-  __ movd(xmm0, answer.reg());
-  __ cvtss2sd(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  call_runtime.Branch(equal);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // The exponent (or both operands) is a heap number - no matter what, we
-  // now work on doubles.
-  __ bind(&exponent_nonsmi);
-  __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-  __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
-  // Test if the exponent is NaN.
-  __ ucomisd(xmm1, xmm1);
-  call_runtime.Branch(parity_even);
-
-  Label base_not_smi;
-  Label handle_special_cases;
-  __ JumpIfNotSmi(base.reg(), &base_not_smi);
-  __ SmiToInteger32(base.reg(), base.reg());
-  __ cvtlsi2sd(xmm0, base.reg());
-  __ jmp(&handle_special_cases);
-  __ bind(&base_not_smi);
-  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-  __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
-  __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
-  __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
-  // The base is NaN or +/-Infinity.
-  call_runtime.Branch(greater_equal);
-  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ movl(answer.reg(), Immediate(0xBF000000));
-  __ movd(xmm2, answer.reg());
-  __ cvtss2sd(xmm2, xmm2);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
-
-  // Calculates the reciprocal of the square root.
-  // sqrtsd returns -0 when the input is -0.  The ECMA spec requires +0,
-  // so add +0 first (0 + (-0) is +0) to normalize the input.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  call_runtime.Branch(not_equal);
-
-  // Calculates the square root.
-  // sqrtsd returns -0 when the input is -0.  The ECMA spec requires +0,
-  // so add +0 first to normalize a -0 input.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-
-  JumpTarget done;
-  Label failure, success;
-  __ bind(&allocate_return);
-  // Make a copy of the frame to enable us to handle allocation
-  // failure after the JumpTarget jump.
-  VirtualFrame* clone = new VirtualFrame(frame());
-  __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
-  __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
-  // Remove the two original values from the frame - we only need those
-  // in the case where we branch to runtime.
-  frame()->Drop(2);
-  exponent.Unuse();
-  base.Unuse();
-  done.Jump(&answer);
-  // Use the copy of the original frame as our current frame.
-  RegisterFile empty_regs;
-  SetFrame(clone, &empty_regs);
-  // If we experience an allocation failure we branch to runtime.
-  __ bind(&failure);
-  call_runtime.Bind();
-  answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
-  done.Bind(&answer);
-  frame()->Push(&answer);
-}
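
The while_true loop above is binary exponentiation (square-and-multiply). A
hedged C++ restatement of the inlined cases (PowSpecialCases is an
illustrative name; the generated code additionally bails out to the runtime
when a negative-exponent result reaches infinity):

#include <climits>
#include <cmath>

// Cases handled without leaving generated code; everything else goes
// to the runtime, mirroring call_runtime.Branch(...).
double PowSpecialCases(double base, double exponent, bool* handled) {
  *handled = true;
  // Smi exponent path: taken when the exponent is a small integer
  // (NaN and out-of-range values fail these comparisons).
  if (exponent >= INT_MIN && exponent <= INT_MAX &&
      exponent == static_cast<int>(exponent)) {
    int n = static_cast<int>(exponent);
    unsigned m = n < 0 ? 0u - static_cast<unsigned>(n)
                       : static_cast<unsigned>(n);  // negl: |exponent|
    double acc = 1.0;     // xmm1, seeded with the 1.0 kept in xmm3
    double square = base; // xmm0
    while (m != 0) {      // square-and-multiply, as in while_true
      if (m & 1) acc *= square;
      square *= square;
      m >>= 1;
    }
    return n < 0 ? 1.0 / acc : acc;  // negative exponents invert
  }
  if (exponent == -0.5) return 1.0 / std::sqrt(0.0 + base);  // -0 fixup
  if (exponent == 0.5) return std::sqrt(0.0 + base);
  *handled = false;  // NaN or fractional exponent: runtime call
  return 0.0;
}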
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method.  Please note that this function assumes
-// that the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-
-  // Leave the original value on the frame in case we need to call the runtime.
-  frame()->Dup();
-  Result result = frame()->Pop();
-  result.ToRegister();
-  frame()->Spill(result.reg());
-  Label runtime;
-  Label non_smi;
-  Label load_done;
-  JumpTarget end;
-
-  __ JumpIfNotSmi(result.reg(), &non_smi);
-  __ SmiToInteger32(result.reg(), result.reg());
-  __ cvtlsi2sd(xmm0, result.reg());
-  __ jmp(&load_done);
-  __ bind(&non_smi);
-  __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &runtime);
-  __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
-  __ bind(&load_done);
-  __ sqrtsd(xmm0, xmm0);
-  // A copy of the virtual frame to allow us to go to runtime after the
-  // JumpTarget jump.
-  Result scratch = allocator()->Allocate();
-  VirtualFrame* clone = new VirtualFrame(frame());
-  __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
-  __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
-  frame()->Drop(1);
-  scratch.Unuse();
-  end.Jump(&result);
-  // We only branch to runtime if we have an allocation error.
-  // Use the copy of the original frame as our current frame.
-  RegisterFile empty_regs;
-  SetFrame(clone, &empty_regs);
-  __ bind(&runtime);
-  result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
-  end.Bind(&result);
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right_res = frame_->Pop();
-  Result left_res = frame_->Pop();
-  right_res.ToRegister();
-  left_res.ToRegister();
-  Result tmp_res = allocator()->Allocate();
-  ASSERT(tmp_res.is_valid());
-  Register right = right_res.reg();
-  Register left = left_res.reg();
-  Register tmp = tmp_res.reg();
-  right_res.Unuse();
-  left_res.Unuse();
-  tmp_res.Unuse();
-  __ cmpq(left, right);
-  destination()->true_target()->Branch(equal);
-  // Fail if either is a non-HeapObject.
-  Condition either_smi =
-      masm()->CheckEitherSmi(left, right, tmp);
-  destination()->false_target()->Branch(either_smi);
-  __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
-  __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
-          Immediate(JS_REGEXP_TYPE));
-  destination()->false_target()->Branch(not_equal);
-  __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  destination()->false_target()->Branch(not_equal);
-  __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
-  __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result string = frame_->Pop();
-  string.ToRegister();
-
-  Result number = allocator()->Allocate();
-  ASSERT(number.is_valid());
-  __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
-  __ IndexFromHash(number.reg(), number.reg());
-  string.Unuse();
-  frame_->Push(&number);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  // Not implemented in this back end: returning undefined makes the
-  // caller fall back to the generic join implementation.
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  if (CheckForInlineRuntimeCall(node)) {
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Push the builtins object found in the current global object.
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(), GlobalObjectOperand());
-    __ movq(temp.reg(),
-            FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
-    frame_->Push(&temp);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    frame_->Push(node->name());
-    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                       arg_count,
-                                       loop_nesting_);
-    frame_->RestoreContextRegister();
-    frame_->Push(&answer);
-  } else {
-    // Call the C runtime function.
-    Result answer = frame_->CallRuntime(function, arg_count);
-    frame_->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    // Swap the true and false targets but keep the same actual label
-    // as the fall through.
-    destination()->Invert();
-    LoadCondition(node->expression(), destination(), true);
-    // Swap the labels back.
-    destination()->Invert();
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
-      frame_->Push(&answer);
-      return;
-    }
-
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (variable != NULL) {
-      // Deleting an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->Push(variable->name());
-        frame_->Push(Smi::FromInt(kNonStrictMode));
-        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                              CALL_FUNCTION, 3);
-        frame_->Push(&answer);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Call the runtime to delete from the context holding the named
-        // variable.  Sync the virtual frame eagerly so we can push the
-        // arguments directly into place.
-        frame_->SyncRange(0, frame_->element_count() - 1);
-        frame_->EmitPush(rsi);
-        frame_->EmitPush(variable->name());
-        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->Push(&answer);
-      } else {
-        // Default: the result of deleting a non-global, non-dynamically
-        // introduced variable is false.
-        frame_->Push(FACTORY->false_value());
-      }
-    } else {
-      // Default: the result of deleting an expression is true.
-      Load(node->expression());  // May have side effects.
-      frame_->SetElementAt(0, FACTORY->true_value());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(&answer);
-
-  } else if (op == Token::VOID) {
-    Expression* expression = node->expression();
-    if (expression && expression->AsLiteral() && (
-        expression->AsLiteral()->IsTrue() ||
-        expression->AsLiteral()->IsFalse() ||
-        expression->AsLiteral()->handle()->IsNumber() ||
-        expression->AsLiteral()->handle()->IsString() ||
-        expression->AsLiteral()->handle()->IsJSRegExp() ||
-        expression->AsLiteral()->IsNull())) {
-      // Omit evaluating the value of the primitive literal.
-      // It will be discarded anyway, and can have no side effect.
-      frame_->Push(FACTORY->undefined_value());
-    } else {
-      Load(node->expression());
-      frame_->SetElementAt(0, FACTORY->undefined_value());
-    }
-
-  } else {
-    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-    UnaryOverwriteMode overwrite =
-        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-    bool no_negative_zero = node->expression()->no_negative_zero();
-    Load(node->expression());
-    switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
-      case Token::SUB: {
-        GenericUnaryOpStub stub(
-            Token::SUB,
-            overwrite,
-            NO_UNARY_FLAGS,
-            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-        Result operand = frame_->Pop();
-        Result answer = frame_->CallStub(&stub, &operand);
-        answer.set_type_info(TypeInfo::Number());
-        frame_->Push(&answer);
-        break;
-      }
-
-      case Token::BIT_NOT: {
-        // Smi check.
-        JumpTarget smi_label;
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        operand.ToRegister();
-
-        Condition is_smi = masm_->CheckSmi(operand.reg());
-        smi_label.Branch(is_smi, &operand);
-
-        GenericUnaryOpStub stub(Token::BIT_NOT,
-                                overwrite,
-                                NO_UNARY_SMI_CODE_IN_STUB);
-        Result answer = frame_->CallStub(&stub, &operand);
-        continue_label.Jump(&answer);
-
-        smi_label.Bind(&answer);
-        answer.ToRegister();
-        frame_->Spill(answer.reg());
-        __ SmiNot(answer.reg(), answer.reg());
-        continue_label.Bind(&answer);
-        answer.set_type_info(TypeInfo::Smi());
-        frame_->Push(&answer);
-        break;
-      }
-
-      case Token::ADD: {
-        // Smi check.
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        TypeInfo operand_info = operand.type_info();
-        operand.ToRegister();
-        Condition is_smi = masm_->CheckSmi(operand.reg());
-        continue_label.Branch(is_smi, &operand);
-        frame_->Push(&operand);
-        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
-                                              CALL_FUNCTION, 1);
-
-        continue_label.Bind(&answer);
-        if (operand_info.IsSmi()) {
-          answer.set_type_info(TypeInfo::Smi());
-        } else if (operand_info.IsInteger32()) {
-          answer.set_type_info(TypeInfo::Integer32());
-        } else {
-          answer.set_type_info(TypeInfo::Number());
-        }
-        frame_->Push(&answer);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged.  Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub.  The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
-  DeferredPrefixCountOperation(Register dst,
-                               bool is_increment,
-                               TypeInfo input_type)
-      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
-  Register left;
-  if (input_type_.IsNumber()) {
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    left = rax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged.  Call into the runtime
-// to convert the argument to a number.  Update the original value in
-// old.  Call the specialized add or subtract stub.  The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
-  DeferredPostfixCountOperation(Register dst,
-                                Register old,
-                                bool is_increment,
-                                TypeInfo input_type)
-      : dst_(dst),
-        old_(old),
-        is_increment_(is_increment),
-        input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Register old_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
-  Register left;
-  if (input_type_.IsNumber()) {
-    __ push(dst_);  // Save the input to use as the old value.
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    __ push(rax);  // Save the result of ToNumber to use as the old value.
-    left = rax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-  __ pop(old_);
-}
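
Both deferred classes back the same optimistic strategy: perform the tagged
add or subtract first, and fall back only when the operand was not a smi or
the arithmetic overflowed. A minimal sketch under an illustrative low-bit
tagging scheme (the x64 code of this era actually keeps the smi value in the
upper 32 bits), using the GCC/Clang overflow builtin:

#include <cstdint>

// Illustrative tagging: smis carry a 0 in bit 0 and the value in the
// remaining bits, so Smi::FromInt(1) is the machine word 2.
const intptr_t kSmiTagMask = 1;
const intptr_t kSmiOne = 1 << 1;

bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }

// Fast path of the count operation: returns false exactly where the
// generated code jumps to deferred->entry_label().
bool OptimisticIncrement(intptr_t value, intptr_t* result) {
  if (!IsSmi(value)) return false;  // JumpIfNotSmi
  return !__builtin_add_overflow(value, kSmiOne, result);  // SmiAddConstant
}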
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  Comment cmnt(masm_, "[ CountOperation");
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
-  // Postfix operations need a stack slot under the reference to hold
-  // the old value while the new value is being stored.  This is so
-  // that, if storing the new value requires a call, the old value is
-  // in the frame and can be spilled.
-  if (is_postfix) frame_->Push(Smi::FromInt(0));
-
-  // A constant reference is not saved to, so the reference is not a
-  // compound assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!is_postfix) frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    target.TakeValue();
-
-    Result new_value = frame_->Pop();
-    new_value.ToRegister();
-
-    Result old_value;  // Only allocated in the postfix case.
-    if (is_postfix) {
-      // Allocate a temporary to preserve the old value.
-      old_value = allocator_->Allocate();
-      ASSERT(old_value.is_valid());
-      __ movq(old_value.reg(), new_value.reg());
-
-      // The return value for postfix operations is ToNumber(input).
-      // Keep more precise type info if the input is some kind of
-      // number already. If the input is not a number we have to wait
-      // for the deferred code to convert it.
-      if (new_value.type_info().IsNumber()) {
-        old_value.set_type_info(new_value.type_info());
-      }
-    }
-    // Ensure the new value is writable.
-    frame_->Spill(new_value.reg());
-
-    DeferredCode* deferred = NULL;
-    if (is_postfix) {
-      deferred = new DeferredPostfixCountOperation(new_value.reg(),
-                                                   old_value.reg(),
-                                                   is_increment,
-                                                   new_value.type_info());
-    } else {
-      deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment,
-                                                  new_value.type_info());
-    }
-
-    if (new_value.is_smi()) {
-      if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
-    } else {
-      __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
-    }
-    if (is_increment) {
-      __ SmiAddConstant(new_value.reg(),
-                        new_value.reg(),
-                        Smi::FromInt(1),
-                        deferred->entry_label());
-    } else {
-      __ SmiSubConstant(new_value.reg(),
-                        new_value.reg(),
-                        Smi::FromInt(1),
-                        deferred->entry_label());
-    }
-    deferred->BindExit();
-
-    // Postfix count operations return their input converted to
-    // number. The case when the input is already a number is covered
-    // above in the allocation code for old_value.
-    if (is_postfix && !new_value.type_info().IsNumber()) {
-      old_value.set_type_info(TypeInfo::Number());
-    }
-
-    new_value.set_type_info(TypeInfo::Number());
-
-    // Postfix: store the old value in the allocated slot under the
-    // reference.
-    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
-    frame_->Push(&new_value);
-    // Non-constant: update the reference.
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
-  }
-
-  // Postfix: drop the new value and use the old.
-  if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
-
-  // NOTE: If the left hand side produces a materialized value (not
-  // control flow), we force the right hand side to do the same. This
-  // is necessary because we assume that if we get control flow on the
-  // last path out of an expression we got it on all paths.
-  if (node->op() == Token::AND) {
-    JumpTarget is_true;
-    ControlDestination dest(&is_true, destination()->false_target(), true);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The current false target was used as the fall-through.  If
-      // there are no dangling jumps to is_true then the left
-      // subexpression was unconditionally false.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_true.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current false target was a forward jump then we have a
-        // valid frame, we have just bound the false target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->false_target()->Unuse();
-          destination()->false_target()->Jump();
-        }
-        is_true.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have actually just jumped to or bound the current false
-        // target but the current control destination is not marked as
-        // used.
-        destination()->Use(false);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_true
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_true
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&pop_and_continue, &exit, true);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    JumpTarget is_false;
-    ControlDestination dest(destination()->true_target(), &is_false, false);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.true_was_fall_through()) {
-      // The current true target was used as the fall-through.  If
-      // there are no dangling jumps to is_false then the left
-      // subexpression was unconditionally true.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_false.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current true target was a forward jump then we have a
-        // valid frame, we have just bound the true target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->true_target()->Unuse();
-          destination()->true_target()->Jump();
-        }
-        is_false.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have just jumped to or bound the current true target but
-        // the current control destination is not marked as used.
-        destination()->Use(true);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_false
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_false
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&exit, &pop_and_continue, false);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-  }
-}
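
Since the operators yield operand values, 0 && f() evaluates to 0 and
"a" || f() to "a", with f never called. A toy C++ sketch of the rule (Value
and ToBoolean here are simplified stand-ins, not V8 types):

#include <string>

// Toy JS value, just enough to show that && and || return operand
// values after a ToBoolean test (ECMA-262 section 9.2).
struct Value {
  bool is_string;
  double number;
  std::string string;
};

bool ToBoolean(const Value& v) {
  if (v.is_string) return !v.string.empty();
  return v.number == v.number && v.number != 0;  // NaN and 0 are falsy
}

Value EvaluateAnd(const Value& lhs, Value (*right)()) {
  if (!ToBoolean(lhs)) return lhs;  // short-circuit: right() never runs
  return right();                   // e.g. 1 && "x" yields "x"
}

Value EvaluateOr(const Value& lhs, Value (*right)()) {
  if (ToBoolean(lhs)) return lhs;   // e.g. "a" || f() yields "a"
  return right();
}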
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else {
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    OverwriteMode overwrite_mode = NO_OVERWRITE;
-    if (node->left()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_LEFT;
-    } else if (node->right()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_RIGHT;
-    }
-
-    if (node->left()->IsTrivial()) {
-      Load(node->right());
-      Result right = frame_->Pop();
-      frame_->Push(node->left());
-      frame_->Push(&right);
-    } else {
-      Load(node->left());
-      Load(node->right());
-    }
-    GenericBinaryOperation(node, overwrite_mode);
-  }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
-    // Load the operand and move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Result answer = frame_->Pop();
-    answer.ToRegister();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->true_target()->Branch(is_smi);
-      frame_->Spill(answer.reg());
-      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-
-      // It can be an undetectable string object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
-      answer.Unuse();
-      destination()->Split(below);  // Unsigned byte comparison needed.
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
-      destination()->true_target()->Branch(equal);
-      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
-      destination()->true_target()->Branch(equal);
-
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-
-      // It can be an undetectable object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      answer.Unuse();
-      destination()->Split(not_zero);
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-      frame_->Spill(answer.reg());
-      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
-      destination()->true_target()->Branch(equal);
-      // Regular expressions are callable so typeof == 'function'.
-      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->object_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
-      destination()->true_target()->Branch(equal);
-
-      // Regular expressions are typeof == 'function', not 'object'.
-      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
-      destination()->false_target()->Branch(equal);
-
-      // It can be an undetectable object.
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
-      destination()->false_target()->Branch(below);
-      __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
-      answer.Unuse();
-      destination()->Split(below_equal);
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      answer.Unuse();
-      destination()->Goto(false);
-    }
-    return;
-  }
-
-  Condition cc = no_condition;
-  bool strict = false;
-  switch (op) {
-    case Token::EQ_STRICT:
-      strict = true;
-      // Fall through
-    case Token::EQ:
-      cc = equal;
-      break;
-    case Token::LT:
-      cc = less;
-      break;
-    case Token::GT:
-      cc = greater;
-      break;
-    case Token::LTE:
-      cc = less_equal;
-      break;
-    case Token::GTE:
-      cc = greater_equal;
-      break;
-    case Token::IN: {
-      Load(left);
-      Load(right);
-      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
-      frame_->Push(&answer);  // push the result
-      return;
-    }
-    case Token::INSTANCEOF: {
-      Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      Result answer = frame_->CallStub(&stub, 2);
-      answer.ToRegister();
-      __ testq(answer.reg(), answer.reg());
-      answer.Unuse();
-      destination()->Split(zero);
-      return;
-    }
-    default:
-      UNREACHABLE();
-  }
-
-  if (left->IsTrivial()) {
-    Load(right);
-    Result right_result = frame_->Pop();
-    frame_->Push(left);
-    frame_->Push(&right_result);
-  } else {
-    Load(left);
-    Load(right);
-  }
-
-  Comparison(node, cc, strict, destination());
-}
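
The typeof fast path earlier in this function is a hand-unrolled classifier.
Restated as toy C++ (ToyValue is illustrative; each flag corresponds to one of
the map and instance-type tests performed inline):

// Classification order matters: undetectable objects (and strings)
// must report "undefined", and regexps are callable, hence "function".
struct ToyValue {
  bool is_smi, is_heap_number, is_string, is_boolean, is_undefined,
       is_undetectable, is_function, is_regexp, is_null;
};

const char* TypeofResult(const ToyValue& v) {
  if (v.is_smi || v.is_heap_number) return "number";
  if (v.is_string && !v.is_undetectable) return "string";
  if (v.is_boolean) return "boolean";
  if (v.is_undefined || v.is_undetectable) return "undefined";
  if (v.is_function || v.is_regexp) return "function";
  return "object";  // including null
}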
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Result operand = frame_->Pop();
-  operand.ToRegister();
-  __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
-  if (node->is_strict()) {
-    operand.Unuse();
-    destination()->Split(equal);
-  } else {
-    // The 'null' value is only equal to 'undefined' if using non-strict
-    // comparisons.
-    destination()->true_target()->Branch(equal);
-    __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
-    destination()->true_target()->Branch(equal);
-    Condition is_smi = masm_->CheckSmi(operand.reg());
-    destination()->false_target()->Branch(is_smi);
-
-    // It can be an undetectable object.
-    // Use a scratch register in preference to spilling operand.reg().
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(),
-            FieldOperand(operand.reg(), HeapObject::kMapOffset));
-    __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-             Immediate(1 << Map::kIsUndetectable));
-    temp.Unuse();
-    operand.Unuse();
-    destination()->Split(not_zero);
-  }
-}
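
Summed up: under strict equality only null itself matches, while the
non-strict comparison also accepts undefined and undetectable objects. As a
toy sketch (illustrative types, not V8's):

// Restatement of the branch structure above.
struct ToyRef {
  bool is_null, is_undefined, is_smi, is_undetectable;
};

bool CompareToNull(const ToyRef& v, bool strict) {
  if (v.is_null) return true;       // CompareRoot(kNullValueRootIndex)
  if (strict) return false;         // === matches nothing else
  if (v.is_undefined) return true;  // null == undefined
  if (v.is_smi) return false;       // smis are never null-like
  return v.is_undetectable;         // e.g. document.all
}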
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
-      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
-      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
-      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
-      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
-      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
-      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
-      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
-      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
-      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.  The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceGetNamedValue(Register dst,
-                                 Register receiver,
-                                 Handle<String> name)
-      : dst_(dst), receiver_(receiver),  name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Handle<String> name_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  if (!receiver_.is(rax)) {
-    __ movq(rax, receiver_);
-  }
-  __ Move(rcx, name_);
-  Handle<Code> ic = Isolate::Current()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a test rax instruction to indicate
-  // that the inobject property case was inlined.
-  //
-  // Store the delta to the map check instruction here in the test
-  // instruction.  Use masm_-> instead of the __ macro since the
-  // latter can't return a value.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->named_load_inline_miss(), 1);
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
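
The testl(rax, Immediate(-delta_to_patch_site)) marker is what later lets the
inline cache find the patch site: the one-byte opcode of test eax, imm32
(0xA9) flags the call site as inlined, and the immediate records the negative
byte distance back to the map-check movq. A simplified sketch of the recovery
(names and offsets are illustrative, not the exact ic-x64.cc logic):

#include <cstdint>
#include <cstring>

const uint8_t kTestEaxByte = 0xA9;  // opcode of test eax, imm32

// Given the address of the instruction following the IC call, return
// the inlined map-check instruction to patch, or null if the load was
// not inlined (no test eax marker present).
uint8_t* FindInlinedMapCheck(uint8_t* after_call) {
  if (*after_call != kTestEaxByte) return 0;
  int32_t delta;  // holds -delta_to_patch_site
  std::memcpy(&delta, after_call + 1, sizeof delta);
  return after_call + delta;  // delta is negative: points backwards
}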
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetKeyedValue(Register dst,
-                                          Register receiver,
-                                          Register key)
-      : dst_(dst), receiver_(receiver), key_(key) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  if (receiver_.is(rdx)) {
-    if (!key_.is(rax)) {
-      __ movq(rax, key_);
-    }  // else do nothing.
-  } else if (receiver_.is(rax)) {
-    if (key_.is(rdx)) {
-      __ xchg(rax, rdx);
-    } else if (key_.is(rax)) {
-      __ movq(rdx, receiver_);
-    } else {
-      __ movq(rdx, receiver_);
-      __ movq(rax, key_);
-    }
-  } else if (key_.is(rax)) {
-    __ movq(rdx, receiver_);
-  } else {
-    __ movq(rax, key_);
-    __ movq(rdx, receiver_);
-  }
-  // Calculate the delta from the IC call instruction to the map check
-  // movq instruction in the inlined version.  This delta is stored in
-  // a test(rax, delta) instruction after the call so that we can find
-  // it in the IC initialization code and patch the movq instruction.
-  // This means that we cannot allow test instructions after calls to
-  // KeyedLoadIC stubs in other places.
-  Handle<Code> ic = Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the __
-  // macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  // TODO(X64): Consider whether it's worth switching the test to a
-  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
-  // be generated normally.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  Label patch_site_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
-  // Move value, receiver, and key to registers rax, rdx, and rcx, as
-  // the IC stub expects.
-  // Move value to rax, using xchg if the receiver or key is in rax.
-  if (!value_.is(rax)) {
-    if (!receiver_.is(rax) && !key_.is(rax)) {
-      __ movq(rax, value_);
-    } else {
-      __ xchg(rax, value_);
-      // Update receiver_ and key_ if they are affected by the swap.
-      if (receiver_.is(rax)) {
-        receiver_ = value_;
-      } else if (receiver_.is(value_)) {
-        receiver_ = rax;
-      }
-      if (key_.is(rax)) {
-        key_ = value_;
-      } else if (key_.is(value_)) {
-        key_ = rax;
-      }
-    }
-  }
-  // Value is now in rax. Its original location is remembered in value_,
-  // and the value is restored to value_ before returning.
-  // The variables receiver_ and key_ are not preserved.
-  // Move receiver and key to rdx and rcx, swapping if necessary.
-  if (receiver_.is(rdx)) {
-    if (!key_.is(rcx)) {
-      __ movq(rcx, key_);
-    }  // Else everything is already in the right place.
-  } else if (receiver_.is(rcx)) {
-    if (key_.is(rdx)) {
-      __ xchg(rcx, rdx);
-    } else if (key_.is(rcx)) {
-      __ movq(rdx, receiver_);
-    } else {
-      __ movq(rdx, receiver_);
-      __ movq(rcx, key_);
-    }
-  } else if (key_.is(rcx)) {
-    __ movq(rdx, receiver_);
-  } else {
-    __ movq(rcx, key_);
-    __ movq(rdx, receiver_);
-  }
-
-  // Call the IC stub.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                    : Builtins::kKeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instructions (initial movq)
-  // to the test instruction.  We use masm_-> directly here instead of the
-  // __ macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  // Restore value (returned from store IC).
-  if (!value_.is(rax)) __ movq(value_, rax);
-}
-
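The xchg shuffle above needs one subtle bookkeeping step: swapping rax with
value_ also moves whatever receiver_ or key_ happened to alias, so those
descriptors must be re-pointed.  A small model of that fix-up, with Reg as a
stand-in for v8::internal::Register:

    typedef int Reg;  // Stand-in for v8::internal::Register.

    // After "xchg rax, value", any descriptor that named rax now refers to
    // the old value register, and vice versa; all others are untouched.
    static void FixAliasAfterSwap(Reg rax, Reg value, Reg* r) {
      if (*r == rax) {
        *r = value;
      } else if (*r == value) {
        *r = rax;
      }
    }

Calling this once for receiver_ and once for key_ reproduces the four
conditional updates in the deferred code.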
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Do not inline the inobject property case for loads from the global
-  // object.  Also do not inline for unoptimized code.  This saves time
-  // in the code generator.  Unoptimized code is toplevel code or code
-  // that is not in a loop.
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    frame()->Push(name);
-
-    RelocInfo::Mode mode = is_contextual
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    result = frame()->CallLoadIC(mode);
-    // A test rax instruction following the call signals that the
-    // inobject property case was inlined.  Ensure that there is not
-    // a test rax instruction here.
-    __ nop();
-  } else {
-    // Inline the inobject property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-
-    // r12 is now a reserved register, so it cannot be the receiver.
-    // If it was, the distance to the fixup location would not be constant.
-    ASSERT(!receiver.reg().is(r12));
-
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
-
-    // Check that the receiver is a heap object.
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    __ bind(deferred->patch_site());
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    masm()->movq(kScratchRegister, FACTORY->null_value(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                 kScratchRegister);
-    // This branch is always a forwards branch so it's always a fixed
-    // size which allows the assert below to succeed and patching to work.
-    // Don't use deferred->Branch(...), since that might add coverage code.
-    masm()->j(not_equal, deferred->entry_label());
-
-    // The delta from the patch label to the load offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-           LoadIC::kOffsetToLoadInstruction);
-    // The initial (invalid) offset has to be large enough to force
-    // a 32-bit instruction encoding to allow patching with an
-    // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
-
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->named_load_inline(), 1);
-    deferred->BindExit();
-  }
-  ASSERT(frame()->height() == original_height - 1);
-  return result;
-}
-
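Why the bogus offset must be kMaxInt: x64 addressing encodes small
displacements in a single byte, and a one-byte site could not later be
patched with an arbitrary 32-bit field offset.  A sketch of the encoding
rule being relied on:

    #include <stdint.h>

    // "mov reg, [base + disp]" takes a 1-byte displacement when disp fits
    // in int8_t and a 4-byte one otherwise (a zero displacement can even be
    // elided for most base registers).  Forcing disp = kMaxInt guarantees
    // the 4-byte form, so any real offset can be patched in afterwards.
    static int DisplacementBytes(int32_t disp) {
      return (disp >= -128 && disp <= 127) ? 1 : 4;
    }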
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // A test rax instruction following the call signals that the inobject
-    // property case was inlined.  Ensure that there is not a test rax
-    // instruction here.
-    __ nop();
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-    Label patch_site;
-
-    // Get the value and receiver from the stack.
-    Result value = frame()->Pop();
-    value.ToRegister();
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    // Allocate result register.
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
-    // Cannot use r12 for receiver, because that changes
-    // the distance between a call and a fixup location,
-    // due to a special encoding of r12 as r/m in a ModR/M byte.
-    if (receiver.reg().is(r12)) {
-      frame()->Spill(receiver.reg());  // It will be overwritten with result.
-      // Swap receiver and value.
-      __ movq(result.reg(), receiver.reg());
-      Result temp = receiver;
-      receiver = result;
-      result = temp;
-    }
-
-    // Check that the receiver is a heap object.
-    Condition is_smi = masm()->CheckSmi(receiver.reg());
-    slow.Branch(is_smi, &value, &receiver);
-
-    // This is the map check instruction that will be patched.
-    // Initially use an invalid map to force a failure. The exact
-    // instruction sequence is important because we use the
-    // kOffsetToStoreInstruction constant for patching. We avoid using
-    // the __ macro for the following two instructions because it
-    // might introduce extra instructions.
-    __ bind(&patch_site);
-    masm()->movq(kScratchRegister, FACTORY->null_value(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                 kScratchRegister);
-    // This branch is always a forwards branch so it's always a fixed size
-    // which allows the assert below to succeed and patching to work.
-    slow.Branch(not_equal, &value, &receiver);
-
-    // The delta from the patch label to the store offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
-           StoreIC::kOffsetToStoreInstruction);
-
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    __ movq(FieldOperand(receiver.reg(), offset), value.reg());
-    __ movq(result.reg(), value.reg());
-
-    // Allocate scratch register for write barrier.
-    Result scratch = allocator()->Allocate();
-    ASSERT(scratch.is_valid());
-
-    // The write barrier clobbers all input registers, so spill the
-    // receiver and the value.
-    frame_->Spill(receiver.reg());
-    frame_->Spill(value.reg());
-
-    // If the receiver and the value share a register, allocate a new
-    // register for the receiver.
-    if (receiver.reg().is(value.reg())) {
-      receiver = allocator()->Allocate();
-      ASSERT(receiver.is_valid());
-      __ movq(receiver.reg(), value.reg());
-    }
-
-    // Update the write barrier. To save instructions in the inlined
-    // version we do not filter smis.
-    Label skip_write_barrier;
-    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
-    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
-    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
-    if (FLAG_debug_code) {
-      __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-      __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-      __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    }
-    __ bind(&skip_write_barrier);
-    value.Unuse();
-    scratch.Unuse();
-    receiver.Unuse();
-    done.Jump(&result);
-
-    slow.Bind(&value, &receiver);
-    frame()->Push(&receiver);
-    frame()->Push(&value);
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // Encode the offset to the map check instruction and the offset
-    // to the write barrier store address computation in a test rax
-    // instruction.
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ testl(rax,
-             Immediate((delta_to_record_write << 16) | delta_to_patch_site));
-    done.Bind(&result);
-  }
-
-  ASSERT_EQ(expected_height, frame()->height());
-  return result;
-}
-
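The final testl above packs two code offsets into a single immediate so that
one signalling instruction can describe both patch points.  A sketch of the
packing, and of the unpacking a patcher would perform, with the 16-bit field
split implied by the shift in the generator:

    #include <stdint.h>

    // High half: distance to the write-barrier address computation.
    // Low half: distance to the map check.
    static uint32_t PackDeltas(uint32_t to_record_write,
                               uint32_t to_patch_site) {
      return (to_record_write << 16) | (to_patch_site & 0xFFFF);
    }

    static void UnpackDeltas(uint32_t imm,
                             int* to_record_write,
                             int* to_patch_site) {
      *to_record_write = static_cast<int>(imm >> 16);
      *to_patch_site = static_cast<int>(imm & 0xFFFF);
    }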
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Inline array load code if inside of a loop.  We do not know
-  // the receiver map yet, so we initially generate the code with
-  // a check against an invalid map.  In the inline cache code, we
-  // patch the map check if appropriate.
-  if (loop_nesting() > 0) {
-    Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    // Allocate the temporary early so that we use rax if it is free.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    // If key and receiver are shared registers on the frame, their values will
-    // be automatically saved and restored when going to deferred code.
-    // The result is returned in elements, which is not shared.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(elements.reg(),
-                                           receiver.reg(),
-                                           key.reg());
-
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    // Check that the receiver has the expected map.
-    // Initially, use an invalid map. The map is patched in the IC
-    // initialization code.
-    __ bind(deferred->patch_site());
-    // Use masm-> here instead of the double underscore macro since extra
-    // coverage code can interfere with the patching.  Do not use a load
-    // from the root array to load null_value, since the load must be patched
-    // with the expected receiver map, which is not in the root array.
-    masm_->movq(kScratchRegister, FACTORY->null_value(),
-                RelocInfo::EMBEDDED_OBJECT);
-    masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                kScratchRegister);
-    deferred->Branch(not_equal);
-
-    __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
-
-    // Get the elements array from the receiver.
-    __ movq(elements.reg(),
-            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ AssertFastElements(elements.reg());
-
-    // Check that key is within bounds.
-    __ SmiCompare(key.reg(),
-                  FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Load and check that the result is not the hole.  We could
-    // reuse the index or elements register for the value.
-    //
-    // TODO(206): Consider whether it makes sense to try some
-    // heuristic about which register to reuse.  For example, if
-    // one is rax, then we can reuse that one because the value
-    // coming from the deferred code will be in rax.
-    SmiIndex index =
-        masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-    __ movq(elements.reg(),
-            FieldOperand(elements.reg(),
-                         index.reg,
-                         index.scale,
-                         FixedArray::kHeaderSize));
-    result = elements;
-    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
-    deferred->Branch(equal);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->keyed_load_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    Comment cmnt(masm_, "[ Load from keyed Property");
-    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed load.  The explicit nop instruction is here because
-    // the push that follows might be peep-hole optimized away.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 2);
-  return result;
-}
-
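The SmiToIndex step is worth a gloss: with this era's x64 smi representation
(the 32-bit value stored in the upper word of the tagged value), one
arithmetic shift turns a tagged key into a ready-scaled byte index.  A
sketch under that representation assumption:

    #include <stdint.h>

    // A valid smi has all-zero low 32 bits, so shifting right by
    // (32 - log2(element size)) yields value * element_size in one step,
    // which is what lets the keyed load address FixedArray elements with a
    // single scaled operand.
    static intptr_t SmiToScaledIndex(intptr_t tagged_smi, int size_log2) {
      return tagged_smi >> (32 - size_log2);  // == (tagged >> 32) << size_log2
    }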
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Generate inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
-    // Get the receiver, key and value into registers.
-    result = frame()->Pop();
-    Result key = frame()->Pop();
-    Result receiver = frame()->Pop();
-
-    Result tmp = allocator_->Allocate();
-    ASSERT(tmp.is_valid());
-    Result tmp2 = allocator_->Allocate();
-    ASSERT(tmp2.is_valid());
-
-    // Determine whether the value is a constant before putting it in a
-    // register.
-    bool value_is_constant = result.is_constant();
-
-    // Make sure that value, key and receiver are in registers.
-    result.ToRegister();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(result.reg(),
-                                           key.reg(),
-                                           receiver.reg(),
-                                           strict_mode_flag());
-
-    // Check that the receiver is not a smi.
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ JumpIfNotSmi(key.reg(), deferred->entry_label());
-    } else if (FLAG_debug_code) {
-      __ AbortIfNotSmi(key.reg());
-    }
-
-    // Check that the receiver is a JSArray.
-    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
-    deferred->Branch(not_equal);
-
-    // Get the elements array from the receiver and check that it is not a
-    // dictionary.
-    __ movq(tmp.reg(),
-            FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
-    // Check whether it is possible to omit the write barrier. If the elements
-    // array is in new space or the value written is a smi, we can safely
-    // update the elements array without a write barrier.
-    Label in_new_space;
-    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
-    if (!value_is_constant) {
-      __ JumpIfNotSmi(result.reg(), deferred->entry_label());
-    }
-
-    __ bind(&in_new_space);
-    // Bind the deferred code patch site to be able to locate the fixed
-    // array map comparison.  When debugging, we patch this comparison to
-    // always fail so that we will hit the IC call in the deferred code
-    // which will allow the debugger to break for fast case stores.
-    __ bind(deferred->patch_site());
-    // Avoid using __ to ensure the distance from patch_site
-    // to the map address is always the same.
-    masm()->movq(kScratchRegister, FACTORY->fixed_array_map(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-            kScratchRegister);
-    deferred->Branch(not_equal);
-
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis (because the fixed array check above ensures the
-    // elements are in fast case). Use unsigned comparison to handle negative
-    // keys.
-    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
-                  key.reg());
-    deferred->Branch(below_equal);
-
-    // Store the value.
-    SmiIndex index =
-        masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-    __ movq(FieldOperand(tmp.reg(),
-                         index.reg,
-                         index.scale,
-                         FixedArray::kHeaderSize),
-            result.reg());
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->keyed_store_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    result = frame()->CallKeyedStoreIC(strict_mode_flag());
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed store.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 3);
-  return result;
-}
-
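The write-barrier elision above rests on two facts: smi values contain no
pointers, and a store into an object that itself lives in new space cannot
create the old-to-new reference the scavenger tracks.  Stated as a predicate
(the two flags stand in for the real smi check and the Heap::InNewSpace
query):

    static bool NeedsWriteBarrier(bool value_is_smi,
                                  bool target_in_new_space) {
      return !value_is_smi && !target_in_new_space;
    }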
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
-  }
-}
-
-
-void Reference::GetValue() {
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-
-  // Record the source position for the property load.
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      if (persist_after_get_) {
-        cgen_->frame()->Dup();
-      }
-      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
-      cgen_->frame()->Push(&result);
-      break;
-    }
-
-    case KEYED: {
-      // A load of a bare identifier (load from global) cannot be keyed.
-      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
-      if (persist_after_get_) {
-        cgen_->frame()->PushElementAt(1);
-        cgen_->frame()->PushElementAt(1);
-      }
-      Result value = cgen_->EmitKeyedLoad();
-      cgen_->frame()->Push(&value);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-
-  if (!persist_after_get_) {
-    set_unloaded();
-  }
-}
-
-
-void Reference::TakeValue() {
-  // TODO(X64): This function is completely architecture independent. Move
-  // it somewhere shared.
-
-  // For non-constant frame-allocated slots, we invalidate the value in the
-  // slot.  For all others, we fall back on GetValue.
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(!is_illegal());
-  if (type_ != SLOT) {
-    GetValue();
-    return;
-  }
-
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-  ASSERT(slot != NULL);
-  if (slot->type() == Slot::LOOKUP ||
-      slot->type() == Slot::CONTEXT ||
-      slot->var()->mode() == Variable::CONST ||
-      slot->is_arguments()) {
-    GetValue();
-    return;
-  }
-
-  // Only non-constant, frame-allocated parameters and locals can reach
-  // here.  Be careful not to use the optimizations for arguments
-  // object access since it may not have been initialized yet.
-  ASSERT(!slot->is_arguments());
-  if (slot->type() == Slot::PARAMETER) {
-    cgen_->frame()->TakeParameterAt(slot->index());
-  } else {
-    ASSERT(slot->type() == Slot::LOCAL);
-    cgen_->frame()->TakeLocalAt(slot->index());
-  }
-
-  ASSERT(persist_after_get_);
-  // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      Result answer = cgen_->EmitNamedStore(GetName(), false);
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression()->AsProperty();
-      ASSERT(property != NULL);
-
-      Result answer = cgen_->EmitKeyedStore(property->key()->type());
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case UNLOADED:
-    case ILLEGAL:
-      UNREACHABLE();
-  }
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                                      Result* left,
-                                                      Result* right) {
-  if (stub->ArgsInRegistersSupported()) {
-    stub->SetArgsInRegisters();
-    return frame_->CallStub(stub, left, right);
-  } else {
-    frame_->Push(left);
-    frame_->Push(right);
-    return frame_->CallStub(stub, 2);
-  }
-}
-
-#undef __
-
 #define __ masm.
 
 #ifdef _WIN64
@@ -8758,7 +58,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler masm(buffer, static_cast<int>(actual_size));
+  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
   // Generated code is put into a fixed, unmovable, buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
   // (e.g. the JavaScript nan-object).
@@ -8832,7 +132,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  // Call the function from C++.
+  // Call the function from C++ through this pointer.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }
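
FUNCTION_CAST in the hunk above is the usual JIT trampoline: the machine
code sits in a fixed executable buffer outside the V8 heap, and calling it
from C++ is a pointer cast.  A sketch of the idiom, assuming the
double(double, double) shape of ModuloFunction:

    #include <stdint.h>

    typedef double (*ModuloFn)(double, double);

    // Turn a buffer of generated machine code into a callable function
    // pointer.  The cast through intptr_t mirrors what FUNCTION_CAST does.
    static ModuloFn AsModuloFn(void* executable_buffer) {
      return reinterpret_cast<ModuloFn>(
          reinterpret_cast<intptr_t>(executable_buffer));
    }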
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 9a70907..94c7850 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -30,270 +30,17 @@
 
 #include "ast.h"
 #include "ic-inl.h"
-#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 
 // -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  The value of the reference may be invalidated,
-  // causing subsequent attempts to read it to fail.
-  void TakeValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through.  The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally.  Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
-  ControlDestination(JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool true_is_fall_through)
-      : true_target_(true_target),
-        false_target_(false_target),
-        true_is_fall_through_(true_is_fall_through),
-        is_used_(false) {
-    ASSERT(true_is_fall_through ? !true_target->is_bound()
-                                : !false_target->is_bound());
-  }
-
-  // Accessors for the jump targets.  Directly jumping or branching to
-  // or binding the targets will not update the destination's state.
-  JumpTarget* true_target() const { return true_target_; }
-  JumpTarget* false_target() const { return false_target_; }
-
-  // True if the destination has been jumped to unconditionally or
-  // control has been split to both targets.  This predicate does not
-  // test whether the targets have been extracted and manipulated as
-  // raw jump targets.
-  bool is_used() const { return is_used_; }
-
-  // True if the destination is used and the true target (respectively
-  // false target) was the fall through.  If the target is backward,
-  // "fall through" included jumping unconditionally to it.
-  bool true_was_fall_through() const {
-    return is_used_ && true_is_fall_through_;
-  }
-
-  bool false_was_fall_through() const {
-    return is_used_ && !true_is_fall_through_;
-  }
-
-  // Emit a branch to one of the true or false targets, and bind the
-  // other target.  Because this binds the fall-through target, it
-  // should be emitted in tail position (as the last thing when
-  // compiling an expression).
-  void Split(Condition cc) {
-    ASSERT(!is_used_);
-    if (true_is_fall_through_) {
-      false_target_->Branch(NegateCondition(cc));
-      true_target_->Bind();
-    } else {
-      true_target_->Branch(cc);
-      false_target_->Bind();
-    }
-    is_used_ = true;
-  }
-
-  // Emit an unconditional jump in tail position, to the true target
-  // (if the argument is true) or the false target.  The "jump" will
-  // actually bind the jump target if it is forward, jump to it if it
-  // is backward.
-  void Goto(bool where) {
-    ASSERT(!is_used_);
-    JumpTarget* target = where ? true_target_ : false_target_;
-    if (target->is_bound()) {
-      target->Jump();
-    } else {
-      target->Bind();
-    }
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Mark this jump target as used as if Goto had been called, but
-  // without generating a jump or binding a label (the control effect
-  // should have already happened).  This is used when the left
-  // subexpression of the short-circuit boolean operators is
-  // compiled.
-  void Use(bool where) {
-    ASSERT(!is_used_);
-    ASSERT((where ? true_target_ : false_target_)->is_bound());
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Swap the true and false targets but keep the same actual label as
-  // the fall through.  This is used when compiling negated
-  // expressions, where we want to swap the targets but preserve the
-  // state.
-  void Invert() {
-    JumpTarget* temp_target = true_target_;
-    true_target_ = false_target_;
-    false_target_ = temp_target;
-
-    true_is_fall_through_ = !true_is_fall_through_;
-  }
-
- private:
-  // True and false jump targets.
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-
-  // Before using the destination: true if the true target is the
-  // preferred fall through, false if the false target is.  After
-  // using the destination: true if the true target was actually used
-  // as the fall through, false if the false target was.
-  bool true_is_fall_through_;
-
-  // True if the Split or Goto functions have been called.
-  bool is_used_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair).  It is threaded through
-// the call stack.  Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own control destination.
-  CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  ~CodeGenState();
-
-  // Accessors for the state.
-  ControlDestination* destination() const { return destination_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-  // A control destination in case the expression has a control-flow
-  // effect.
-  ControlDestination* destination_;
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
@@ -319,431 +66,7 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
-  bool in_spilled_code() const { return in_spilled_code_; }
-  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  // State
-  ControlDestination* destination() const { return state_->destination(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Visit a statement and then spill the virtual frame if control flow can
-  // reach the end of the statement (ie, it does not exit via break,
-  // continue, return, or throw).  This function is used temporarily while
-  // the code generator is being transformed.
-  void VisitAndSpill(Statement* statement);
-
-  // Visit a list of statements and then spill the virtual frame if control
-  // flow can reach the end of the list.
-  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).
-  void GenerateReturnSequence(Result* return_value);
-
-  // Generate code for a fast smi loop.
-  void GenerateFastSmiLoop(ForStatement* node);
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  Result StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  Operand SlotOperand(Slot* slot, Register tmp);
-
-  Operand ContextSlotOperandCheckExtensions(Slot* slot,
-                                            Result tmp,
-                                            JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* x,
-                     ControlDestination* destination,
-                     bool force_control);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver();
-
-  // Generate code to push the value of an expression on top of the frame
-  // and then spill the frame fully to memory.  This function is used
-  // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                           TypeofState typeof_state,
-                                           JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       Result* result,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the expression stack into a slot, leaving the
-  // value in place.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Receiver is passed on the frame and not consumed.
-  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // If the store is contextual, value is passed on the frame and consumed.
-  // Otherwise, receiver and value are passed on the frame and consumed.
-  Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a property of an object, returning it in a Result.
-  // The object and the property name are passed on the stack, and
-  // not changed.
-  Result EmitKeyedLoad();
-
-  // Receiver, key, and value are passed on the frame and consumed.
-  Result EmitKeyedStore(StaticType* key_type);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Translate the value on top of the frame into control flow to the
-  // control destination.
-  void ToBoolean(ControlDestination* destination);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(BinaryOperation* expr,
-                              OverwriteMode overwrite_mode);
-
-  // Generate a stub call from the virtual frame.
-  Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                         Result* left,
-                                         Result* right);
-
-  // Emits code sequence that jumps to a JumpTarget if the inputs
-  // are both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfBothSmiUsingTypeInfo(Result* left,
-                                  Result* right,
-                                  JumpTarget* both_smi);
-
-  // Emits code sequence that jumps to deferred code if the input
-  // is not a smi.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfNotSmiUsingTypeInfo(Register reg,
-                                 TypeInfo type,
-                                 DeferredCode* deferred);
-
-  // Emits code sequence that jumps to deferred code if the inputs
-  // are not both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     DeferredCode* deferred);
-
-  // If possible, combine two constant smi values using op to produce
-  // a smi result, and push it on the virtual frame, all at compile time.
-  // Returns true if it succeeds.  Otherwise it has no effect.
-  bool FoldConstantSmis(Token::Value op, int left, int right);
-
-  // Emit code to perform a binary operation on a constant
-  // smi and a likely smi.  Consumes the Result *operand.
-  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                    Result* operand,
-                                    Handle<Object> constant_operand,
-                                    bool reversed,
-                                    OverwriteMode overwrite_mode);
-
-  // Emit code to perform a binary operation on two likely smis.
-  // The code to handle smi arguments is produced inline.
-  // Consumes the Results *left and *right.
-  Result LikelySmiBinaryOperation(BinaryOperation* expr,
-                                  Result* left,
-                                  Result* right,
-                                  OverwriteMode overwrite_mode);
-
-  void Comparison(AstNode* node,
-                  Condition cc,
-                  bool strict,
-                  ControlDestination* destination);
-
-  // If at least one of the sides is a constant smi, generate optimized code.
-  void ConstantSmiComparison(Condition cc,
-                             bool strict,
-                             ControlDestination* destination,
-                             Result* left_side,
-                             Result* right_side,
-                             bool left_side_constant_smi,
-                             bool right_side_constant_smi,
-                             bool is_loop_condition);
-
-  void GenerateInlineNumberComparison(Result* left_side,
-                                      Result* right_side,
-                                      Condition cc,
-                                      ControlDestination* dest);
-
-  // To prevent long attacker-controlled byte sequences, integer constants
-  // from the JavaScript source are loaded in two parts if they are larger
-  // than 16 bits.
-  static const int kMaxSmiInlinedBits = 16;
-  bool IsUnsafeSmi(Handle<Object> value);
-  // Load an integer constant x into a register target using
-  // at most 16 bits of user-controlled data per assembly operation.
-  void LoadUnsafeSmi(Register target, Handle<Object> value);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements. Takes three expressions, the object and two
-  // indices. This should only be used if the indices are known to be
-  // non-negative and within bounds of the elements array at the call site.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  // Check whether two RegExps are equivalent.
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to record which source position code is being generated
-  // for.  Source positions are collected by the assembler and emitted with
-  // the relocation information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.  There should
-  // be no frame-external references to (non-reserved) registers.
-  bool HasValidEntryRegisters();
-#endif
-
-  ZoneList<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  // Jump targets.
-  // The target of the return from the function.
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  // True when we are in code that expects the virtual frame to be fully
-  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
-  // called from spilled code, because they do not leave the virtual frame
-  // in a spilled state.
-  bool in_spilled_code_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class Result;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-
-  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
-  friend class InlineRuntimeFunctionsTable;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index b49fb1c..e637ba1 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -42,10 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  Isolate::Current()->cpu_features()->Probe(true);
-  if (Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return true;  // Yay!
 }
 
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 0398465..423e6f2 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -29,7 +29,8 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "assembler.h"
+#include "codegen.h"
 #include "debug.h"
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 2080c61..abac2b6 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -600,7 +600,6 @@
 
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  CpuFeatures::Scope scope(SSE2);
 
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
@@ -663,23 +662,26 @@
   __ neg(arg5);
 
   // Allocate a new deoptimizer object.
-  __ PrepareCallCFunction(5);
+  __ PrepareCallCFunction(6);
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ movq(arg1, rax);
-  __ movq(arg2, Immediate(type()));
+  __ Set(arg2, type());
   // Args 3 and 4 are already in the right registers.
 
-  // On windows put the argument on the stack (PrepareCallCFunction have
-  // created space for this). On linux pass the argument in r8.
+  // On windows put the arguments on the stack (PrepareCallCFunction
+  // has created space for this). On linux pass the arguments in r8 and r9.
 #ifdef _WIN64
   __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+  __ LoadAddress(arg5, ExternalReference::isolate_address());
+  __ movq(Operand(rsp, 5 * kPointerSize), arg5);
 #else
   __ movq(r8, arg5);
+  __ LoadAddress(r9, ExternalReference::isolate_address());
 #endif
 
   Isolate* isolate = masm()->isolate();
 
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -722,10 +724,11 @@
 
   // Compute the output frame in the deoptimizer.
   __ push(rax);
-  __ PrepareCallCFunction(1);
+  __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
+  __ LoadAddress(arg2, ExternalReference::isolate_address());
   __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+      ExternalReference::compute_output_frames_function(isolate), 2);
   __ pop(rax);
 
   // Replace the current frame with the output frames.
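
The #ifdef _WIN64 split above exists because the two x64 ABIs disagree on
argument passing: Win64 puts only the first four integer arguments in
registers (rcx, rdx, r8, r9) and the rest on the stack above the 32-byte
shadow space, while the System V ABI used on Linux has six argument
registers (rdi, rsi, rdx, rcx, r8, r9).  That is why argument 6, the
isolate, lands at [rsp + 5 * kPointerSize] on Windows and in r9 on Linux.
A name-only sketch of the mapping:

    // Zero-based argument index to location; no code generation involved.
    static const char* Win64ArgLocation(int i) {
      static const char* const kRegs[] = {"rcx", "rdx", "r8", "r9"};
      return i < 4 ? kRegs[i] : "stack, above the 32-byte shadow space";
    }

    static const char* SysVArgLocation(int i) {
      static const char* const kRegs[] = {"rdi", "rsi", "rdx", "rcx",
                                          "r8", "r9"};
      return i < 6 ? kRegs[i] : "stack";
    }
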
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 189ee42..82bc6ef 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -652,6 +652,9 @@
     case 2:
       mnem = "adc";
       break;
+    case 3:
+      mnem = "sbb";
+      break;
     case 4:
       mnem = "and";
       break;
@@ -1018,12 +1021,26 @@
         current += PrintRightOperand(current);
         AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x0b) {
+        get_modrm(*current, &mod, &regop, &rm);
+        // roundsd xmm, xmm/m64, imm8
+        AppendToBuffer("roundsd %s, ", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %d", (*current) & 3);
+        current += 1;
       } else {
         UnimplementedInstruction();
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x6E) {
+      if (opcode == 0x28) {
+        AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+      } else if (opcode == 0x29) {
+        AppendToBuffer("movapd ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0x6E) {
         AppendToBuffer("mov%c %s,",
                        rex_w() ? 'q' : 'd',
                        NameOfXMMRegister(regop));
@@ -1041,6 +1058,10 @@
         AppendToBuffer("movdqa ");
         current += PrintRightXMMOperand(current);
         AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0xD6) {
+        AppendToBuffer("movq ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
       } else {
         const char* mnemonic = "?";
         if (opcode == 0x50) {
@@ -1142,6 +1163,11 @@
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
       current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x7E) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else {
       UnimplementedInstruction();
     }
@@ -1159,6 +1185,22 @@
       current += 4;
     }  // else no immediate displacement.
     AppendToBuffer("nop");
+
+  } else if (opcode == 0x28) {
+    // movaps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
+  } else if (opcode == 0x29) {
+    // movaps xmm/m128, xmm
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps");
+    current += PrintRightXMMOperand(current);
+    AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
   } else if (opcode == 0xA2 || opcode == 0x31) {
     // RDTSC or CPUID
     AppendToBuffer("%s", mnemonic);
@@ -1170,6 +1212,13 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
+  } else if (opcode == 0x57) {
+    // xorps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
   } else if ((opcode & 0xF0) == 0x80) {
     // Jcc: Conditional jump (branch).
     current = data + JumpConditional(data);
@@ -1502,7 +1551,39 @@
         data++;
       }
         break;
-
+      case 0xB0:
+      case 0xB1:
+      case 0xB2:
+      case 0xB3:
+      case 0xB4:
+      case 0xB5:
+      case 0xB6:
+      case 0xB7:
+      case 0xB8:
+      case 0xB9:
+      case 0xBA:
+      case 0xBB:
+      case 0xBC:
+      case 0xBD:
+      case 0xBE:
+      case 0xBF: {
+        // mov reg8,imm8 or mov reg32,imm32
+        byte opcode = *data;
+        data++;
+        bool is_32bit = (opcode >= 0xB8);
+        int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
+        if (is_32bit) {
+          AppendToBuffer("mov%c %s, ",
+                         operand_size_code(),
+                         NameOfCPURegister(reg));
+          data += PrintImmediate(data, DOUBLEWORD_SIZE);
+        } else {
+          AppendToBuffer("movb %s, ",
+                         NameOfByteCPURegister(reg));
+          data += PrintImmediate(data, BYTE_SIZE);
+        }
+        break;
+      }
       case 0xFE: {
         data++;
         int mod, regop, rm;
@@ -1513,9 +1594,8 @@
         } else {
           UnimplementedInstruction();
         }
-      }
         break;
-
+      }
       case 0x68:
         AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
         data += 5;
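
The new 0xB0-0xBF cases decode the classic one-byte mov-immediate family: the low three opcode bits select the destination register, REX.B contributes bit 3, and opcodes from 0xB8 up take a full-width immediate. A standalone sketch of that decode step (illustrative types, not the disassembler's own):

    #include <cstdint>
    #include <cstdio>

    struct MovImm {
      bool is_wide;  // 0xB8..0xBF take imm32 (or imm64 with REX.W)
      int reg;       // 0..15 once REX.B is folded in
    };

    MovImm DecodeMovImm(uint8_t opcode, bool rex_b) {
      MovImm m;
      m.is_wide = opcode >= 0xB8;
      m.reg = (opcode & 0x7) | (rex_b ? 8 : 0);
      return m;
    }

    int main() {
      MovImm m = DecodeMovImm(0xB8, /*rex_b=*/true);  // REX.B + B8 => r8
      std::printf("wide=%d reg=%d\n", m.is_wide, m.reg);
      return 0;
    }
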
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 81be819..b14267c 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -99,7 +99,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
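
The rename from kSavedRegistersOffset to kLastParameterOffset matches what actually lives at +2 * kPointerSize on x64: slot 0 holds the caller's saved rbp and slot 1 the return address, so the first thing above them is the last (rightmost) parameter. A small sketch of that arithmetic, with the constants restated here as assumptions rather than taken from a V8 header:

    #include <cstdint>

    const int kPointerSize = 8;                          // x64
    const int kLastParameterOffset = +2 * kPointerSize;  // fp + 16

    // fp + 0  : caller's frame pointer (saved rbp)
    // fp + 8  : return address
    // fp + 16 : last parameter  <- kLastParameterOffset
    uintptr_t ParameterAddress(uintptr_t fp, int slots_before_last) {
      return fp + kLastParameterOffset + slots_before_last * kPointerSize;
    }
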
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 90afd85..d5fb7da 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -232,7 +232,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       NearLabel ok;
       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
       __ j(above_equal, &ok);
@@ -781,7 +781,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -809,7 +809,7 @@
       __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ jmp(clause->body_target()->entry_label());
+      __ jmp(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -821,7 +821,7 @@
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ jmp(clause->body_target()->entry_label());
+    __ jmp(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -831,14 +831,14 @@
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_target());
   } else {
-    __ jmp(default_clause->body_target()->entry_label());
+    __ jmp(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
@@ -1576,27 +1576,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(rax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2248,15 +2247,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the full code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2435,11 +2425,71 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  if (FLAG_debug_code) __ AbortIfSmi(rax);
+
+  // Check whether this map has already been checked to be safe for default
+  // valueOf.
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rbx, Map::kBitField2Offset),
+           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ j(not_zero, if_true);
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
+  __ j(equal, if_false);
+
+  // Look for the valueOf symbol in the descriptor array, and indicate false
+  // if found. The type is not checked, so if it is a transition it is a
+  // false negative.
+  __ movq(rbx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+  __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+  // rbx: descriptor array
+  // rcx: length of descriptor array
+  // Calculate the end of the descriptor array.
+  SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
+  __ lea(rcx,
+         Operand(
+             rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+  // Calculate location of the first key name.
+  __ addq(rbx,
+          Immediate(FixedArray::kHeaderSize +
+                    DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf, the result is false.
+  Label entry, loop;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(rdx, FieldOperand(rbx, 0));
+  __ Cmp(rdx, FACTORY->value_of_symbol());
+  __ j(equal, if_false);
+  __ addq(rbx, Immediate(kPointerSize));
+  __ bind(&entry);
+  __ cmpq(rbx, rcx);
+  __ j(not_equal, &loop);
+
+  // Reload map as register rbx was used as temporary above.
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is
+  // false.
+  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+  __ testq(rcx, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+  __ cmpq(rcx,
+          ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, if_false);
+  // Set the bit in the map to indicate that it has been checked as safe
+  // for default valueOf, and produce a true result.
+  __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+         Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
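
The assembly above replaces the old always-false stub with the real check: consult a cached bit on the map, reject dictionary-mode objects, scan the descriptor array for a valueOf key, verify the prototype is the untouched String prototype, and memoize a positive answer back into the map. A C++ model of the core of that decision, using stand-in types and eliding the dictionary-mode early-out (these are not the real V8 classes):

    #include <cstring>

    struct Descriptor { const char* key; };
    struct Map {
      bool string_wrapper_safe_bit;   // the cached Map::kBitField2 bit
      const Descriptor* descriptors;
      int descriptor_count;
      const Map* prototype_map;
    };

    bool IsSafeForDefaultValueOf(Map* map, const Map* string_proto_map) {
      if (map->string_wrapper_safe_bit) return true;    // cached: safe
      for (int i = 0; i < map->descriptor_count; i++) { // any own valueOf?
        if (std::strcmp(map->descriptors[i].key, "valueOf") == 0) return false;
      }
      if (map->prototype_map != string_proto_map) return false;
      map->string_wrapper_safe_bit = true;              // memoize
      return true;
    }
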
 
@@ -2693,8 +2743,13 @@
 
   // Return a random uint32 number in rax.
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(0);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  __ PrepareCallCFunction(1);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
@@ -2703,7 +2758,7 @@
   __ movd(xmm1, rcx);
   __ movd(xmm0, rax);
   __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
+  __ xorps(xmm0, xmm1);
   __ subsd(xmm0, xmm1);
   __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
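
The xorps here is part of a bit trick: the 32 fresh random bits are XORed into the low half of the IEEE-754 pattern for 2^20 (high bits 0x41300000), yielding 2^20 + bits * 2^-32, and subtracting 2^20 leaves a uniform value in [0, 1). A self-contained sketch of that construction:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Splice 32 random bits into the mantissa of 2^20, then subtract
    // 2^20; the result is random_bits * 2^-32, i.e. 0.(32 random bits).
    double RandomBitsToDouble(uint32_t random_bits) {
      uint64_t pattern = 0x4130000000000000ULL ^ random_bits;
      double d;
      std::memcpy(&d, &pattern, sizeof d);
      return d - 1048576.0;  // 2^20
    }

    int main() {
      std::printf("%f\n", RandomBitsToDouble(0x80000000u));  // prints 0.500000
      return 0;
    }
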
 
@@ -2988,15 +3043,14 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in rdi. Move it in there.
-  if (!result_register().is(rdi)) __ movq(rdi, result_register());
+  // InvokeFunction requires the function in rdi. Move it in there.
+  __ movq(rdi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(rdi, count, CALL_FUNCTION);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -3753,7 +3807,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
@@ -4173,30 +4231,7 @@
     default:
       break;
   }
-
   __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
 }
 
 
@@ -4217,7 +4252,6 @@
     default:
       break;
   }
-
   __ call(ic, RelocInfo::CODE_TARGET);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9180465..5ed89b5 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
@@ -381,11 +381,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
@@ -1010,7 +1005,7 @@
 
   // Call the entry.
   CEntryStub stub(1);
-  __ movq(rax, Immediate(2));
+  __ Set(rax, 2);
   __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
   __ CallStub(&stub);
 
@@ -1297,130 +1292,6 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 8 bytes of the 10-byte
-  // immediate move instruction, so we add 2 to get the
-  // offset to the last 8 bytes.
-  Address map_address = test_instruction_address + delta + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the 32-bit displacement of a seven byte
-  // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
-  // so we add 3 to get the offset of the displacement.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // TODO(<bug#>): implement this.
-  return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test rax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 8 bytes of
-  // the 10-byte immediate move instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a 7 byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a 7 byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 3;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Arguments are address of start of call sequence that called
-  // the IC,
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map compare
-  // instructions (starting with the 64-bit immediate mov of the map
-  // address). This offset is stored in the last 4 bytes of the 5
-  // byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 8 bytes
-  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 2;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
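
All of the PatchInlined* machinery deleted here implemented the classic codegen's inline-cache patching protocol: the instruction after an IC call had to be a test eax, imm32 (opcode 0xA9) whose immediate doubled as the distance back to the inlined map-check sequence. With Crankshaft, inline sites are no longer patched, so the protocol goes away. For the record, a sketch of how a patch site was located (hypothetical helper, addresses illustrative):

    #include <cstdint>
    #include <cstring>

    const uint8_t kTestEaxByte = 0xA9;

    // Returns the start of the 64-bit map immediate inside the inlined
    // map-check mov, or nullptr if nothing was inlined at this site.
    uint8_t* FindInlinedMapImmediate(uint8_t* after_call) {
      if (*after_call != kTestEaxByte) return nullptr;
      int32_t delta;
      std::memcpy(&delta, after_call + 1, sizeof delta);
      return after_call + delta + 2;  // skip REX.W prefix + mov opcode
    }
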
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : key
@@ -1503,11 +1374,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
deleted file mode 100644
index e715604..0000000
--- a/src/x64/jump-target-x64.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (is_bound()) {
-    // Backward jump.  There is an expected frame to merge to.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else if (entry_frame_ != NULL) {
-    // Forward jump with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and jump to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else {
-    // Forward jump.  Remember the current frame and emit a jump to
-    // its merge code.
-    AddReachingFrame(cgen()->frame());
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-    __ jmp(&merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint b) {
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-
-  if (is_bound()) {
-    ASSERT(direction_ == BIDIRECTIONAL);
-    // Backward branch.  We have an expected frame to merge to on the
-    // backward edge.
-
-    // Swap the current frame for a copy (we do the swapping to get
-    // the off-frame registers off the fall through) to use for the
-    // branch.
-    VirtualFrame* fall_through_frame = cgen()->frame();
-    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
-    RegisterFile non_frame_registers;
-    cgen()->SetFrame(branch_frame, &non_frame_registers);
-
-    // Check if we can avoid merge code.
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen()->frame()->Equals(entry_frame_)) {
-      // Branch right in to the block.
-      cgen()->DeleteFrame();
-      __ j(cc, &entry_label_);
-      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-      return;
-    }
-
-    // Check if we can reuse existing merge code.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (reaching_frames_[i] != NULL &&
-          cgen()->frame()->Equals(reaching_frames_[i])) {
-        // Branch to the merge code.
-        cgen()->DeleteFrame();
-        __ j(cc, &merge_labels_[i]);
-        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-        return;
-      }
-    }
-
-    // To emit the merge code here, we negate the condition and branch
-    // around the merge code on the fall through path.
-    Label original_fall_through;
-    __ j(NegateCondition(cc), &original_fall_through);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-    __ bind(&original_fall_through);
-
-  } else if (entry_frame_ != NULL) {
-    // Forward branch with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and branch to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    // Explicitly use the macro assembler instead of __ as forward
-    // branches are expected to be a fixed size (no inserted
-    // coverage-checking instructions please).  This is used in
-    // Reference::GetValue.
-    cgen()->masm()->j(cc, &entry_label_);
-
-  } else {
-    // Forward branch.  A copy of the current frame is remembered and
-    // a branch to the merge code is emitted.  Explicitly use the
-    // macro assembler instead of __ as forward branches are expected
-    // to be a fixed size (no inserted coverage-checking instructions
-    // please).  This is used in Reference::GetValue.
-    AddReachingFrame(new VirtualFrame(cgen()->frame()));
-    cgen()->masm()->j(cc, &merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  cgen()->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
-  target_frame->Adjust(1);
-  // We do not expect a call with a preconfigured entry frame.
-  ASSERT(entry_frame_ == NULL);
-  AddReachingFrame(target_frame);
-  __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(cgen() != NULL);
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  // Fast case: the jump target was manually configured with an entry
-  // frame to use.
-  if (entry_frame_ != NULL) {
-    // Assert no reaching frames to deal with.
-    ASSERT(reaching_frames_.is_empty());
-    ASSERT(!cgen()->has_valid_frame());
-
-    RegisterFile empty;
-    if (direction_ == BIDIRECTIONAL) {
-      // Copy the entry frame so the original can be used for a
-      // possible backward jump.
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    } else {
-      // Take ownership of the entry frame.
-      cgen()->SetFrame(entry_frame_, &empty);
-      entry_frame_ = NULL;
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (!is_linked()) {
-    ASSERT(cgen()->has_valid_frame());
-    if (direction_ == FORWARD_ONLY) {
-      // Fast case: no forward jumps and no possible backward jumps.
-      // The stack pointer can be floating above the top of the
-      // virtual frame before the bind.  Afterward, it should not.
-      VirtualFrame* frame = cgen()->frame();
-      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-      if (difference > 0) {
-        frame->stack_pointer_ -= difference;
-        __ addq(rsp, Immediate(difference * kPointerSize));
-      }
-    } else {
-      ASSERT(direction_ == BIDIRECTIONAL);
-      // Fast case: no forward jumps, possible backward ones.  Remove
-      // constants and copies above the watermark on the fall-through
-      // frame and use it as the entry frame.
-      cgen()->frame()->MakeMergable();
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (direction_ == FORWARD_ONLY &&
-      !cgen()->has_valid_frame() &&
-      reaching_frames_.length() == 1) {
-    // Fast case: no fall-through, a single forward jump, and no
-    // possible backward jumps.  Pick up the only reaching frame, take
-    // ownership of it, and use it for the block about to be emitted.
-    VirtualFrame* frame = reaching_frames_[0];
-    RegisterFile empty;
-    cgen()->SetFrame(frame, &empty);
-    reaching_frames_[0] = NULL;
-    __ bind(&merge_labels_[0]);
-
-    // The stack pointer can be floating above the top of the
-    // virtual frame before the bind.  Afterward, it should not.
-    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-    if (difference > 0) {
-      frame->stack_pointer_ -= difference;
-      __ addq(rsp, Immediate(difference * kPointerSize));
-    }
-
-    __ bind(&entry_label_);
-    return;
-  }
-
-  // If there is a current frame, record it as the fall-through.  It
-  // is owned by the reaching frames for now.
-  bool had_fall_through = false;
-  if (cgen()->has_valid_frame()) {
-    had_fall_through = true;
-    AddReachingFrame(cgen()->frame());  // Return value ignored.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-
-  // Compute the frame to use for entry to the block.
-  ComputeEntryFrame();
-
-  // Some moves required to merge to an expected frame require purely
-  // frame state changes, and do not require any code generation.
-  // Perform those first to increase the possibility of finding equal
-  // frames below.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    if (reaching_frames_[i] != NULL) {
-      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
-    }
-  }
-
-  if (is_linked()) {
-    // There were forward jumps.  Handle merging the reaching frames
-    // to the entry frame.
-
-    // Loop over the (non-null) reaching frames and process any that
-    // need merge code.  Iterate backwards through the list to handle
-    // the fall-through frame first.  Set frames that will be
-    // processed after 'i' to NULL if we want to avoid processing
-    // them.
-    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
-      VirtualFrame* frame = reaching_frames_[i];
-
-      if (frame != NULL) {
-        // Does the frame (probably) need merge code?
-        if (!frame->Equals(entry_frame_)) {
-          // We could have a valid frame as the fall through to the
-          // binding site or as the fall through from a previous merge
-          // code block.  Jump around the code we are about to
-          // generate.
-          if (cgen()->has_valid_frame()) {
-            cgen()->DeleteFrame();
-            __ jmp(&entry_label_);
-          }
-          // Pick up the frame for this block.  Assume ownership if
-          // there cannot be backward jumps.
-          RegisterFile empty;
-          if (direction_ == BIDIRECTIONAL) {
-            cgen()->SetFrame(new VirtualFrame(frame), &empty);
-          } else {
-            cgen()->SetFrame(frame, &empty);
-            reaching_frames_[i] = NULL;
-          }
-          __ bind(&merge_labels_[i]);
-
-          // Loop over the remaining (non-null) reaching frames,
-          // looking for any that can share merge code with this one.
-          for (int j = 0; j < i; j++) {
-            VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen()->frame())) {
-              // Set the reaching frame element to null to avoid
-              // processing it later, and then bind its entry label.
-              reaching_frames_[j] = NULL;
-              __ bind(&merge_labels_[j]);
-            }
-          }
-
-          // Emit the merge code.
-          cgen()->frame()->MergeTo(entry_frame_);
-        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
-          // If this is the fall through frame, and it didn't need
-          // merge code, we need to pick up the frame so we can jump
-          // around subsequent merge blocks if necessary.
-          RegisterFile empty;
-          cgen()->SetFrame(frame, &empty);
-          reaching_frames_[i] = NULL;
-        }
-      }
-    }
-
-    // The code generator may not have a current frame if there was no
-    // fall through and none of the reaching frames needed merging.
-    // In that case, clone the entry frame as the current frame.
-    if (!cgen()->has_valid_frame()) {
-      RegisterFile empty;
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    }
-
-    // There may be unprocessed reaching frames that did not need
-    // merge code.  They will have unbound merge labels.  Bind their
-    // merge labels to be the same as the entry label and deallocate
-    // them.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (!merge_labels_[i].is_bound()) {
-        reaching_frames_[i] = NULL;
-        __ bind(&merge_labels_[i]);
-      }
-    }
-
-    // There are non-NULL reaching frames with bound labels for each
-    // merge block, but only on backward targets.
-  } else {
-    // There were no forward jumps.  There must be a current frame and
-    // this must be a bidirectional target.
-    ASSERT(reaching_frames_.length() == 1);
-    ASSERT(reaching_frames_[0] != NULL);
-    ASSERT(direction_ == BIDIRECTIONAL);
-
-    // Use a copy of the reaching frame so the original can be saved
-    // for possible reuse as a backward merge block.
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
-    __ bind(&merge_labels_[0]);
-    cgen()->frame()->MergeTo(entry_frame_);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-  }
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
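
Deleting jump-target-x64.cc removes the last x64 piece of the classic codegen's virtual-frame machinery: every forward jump snapshotted the frame state, and DoBind() reconciled all reaching frames against one entry frame, emitting merge code where they differed. A toy model of that bookkeeping, just to preserve the idea (not V8 code):

    #include <vector>

    struct VirtualFrame { /* descriptions of registers/stack slots */ };

    class JumpTarget {
     public:
      // Each jump records the frame it leaves behind.
      void Jump(const VirtualFrame& frame) { reaching_.push_back(frame); }

      // Bind picks (or computes) an entry frame, then emits merge code
      // for every reaching frame that differs from it (elided here).
      void Bind() { reaching_.clear(); }

     private:
      std::vector<VirtualFrame> reaching_;
    };
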
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 86a7e83..c242874 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -91,7 +91,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -146,10 +146,10 @@
   __ push(rdi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
-      __ movl(rax, Immediate(slots));
+      __ Set(rax, slots);
       __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
       Label loop;
       __ bind(&loop);
@@ -290,7 +290,7 @@
   while (byte_count-- > 0) {
     __ int3();
   }
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -418,7 +418,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -440,14 +440,16 @@
 }
 
 
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode,
+                               int argc) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ call(code, mode);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode, argc);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
@@ -458,6 +460,13 @@
 }
 
 
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
 void LCodeGen::CallRuntime(const Runtime::Function* function,
                            int num_arguments,
                            LInstruction* instr) {
@@ -467,11 +476,23 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode,
+                                          int argc) {
   // Create the environment to bailout to. If the call has side effects
   // execution has to continue after the call otherwise execution can continue
   // from a previous bailout point repeating the call.
@@ -483,8 +504,17 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    ASSERT(argc == 0);
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        argc,
+        deoptimization_environment->deoptimization_index());
+  }
 }
 
 
@@ -534,7 +564,7 @@
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
         jump_table_.last().address != entry) {
-      jump_table_.Add(entry);
+      jump_table_.Add(JumpTableEntry(entry));
     }
     __ j(cc, &jump_table_.last().label);
   }
@@ -605,6 +635,8 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(kind == expected_safepoint_kind_);
+
   const ZoneList<LOperand*>* operands = pointers->operands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -1067,7 +1099,7 @@
 
 void LCodeGen::DoConstantI(LConstantI* instr) {
   ASSERT(instr->result()->IsRegister());
-  __ movl(ToRegister(instr->result()), Immediate(instr->value()));
+  __ Set(ToRegister(instr->result()), instr->value());
 }
 
 
@@ -1079,7 +1111,7 @@
   // Use xor to produce +0.0 in a fast and compact way, but avoid
   // doing so if the constant is -0.0.
   if (int_val == 0) {
-    __ xorpd(res, res);
+    __ xorps(res, res);
   } else {
     Register tmp = ToRegister(instr->TempAt(0));
     __ Set(tmp, int_val);
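
The recurring xorpd-to-xorps change in this file is an encoding-size optimization: both instructions clear an XMM register the same way, but xorps omits the 0x66 operand-size prefix and is one byte shorter. Compilers make the same choice; for example, _mm_setzero_pd() typically lowers to xorps:

    #include <immintrin.h>

    __m128d ZeroDouble() {
      return _mm_setzero_pd();  // usually emitted as xorps xmm, xmm
    }
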
@@ -1191,12 +1223,12 @@
       break;
     case Token::MOD:
       __ PrepareCallCFunction(2);
-      __ movsd(xmm0, left);
+      __ movaps(xmm0, left);
       ASSERT(right.is(xmm1));
       __ CallCFunction(
           ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
       __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ movsd(result, xmm0);
+      __ movaps(result, xmm0);
       break;
     default:
       UNREACHABLE();
@@ -1255,7 +1287,7 @@
     EmitBranch(true_block, false_block, not_zero);
   } else if (r.IsDouble()) {
     XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(xmm0, xmm0);
+    __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
     EmitBranch(true_block, false_block, not_equal);
   } else {
@@ -1290,7 +1322,7 @@
 
       // HeapNumber => false iff +0, -0, or NaN. These three cases set the
       // zero flag when compared to zero using ucomisd.
-      __ xorpd(xmm0, xmm0);
+      __ xorps(xmm0, xmm0);
       __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
       __ j(zero, false_label);
       __ jmp(true_label);
@@ -1328,11 +1360,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ Pushad();
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ Popad();
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
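
DoDeferredStackCheck shows the new pattern used throughout this file: a PushSafepointRegistersScope object replaces manually paired Pushad/Popad calls, so the registers are restored on every path out of the block. A hedged sketch of what such a scope presumably looks like (the real class lives in this CL's headers, not shown here):

    struct AssemblerLike {
      void PushSafepointRegisters() { /* emit the pushes */ }
      void PopSafepointRegisters()  { /* emit the pops */ }
    };

    class PushSafepointRegistersScope {
     public:
      explicit PushSafepointRegistersScope(AssemblerLike* masm) : masm_(masm) {
        masm_->PushSafepointRegisters();
      }
      ~PushSafepointRegistersScope() { masm_->PopSafepointRegisters(); }

     private:
      AssemblerLike* masm_;
    };
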
 
 
@@ -1485,10 +1514,11 @@
 
   __ CompareRoot(reg, Heap::kNullValueRootIndex);
   if (instr->is_strict()) {
+    ASSERT(Heap::kTrueValueRootIndex >= 0);
     __ movl(result, Immediate(Heap::kTrueValueRootIndex));
     NearLabel load;
     __ j(equal, &load);
-    __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+    __ Set(result, Heap::kFalseValueRootIndex);
     __ bind(&load);
     __ LoadRootIndexed(result, result, 0);
   } else {
@@ -1937,23 +1967,36 @@
 
 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                 Label* map_check) {
-  __ PushSafepointRegisters();
-  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
-      InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
-  InstanceofStub stub(flags);
+  {
+    PushSafepointRegistersScope scope(this);
+    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
+    InstanceofStub stub(flags);
 
-  __ push(ToRegister(instr->InputAt(0)));
-  __ Push(instr->function());
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(temp.is(rdi));
-  static const int kAdditionalDelta = 16;
-  int delta =
-      masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
-  __ movq(temp, Immediate(delta));
-  __ push(temp);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ movq(kScratchRegister, rax);
-  __ PopSafepointRegisters();
+    __ push(ToRegister(instr->InputAt(0)));
+    __ Push(instr->function());
+
+    Register temp = ToRegister(instr->TempAt(0));
+    static const int kAdditionalDelta = 10;
+    int delta =
+        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+    ASSERT(delta >= 0);
+    __ push_imm32(delta);
+
+    // We are pushing three values on the stack but recording a
+    // safepoint with two arguments because the stub is going to
+    // remove the third argument from the stack before jumping
+    // to the instanceof builtin on the slow path.
+    CallCodeGeneric(stub.GetCode(),
+                    RelocInfo::CODE_TARGET,
+                    instr,
+                    RECORD_SAFEPOINT_WITH_REGISTERS,
+                    2);
+    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+    // Move result to a register that survives the end of the
+    // PushSafepointRegistersScope.
+    __ movq(kScratchRegister, rax);
+  }
   __ testq(kScratchRegister, kScratchRegister);
   Label load_false;
   Label done;
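
The drop from kAdditionalDelta = 16 to 10 is consistent with the new code shape: after the delta is computed, the only code emitted before the stub entry is a push imm32 (5 bytes) and the call rel32 itself (5 bytes), and the ASSERT after the call re-checks the total. Restated as compile-time arithmetic (byte counts are an assumption from the standard encodings, not from the CL description):

    constexpr int kPushImm32Size = 5;  // 68 id
    constexpr int kCallRel32Size = 5;  // E8 cd
    constexpr int kAdditionalDelta = kPushImm32Size + kCallRel32Size;
    static_assert(kAdditionalDelta == 10, "matches the value used above");
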
@@ -2015,11 +2058,11 @@
   }
   __ movq(rsp, rbp);
   __ pop(rbp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   if (result.is(rax)) {
     __ load_rax(instr->hydrogen()->cell().location(),
@@ -2035,7 +2078,19 @@
 }
 
 
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(rax));
+  ASSERT(ToRegister(instr->result()).is(rax));
+
+  __ Move(rcx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(!value.is(temp));
@@ -2058,6 +2113,18 @@
 }
 
 
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(rdx));
+  ASSERT(ToRegister(instr->value()).is(rax));
+
+  __ Move(rcx, instr->name());
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2362,14 +2429,14 @@
   } else {
     __ cmpq(rbp, ToOperand(instr->InputAt(0)));
   }
-  __ movq(result, Immediate(scope()->num_parameters()));
+  __ movl(result, Immediate(scope()->num_parameters()));
   __ j(equal, &done);
 
   // Arguments adaptor frame present. Get argument length from there.
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(result, Operand(result,
-                          ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToInteger32(result, result);
+  __ SmiToInteger32(result,
+                    Operand(result,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
 
   // Argument length is in result register.
   __ bind(&done);
@@ -2440,25 +2507,19 @@
                                          env->deoptimization_index());
   v8::internal::ParameterCount actual(rax);
   __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  if (argument->IsConstantOperand()) {
-    EmitPushConstantOperand(argument);
-  } else if (argument->IsRegister()) {
-    __ push(ToRegister(argument));
-  } else {
-    ASSERT(!argument->IsDoubleRegister());
-    __ push(ToOperand(argument));
-  }
+  EmitPushTaggedOperand(argument);
 }
 
 
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ movq(result, rsi);
 }
 
 
@@ -2513,7 +2574,7 @@
   }
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2538,7 +2599,7 @@
   Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   Label negative;
   __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2559,9 +2620,7 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
 
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(rax)) {
     __ movq(tmp, rax);
@@ -2578,7 +2637,6 @@
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
 
   __ bind(&done);
-  __ PopSafepointRegisters();
 }
 
 
@@ -2613,7 +2671,7 @@
   if (r.IsDouble()) {
     XMMRegister scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(scratch, scratch);
+    __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
     __ andpd(input_reg, scratch);
   } else if (r.IsInteger32()) {
@@ -2624,7 +2682,9 @@
     Register input_reg = ToRegister(instr->InputAt(0));
     // Smi check.
     __ JumpIfNotSmi(input_reg, deferred->entry());
+    __ SmiToInteger32(input_reg, input_reg);
     EmitIntegerMathAbs(instr);
+    __ Integer32ToSmi(input_reg, input_reg);
     __ bind(deferred->exit());
   }
 }
@@ -2634,21 +2694,36 @@
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-  __ ucomisd(input_reg, xmm_scratch);
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(below_equal, instr->environment());
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatures::Scope scope(SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize if minus zero.
+      __ movq(output_reg, input_reg);
+      __ subq(output_reg, Immediate(1));
+      DeoptimizeIf(overflow, instr->environment());
+    }
+    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+    __ cvttsd2si(output_reg, xmm_scratch);
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   } else {
-    DeoptimizeIf(below, instr->environment());
+    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(below_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ cvttsd2si(output_reg, input_reg);
+
+    // Overflow is signalled with minint.
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   }
-
-  // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, input_reg);
-
-  // Overflow is signalled with minint.
-  __ cmpl(output_reg, Immediate(0x80000000));
-  DeoptimizeIf(equal, instr->environment());
 }
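
The SSE4.1 path is worth unpacking: roundsd with kRoundDown computes the floor directly, cvttsd2si then yields 0x80000000 on overflow (the deopt sentinel), and minus zero is caught up front by moving the double's bit pattern into a GPR and decrementing it; only the pattern of -0.0 (1 << 63, i.e. INT64_MIN) overflows. A sketch of that last test:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double x) {
      int64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      return bits == INT64_MIN;  // what the subq-overflow check detects
    }
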
 
 
@@ -2657,33 +2732,44 @@
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
 
+  Label done;
   // xmm_scratch = 0.5
   __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
   __ movq(xmm_scratch, kScratchRegister);
-
+  NearLabel below_half;
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_half);  // If input_reg is NaN, this doesn't jump.
   // input = input + 0.5
+  // This addition might give a result that isn't correct for
+  // rounding, due to loss of precision, but only for a number that's
+  // so big that the conversion below will overflow anyway.
   __ addsd(input_reg, xmm_scratch);
-
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
-
-  // Compute Math.floor(value + 0.5).
+  // Compute Math.floor(input).
   // Use truncating instruction (OK because input is positive).
   __ cvttsd2si(output_reg, input_reg);
-
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x80000000));
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bailout if negative (including -0).
+    __ movq(output_reg, input_reg);
+    __ testq(output_reg, output_reg);
+    DeoptimizeIf(negative, instr->environment());
+  } else {
+    // Bailout if below -0.5, otherwise round to (positive) zero, even
+    // if negative.
+    // xmm_scratch = -0.5
+    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+    __ movq(xmm_scratch, kScratchRegister);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ xorl(output_reg, output_reg);
+
+  __ bind(&done);
 }
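
The rewritten DoMathRound splits on 0.5 instead of unconditionally adding 0.5: values at or above 0.5 take the add-and-truncate path (in the assembly, NaN also falls into that path and is rejected by the 0x80000000 overflow check), while values below 0.5 round to zero unless they are under -0.5 or are a minus zero that must deoptimize. A C model of the happy paths, with the deopt cases marked as assumptions:

    #include <cstdint>

    // Models the two branches above; the generated code deoptimizes
    // instead of taking the marked fallbacks.
    int32_t MathRoundModel(double x) {
      if (x >= 0.5) {
        return static_cast<int32_t>(x + 0.5);  // truncation == floor here
      }
      if (x >= -0.5) {
        return 0;  // includes -0.0 (deopt when -0 must be preserved)
      }
      return 0;    // below -0.5: the generated code deoptimizes
    }
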
 
 
@@ -2698,7 +2784,7 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-  __ xorpd(xmm_scratch, xmm_scratch);
+  __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2714,7 +2800,7 @@
   if (exponent_type.IsDouble()) {
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     ASSERT(ToDoubleRegister(right).is(xmm1));
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
@@ -2722,7 +2808,7 @@
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers: xmm0 and edi (not rdi).
     // On Windows, the registers are xmm0 and edx.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
 #ifdef _WIN64
     ASSERT(ToRegister(right).is(rdx));
 #else
@@ -2732,7 +2818,6 @@
         ExternalReference::power_double_int_function(isolate()), 2);
   } else {
     ASSERT(exponent_type.IsTagged());
-    CpuFeatures::Scope scope(SSE2);
     Register right_reg = ToRegister(right);
 
     Label non_smi, call;
@@ -2749,13 +2834,13 @@
     __ bind(&call);
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers xmm0 and xmm1.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     // Right argument is already in xmm1.
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
   }
   // Return value is in xmm0.
-  __ movsd(result_reg, xmm0);
+  __ movaps(result_reg, xmm0);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
@@ -2818,6 +2903,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(rdi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(rdi, count, CALL_FUNCTION, &generator);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->result()).is(rax));
@@ -2921,7 +3021,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3017,13 +3117,21 @@
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  EmitPushTaggedOperand(instr->left());
+  EmitPushTaggedOperand(instr->right());
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3138,7 +3246,7 @@
   // contained in the register pointer map.
   __ Set(result, 0);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3151,16 +3259,12 @@
     __ Integer32ToSmi(index, index);
     __ push(index);
   }
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(rax);
   }
   __ SmiToInteger32(rax, rax);
   __ StoreToSafepointRegisterSlot(result, rax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3203,14 +3307,11 @@
   // contained in the register pointer map.
   __ Set(result, 0);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ Integer32ToSmi(char_code, char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(result, rax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3275,13 +3376,12 @@
   Register reg = ToRegister(instr->result());
   __ Move(reg, Smi::FromInt(0));
 
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  // Ensure that value in rax survives popping registers.
-  __ movq(kScratchRegister, rax);
-  __ PopSafepointRegisters();
+  {
+    PushSafepointRegistersScope scope(this);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Ensure that value in rax survives popping registers.
+    __ movq(kScratchRegister, rax);
+  }
   __ movq(reg, kScratchRegister);
 }
 
@@ -3322,7 +3422,7 @@
   DeoptimizeIf(not_equal, env);
 
   // Convert undefined to NaN. Compute NaN as 0/0.
-  __ xorpd(result_reg, result_reg);
+  __ xorps(result_reg, result_reg);
   __ divsd(result_reg, result_reg);
   __ jmp(&done);
 
@@ -3363,7 +3463,7 @@
     // conversions.
     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
     DeoptimizeIf(not_equal, instr->environment());
-    __ movl(input_reg, Immediate(0));
+    __ Set(input_reg, 0);
     __ jmp(&done);
 
     __ bind(&heap_number);
@@ -3371,7 +3471,7 @@
     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     __ cvttsd2siq(input_reg, xmm0);
     __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
-    __ cmpl(input_reg, kScratchRegister);
+    __ cmpq(input_reg, kScratchRegister);
     DeoptimizeIf(equal, instr->environment());
   } else {
     // Deoptimize if we don't have a heap number.
@@ -3436,7 +3536,7 @@
     // the JS bitwise operations.
     __ cvttsd2siq(result_reg, input_reg);
     __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
-    __ cmpl(result_reg, kScratchRegister);
+    __ cmpq(result_reg, kScratchRegister);
     DeoptimizeIf(equal, instr->environment());
   } else {
     __ cvttsd2si(result_reg, input_reg);
@@ -3691,14 +3791,7 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(0);
-  if (input->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(input)));
-  } else if (input->IsRegister()) {
-    __ push(ToRegister(input));
-  } else {
-    ASSERT(input->IsStackSlot());
-    __ push(ToOperand(input));
-  }
+  EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
@@ -3726,19 +3819,14 @@
 }
 
 
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
-  ASSERT(operand->IsConstantOperand());
-  LConstantOperand* const_op = LConstantOperand::cast(operand);
-  Handle<Object> literal = chunk_->LookupLiteral(const_op);
-  Representation r = chunk_->LookupLiteralRepresentation(const_op);
-  if (r.IsInteger32()) {
-    ASSERT(literal->IsNumber());
-    __ push(Immediate(static_cast<int32_t>(literal->Number())));
-  } else if (r.IsDouble()) {
-    Abort("unsupported double immediate");
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  ASSERT(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
   } else {
-    ASSERT(r.IsTagged());
-    __ Push(literal);
+    __ push(ToOperand(operand));
   }
 }
 
@@ -3884,20 +3972,8 @@
 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  // Push object.
-  if (obj->IsRegister()) {
-    __ push(ToRegister(obj));
-  } else {
-    __ push(ToOperand(obj));
-  }
-  // Push key.
-  if (key->IsConstantOperand()) {
-    EmitPushConstantOperand(key);
-  } else if (key->IsRegister()) {
-    __ push(ToRegister(key));
-  } else {
-    __ push(ToOperand(key));
-  }
+  EmitPushTaggedOperand(obj);
+  EmitPushTaggedOperand(key);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   LEnvironment* env = instr->deoptimization_environment();
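
For context on the two cmpl-to-cmpq fixes in this file: cvttsd2siq reports
an out-of-range or NaN input by producing the sentinel 0x8000000000000000,
whose low 32 bits are all zero. A 32-bit compare therefore cannot tell the
sentinel apart from a legitimate conversion result of 0.0. Illustrative
sketch (the instruction sequence is quoted from the hunks above):

    // sentinel (conversion failed): 0x8000000000000000 -> low 32 bits: 0
    // legitimate result for 0.0:    0x0000000000000000 -> low 32 bits: 0
    // cmpl flags both values as "failed" (spurious deopt on an input of
    // 0.0); cmpq compares all 64 bits and matches only the sentinel.
    __ cvttsd2siq(result_reg, input_reg);
    __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
    __ cmpq(result_reg, kScratchRegister);  // full 64-bit compare
    DeoptimizeIf(equal, instr->environment());
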
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index f44fdb9..96e0a0f 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -60,7 +60,8 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -124,7 +125,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -140,8 +141,8 @@
                        Register input,
                        Register temporary);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -156,12 +157,26 @@
   bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS
+  };
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode,
+                       int argc);
+
   void CallCode(Handle<Code> code,
                 RelocInfo::Mode mode,
                 LInstruction* instr);
+
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
                    LInstruction* instr);
+
   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
                    LInstruction* instr) {
@@ -169,6 +184,11 @@
     CallRuntime(function, num_arguments, instr);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -177,7 +197,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode,
+                                  int argc);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -246,11 +268,12 @@
                      Handle<Map> type,
                      Handle<String> name);
 
-  // Emits code for pushing a constant operand.
-  void EmitPushConstantOperand(LOperand* operand);
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
 
   struct JumpTableEntry {
-    inline JumpTableEntry(Address entry)
+    explicit inline JumpTableEntry(Address entry)
         : label(),
           address(entry) { }
     Label label;
@@ -281,6 +304,27 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    }
+
+    ~PushSafepointRegistersScope() {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
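
Usage sketch for the new PushSafepointRegistersScope (mirroring the
DoDeferredStringCharCodeAt rewrite in the .cc file above; the snippet is
illustrative, not additional patch content):

    {
      PushSafepointRegistersScope scope(this);  // pushes the registers and
                                                // switches to kWithRegisters
      __ push(string);
      CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
      __ StoreToSafepointRegisterSlot(result, rax);
    }  // destructor pops the registers and restores Safepoint::kSimple
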
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index cedd025..c3c617c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -214,7 +214,7 @@
   } else if (source->IsDoubleRegister()) {
     XMMRegister src = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
-      __ movsd(cgen_->ToDoubleRegister(destination), src);
+      __ movaps(cgen_->ToDoubleRegister(destination), src);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
       __ movsd(cgen_->ToOperand(destination), src);
@@ -273,9 +273,9 @@
     // Swap two double registers.
     XMMRegister source_reg = cgen_->ToDoubleRegister(source);
     XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
-    __ movsd(xmm0, source_reg);
-    __ movsd(source_reg, destination_reg);
-    __ movsd(destination_reg, xmm0);
+    __ movaps(xmm0, source_reg);
+    __ movaps(source_reg, destination_reg);
+    __ movaps(destination_reg, xmm0);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
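
The movsd-to-movaps substitutions here and in the code generator above apply
only to register-to-register moves: movaps has a one-byte-shorter encoding
and copies the full 128-bit register, avoiding the false dependency movsd
creates by merging into the destination's upper 64 bits. Moves to or from
memory keep movsd, since exactly 64 bits must be transferred. A minimal
sketch of the rule (the helper names are illustrative, not part of this
patch):

    // Register-to-register: full 128-bit copy, shorter encoding.
    void MoveDouble(MacroAssembler* masm, XMMRegister dst, XMMRegister src) {
      masm->movaps(dst, src);
    }
    // Register-to-memory: store exactly the 64-bit double value.
    void SpillDouble(MacroAssembler* masm, const Operand& dst,
                     XMMRegister src) {
      masm->movsd(dst, src);
    }
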
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index c47cd72..620bbc9 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -303,6 +302,13 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[rcx] #%d / ", arity());
 }
@@ -1114,9 +1120,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
@@ -1211,6 +1217,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1613,11 +1627,8 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
-        LOperand* xmm_temp =
-            (instr->CanTruncateToInt32() &&
-             Isolate::Current()->cpu_features()->IsSupported(SSE3))
-            ? NULL
-            : FixedTemp(xmm1);
+        LOperand* xmm_temp = instr->CanTruncateToInt32() ? NULL
+                                                         : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
@@ -1718,21 +1729,36 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
-                                          TempRegister());
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), rax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LStoreGlobalCell* result =
+      new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
   return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), rdx);
+  LOperand* value = UseFixed(instr->value(), rax);
+  LStoreGlobalGeneric* result = new LStoreGlobalGeneric(global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new LLoadContextSlot(context));
@@ -1877,7 +1903,7 @@
       array_type == kExternalFloatArray;
   LOperand* val = val_is_temp_register
       ? UseTempRegister(instr->value())
-      : UseRegister(instr->key());
+      : UseRegister(instr->value());
   LOperand* key = UseRegister(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
@@ -1929,6 +1955,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1972,7 +2005,8 @@
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2058,7 +2092,6 @@
       env->Push(value);
     }
   }
-  ASSERT(env->length() == instr->environment_length());
 
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
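
The builder methods added in this file all follow the fixed-register call
pattern: DoInvokeFunction pins the function to rdi and the result to rax,
and MarkAsCall tells the register allocator that every register is
clobbered across the instruction. A hypothetical builder in the same style
(HMyCall, LMyCall, and the rdx pinning are illustrative, not part of this
patch):

    LInstruction* LChunkBuilder::DoMyCall(HMyCall* instr) {
      // Pin the input and the result to the registers the callee expects.
      LOperand* receiver = UseFixed(instr->receiver(), rdx);
      LMyCall* result = new LMyCall(receiver);
      // MarkAsCall blocks all registers across the instruction.
      return MarkAsCall(DefineFixed(result, rax), instr);
    }
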
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index e94debf..74f4820 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -98,14 +98,15 @@
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(HasInstanceType)                            \
-  V(HasInstanceTypeAndBranch)                   \
   V(HasCachedArrayIndex)                        \
   V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
@@ -118,7 +119,8 @@
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -144,12 +146,14 @@
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
   V(StoreContextSlot)                           \
-  V(StoreGlobal)                                \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
@@ -1245,22 +1249,55 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LStoreGlobal(LOperand* value, LOperand* temp) {
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreGlobalGeneric(LOperand* global_object, LOperand* value) {
+    inputs_[0] = global_object;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* global_object() { return InputAt(0); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(1); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1358,6 +1395,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1582,6 +1636,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1637,12 +1692,29 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
   virtual void PrintDataTo(StringStream* stream);
 
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
 };
 
 
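For readers new to these classes: the three template arguments of
LTemplateInstruction<R, I, T> are the numbers of result, input, and temp
operands, so LStringAdd above is a one-result, two-input, zero-temp
instruction. A hypothetical instruction in the same style (the name and
mnemonic are illustrative, not part of this patch):

    class LStringCompare: public LTemplateInstruction<1, 2, 0> {
     public:
      LStringCompare(LOperand* left, LOperand* right) {
        inputs_[0] = left;   // the two input slots declared by <1, 2, 0>
        inputs_[1] = right;
      }

      DECLARE_CONCRETE_INSTRUCTION(StringCompare, "string-compare")

      LOperand* left() { return inputs_[0]; }
      LOperand* right() { return inputs_[1]; }
    };
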
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 654814c..3394206 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "assembler-x64.h"
 #include "macro-assembler-x64.h"
 #include "serialize.h"
@@ -40,12 +40,15 @@
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
-      root_array_available_(true),
-      code_object_(isolate()->heap()->undefined_value()) {
+      root_array_available_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -647,6 +650,7 @@
   Label leave_exit_frame;
   Label write_back;
 
+  Factory* factory = isolate()->factory();
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -694,7 +698,7 @@
 
   // Check if the function scheduled an exception.
   movq(rsi, scheduled_exception_address);
-  Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
+  Cmp(Operand(rsi, 0), factory->the_hole_value());
   j(not_equal, &promote_scheduled_exception);
 
   LeaveApiExitFrame();
@@ -709,7 +713,7 @@
 
   bind(&empty_result);
   // It was zero; the result is undefined.
-  Move(rax, FACTORY->undefined_value());
+  Move(rax, factory->undefined_value());
   jmp(&prologue);
 
   // HandleScope limit has changed. Delete allocated extensions.
@@ -785,10 +789,10 @@
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
-  } else if (is_int32(x)) {
-    movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
     movl(dst, Immediate(static_cast<uint32_t>(x)));
+  } else if (is_int32(x)) {
+    movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
     movq(dst, x, RelocInfo::NONE);
   }
@@ -798,7 +802,7 @@
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(kScratchRegister, x, RelocInfo::NONE);
+    Set(kScratchRegister, x);
     movq(dst, kScratchRegister);
   }
 }
@@ -1244,12 +1248,17 @@
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
-  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movq(dst, src1);
+    if (emit_debug_code()) {
+      movq(kScratchRegister, src1);
+      addq(kScratchRegister, src2);
+      Check(no_overflow, "Smi addition overflow");
+    }
+    lea(dst, Operand(src1, src2, times_1, 0));
+  } else {
+    addq(dst, src2);
+    Assert(no_overflow, "Smi addition overflow");
   }
-  addq(dst, src2);
-  Assert(no_overflow, "Smi addition overflow");
 }
 
 
@@ -1317,6 +1326,7 @@
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
+    ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   or_(dst, src2);
@@ -1337,6 +1347,7 @@
 
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
+    ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   xor_(dst, src2);
@@ -1809,7 +1820,7 @@
     // Set external caught exception to false.
     ExternalReference external_caught(
         Isolate::k_external_caught_exception_address, isolate());
-    movq(rax, Immediate(false));
+    Set(rax, static_cast<int64_t>(false));
     Store(external_caught, rax);
 
     // Set pending exception and rax to out of memory exception.
@@ -1890,7 +1901,7 @@
   Condition is_smi = CheckSmi(object);
   j(is_smi, &ok);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
-      FACTORY->heap_number_map());
+      isolate()->factory()->heap_number_map());
   Assert(equal, "Operand not a number");
   bind(&ok);
 }
@@ -1997,7 +2008,7 @@
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand counter_operand = ExternalOperand(ExternalReference(counter));
-    movq(counter_operand, Immediate(value));
+    movl(counter_operand, Immediate(value));
   }
 }
 
@@ -2147,7 +2158,7 @@
   push(kScratchRegister);
   if (emit_debug_code()) {
     movq(kScratchRegister,
-         FACTORY->undefined_value(),
+         isolate()->factory()->undefined_value(),
          RelocInfo::EMBEDDED_OBJECT);
     cmpq(Operand(rsp, 0), kScratchRegister);
     Check(not_equal, "code object not properly patched");
@@ -2199,7 +2210,6 @@
 #endif
   // Optionally save all XMM registers.
   if (save_doubles) {
-    CpuFeatures::Scope scope(SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize +
         arg_stack_space * kPointerSize;
     subq(rsp, Immediate(space));
@@ -2216,8 +2226,8 @@
   const int kFrameAlignment = OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
     ASSERT(IsPowerOf2(kFrameAlignment));
-    movq(kScratchRegister, Immediate(-kFrameAlignment));
-    and_(rsp, kScratchRegister);
+    ASSERT(is_int8(kFrameAlignment));
+    and_(rsp, Immediate(-kFrameAlignment));
   }
 
   // Patch the saved entry sp.
@@ -2316,7 +2326,7 @@
   // Check the context is a global context.
   if (emit_debug_code()) {
     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
-        FACTORY->global_context_map());
+        isolate()->factory()->global_context_map());
     Check(equal, "JSGlobalObject::global_context should be a global context.");
   }
 
@@ -2818,7 +2828,7 @@
   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, FACTORY->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, false);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2851,9 +2861,6 @@
   ASSERT(frame_alignment != 0);
   ASSERT(num_arguments >= 0);
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Make stack end at alignment and allocate space for arguments and old rsp.
   movq(kScratchRegister, rsp);
   ASSERT(IsPowerOf2(frame_alignment));
@@ -2873,26 +2880,6 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
-  // Pass current isolate address as additional parameter.
-  if (num_arguments < kRegisterPassedArguments) {
-#ifdef _WIN64
-    // First four arguments are passed in registers on Windows.
-    Register arg_to_reg[] = {rcx, rdx, r8, r9};
-#else
-    // First six arguments are passed in registers on other platforms.
-    Register arg_to_reg[] = {rdi, rsi, rdx, rcx, r8, r9};
-#endif
-    Register reg = arg_to_reg[num_arguments];
-    LoadAddress(reg, ExternalReference::isolate_address());
-  } else {
-    // Push Isolate pointer after all parameters.
-    int argument_slots_on_stack =
-        ArgumentStackSlotsForCFunctionCall(num_arguments);
-    LoadAddress(kScratchRegister, ExternalReference::isolate_address());
-    movq(Operand(rsp, argument_slots_on_stack * kPointerSize),
-         kScratchRegister);
-  }
-
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -2901,7 +2888,6 @@
   call(function);
   ASSERT(OS::ActivationFrameAlignment() != 0);
   ASSERT(num_arguments >= 0);
-  num_arguments += 1;
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
@@ -2909,7 +2895,9 @@
 
 
 CodePatcher::CodePatcher(byte* address, int size)
-    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+    : address_(address),
+      size_(size),
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
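
The reordered checks in MacroAssembler::Set(Register, int64_t) prefer the
zero-extending movl whenever the value fits in 32 unsigned bits; for
non-negative 32-bit values this replaces a 7-byte REX.W movq with a 5-byte
movl that leaves the same register contents. Illustrative encodings,
assuming the standard x64 instruction forms:

    // Set(rax, 0);         -> xorl eax, eax    (2 bytes)
    // Set(rax, 42);        -> movl eax, 42     (5 bytes, zero-extending;
    //                         previously movq rax, 42 at 7 bytes)
    // Set(rax, -1);        -> movq rax, -1     (7 bytes, sign-extended imm32)
    // Set(rax, 1LL << 40); -> movq rax, imm64  (10 bytes, full 64-bit load)
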
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1ee0fe0..4c17720 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -74,7 +74,11 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it is the
+  // caller's responsibility never to invoke such functionality on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // Prevent the use of the RootArray during the lifetime of this
   // scope object.
@@ -319,6 +323,16 @@
                                            Register src,
                                            int power);
 
+  // Perform the bitwise or of two smi values and return a smi value.
+  // If either argument is not a smi, jump to on_not_smis and retain
+  // the original values of the source registers. The destination
+  // register may be changed if it is not one of the source registers.
+  template <typename LabelType>
+  void SmiOrIfSmis(Register dst,
+                   Register src1,
+                   Register src2,
+                   LabelType* on_not_smis);
+
 
   // Simple comparison of smis.  Both sides must be known smis to use these,
   // otherwise use Cmp.
@@ -1029,7 +1043,10 @@
   // may be bigger than 2^16 - 1.  Requires a scratch register.
   void Ret(int bytes_dropped, Register scratch);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
   // Copy length bytes from source to destination.
   // Uses scratch register internally (if you have a low-eight register
@@ -1076,6 +1093,10 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  static int SafepointRegisterStackIndex(Register reg) {
+    return SafepointRegisterStackIndex(reg.code());
+  }
+
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1779,6 +1800,24 @@
 
 
 template <typename LabelType>
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+                                 LabelType* on_not_smis) {
+  if (dst.is(src1) || dst.is(src2)) {
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    movq(kScratchRegister, src1);
+    or_(kScratchRegister, src2);
+    JumpIfNotSmi(kScratchRegister, on_not_smis);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    or_(dst, src2);
+    JumpIfNotSmi(dst, on_not_smis);
+  }
+}
+
+
+template <typename LabelType>
 void MacroAssembler::JumpIfNotString(Register object,
                                      Register object_map,
                                      LabelType* not_string) {
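
One consequence of the NULL-isolate constructor contract documented above is
that a macro assembler can now be created before any isolate exists, as long
as only isolate-free operations are invoked on it. A minimal sketch,
assuming a statically allocated buffer (illustrative only):

    static byte buffer[4 * KB];
    MacroAssembler masm(NULL, buffer, sizeof(buffer));
    // From here on, isolate-dependent members must not be called; for
    // example, CodeObject() now asserts that its handle was initialized.
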
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 269e7af..d4ccb0e 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -114,7 +114,7 @@
 RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
     Mode mode,
     int registers_to_save)
-    : masm_(NULL, kRegExpCodeSize),
+    : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
       no_root_array_scope_(&masm_),
       code_relative_fixup_positions_(4),
       mode_(mode),
@@ -402,13 +402,14 @@
 #endif
     __ push(backtrack_stackpointer());
 
-    static const int num_arguments = 3;
+    static const int num_arguments = 4;
     __ PrepareCallCFunction(num_arguments);
 
     // Put arguments into parameter registers. Parameters are
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
+    //   Isolate* isolate
 #ifdef _WIN64
     // Compute and set byte_offset1 (start of capture).
     __ lea(rcx, Operand(rsi, rdx, times_1, 0));
@@ -416,6 +417,8 @@
     __ lea(rdx, Operand(rsi, rdi, times_1, 0));
     // Set byte_length.
     __ movq(r8, rbx);
+    // Isolate.
+    __ LoadAddress(r9, ExternalReference::isolate_address());
 #else  // AMD64 calling convention
     // Compute byte_offset2 (current position = rsi+rdi).
     __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -425,6 +428,8 @@
     __ movq(rsi, rax);
     // Set byte_length.
     __ movq(rdx, rbx);
+    // Isolate.
+    __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
     ExternalReference compare =
         ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
@@ -757,7 +762,7 @@
   __ j(above_equal, &stack_ok);
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
-  __ movq(rax, Immediate(EXCEPTION));
+  __ Set(rax, EXCEPTION);
   __ jmp(&exit_label_);
 
   __ bind(&stack_limit_hit);
@@ -794,7 +799,7 @@
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
     // page (a problem on Windows).
-    __ movq(rcx, Immediate(kRegisterZero));
+    __ Set(rcx, kRegisterZero);
     Label init_loop;
     __ bind(&init_loop);
     __ movq(Operand(rbp, rcx, times_1, 0), rax);
@@ -824,7 +829,7 @@
   LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
   __ bind(&at_start);
-  __ movq(current_character(), Immediate('\n'));
+  __ Set(current_character(), '\n');
   __ jmp(&start_label_);
 
 
@@ -852,7 +857,7 @@
         __ movl(Operand(rbx, i * kIntSize), rax);
       }
     }
-    __ movq(rax, Immediate(SUCCESS));
+    __ Set(rax, SUCCESS);
   }
 
   // Exit and return rax
@@ -919,16 +924,18 @@
 #endif
 
     // Call GrowStack(backtrack_stackpointer())
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments);
 #ifdef _WIN64
-    // Microsoft passes parameters in rcx, rdx.
+    // Microsoft passes parameters in rcx, rdx, r8.
     // First argument, backtrack stackpointer, is already in rcx.
     __ lea(rdx, Operand(rbp, kStackHighEnd));  // Second argument
+    __ LoadAddress(r8, ExternalReference::isolate_address());
 #else
-    // AMD64 ABI passes parameters in rdi, rsi.
+    // AMD64 ABI passes parameters in rdi, rsi, rdx.
     __ movq(rdi, backtrack_stackpointer());   // First argument.
     __ lea(rsi, Operand(rbp, kStackHighEnd));  // Second argument.
+    __ LoadAddress(rdx, ExternalReference::isolate_address());
 #endif
     ExternalReference grow_stack =
         ExternalReference::re_grow_stack(masm_.isolate());
@@ -952,7 +959,7 @@
     // If any of the code above needed to exit with an exception.
     __ bind(&exit_with_exception);
     // Exit with Result EXCEPTION(-1) to signal thrown exception.
-    __ movq(rax, Immediate(EXCEPTION));
+    __ Set(rax, EXCEPTION);
     __ jmp(&exit_label_);
   }
 
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
deleted file mode 100644
index 5df3d54..0000000
--- a/src/x64/register-allocator-x64-inl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
-      reg.is(kScratchRegister) || reg.is(kRootRegister) ||
-      reg.is(kSmiConstantRegister);
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // rax
-    2,   // rcx
-    3,   // rdx
-    1,   // rbx
-    -1,  // rsp  Stack pointer.
-    -1,  // rbp  Frame pointer.
-    -1,  // rsi  Context.
-    4,   // rdi
-    5,   // r8
-    6,   // r9
-    -1,  // r10  Scratch register.
-    8,   // r11
-    -1,  // r12  Smi constant.
-    -1,  // r13  Roots array.  This is callee saved.
-    7,   // r14
-    9    // r15
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] =
-      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r15 };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-  // The non-reserved rdi register is live on JS function entry.
-  Use(rdi);  // JS function.
-}
-} }  // namespace v8::internal
-
-#endif  // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
deleted file mode 100644
index 65189f5..0000000
--- a/src/x64/register-allocator-x64.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  ASSERT(is_valid());
-  if (is_constant()) {
-    CodeGenerator* code_generator =
-        CodeGeneratorScope::Current(Isolate::Current());
-    Result fresh = code_generator->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    code_generator->masm()->Move(fresh.reg(), handle());
-    // This result becomes a copy of the fresh one.
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  }
-  ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
-  ASSERT(is_valid());
-  CodeGenerator* code_generator =
-      CodeGeneratorScope::Current(Isolate::Current());
-  if (!is_register() || !reg().is(target)) {
-    Result fresh = code_generator->allocator()->Allocate(target);
-    ASSERT(fresh.is_valid());
-    if (is_register()) {
-      code_generator->masm()->movq(fresh.reg(), reg());
-    } else {
-      ASSERT(is_constant());
-      code_generator->masm()->Move(fresh.reg(), handle());
-    }
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  } else if (is_register() && reg().is(target)) {
-    ASSERT(code_generator->has_valid_frame());
-    code_generator->frame()->Spill(target);
-    ASSERT(code_generator->allocator()->count(target) == 1);
-  }
-  ASSERT(is_register());
-  ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // This function is not used in 64-bit code.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
deleted file mode 100644
index a2884d9..0000000
--- a/src/x64/register-allocator-x64.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  static const int kNumRegisters = 10;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 7494fe0..c19d29d 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -399,7 +399,7 @@
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                         masm->isolate());
-  __ movq(rax, Immediate(5));
+  __ Set(rax, 5);
   __ LoadAddress(rbx, ref);
 
   CEntryStub stub(1);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
deleted file mode 100644
index 10c327a..0000000
--- a/src/x64/virtual-frame-x64.cc
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::Enter() {
-  // Registers live on entry to a JS frame:
-  //   rsp: stack pointer, points to return address from this function.
-  //   rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
-  //        Trampoline frame.
-  //   rsi: context of this function call.
-  //   rdi: pointer to this function object.
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  if (FLAG_debug_code) {
-    // Verify that rdi contains a JS function.  The following code
-    // relies on rax being available for use.
-    Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
-    __ Check(not_smi,
-             "VirtualFrame::Enter - rdi is not a function (smi check).");
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
-    __ Check(equal,
-             "VirtualFrame::Enter - rdi is not a function (map check).");
-  }
-#endif
-
-  EmitPush(rbp);
-
-  __ movq(rbp, rsp);
-
-  // Store the context in the frame.  The context is kept in rsi and a
-  // copy is stored in the frame.  The external reference to rsi
-  // remains.
-  EmitPush(rsi);
-
-  // Store the function in the frame.  The frame owns the register
-  // reference now (ie, it can keep it in rdi or spill it later).
-  Push(rdi);
-  SyncElementAt(element_count() - 1);
-  cgen()->allocator()->Unuse(rdi);
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Avoid using the leave instruction here, because it is too
-// short. We need the return sequence to be at least the size of a
-  // call instruction to support patching the exit code in the
-  // debugger. See GenerateReturnSequence for the full return sequence.
-  // TODO(X64): A patched call will be very long now.  Make sure we
-  // have enough room.
-  __ movq(rsp, rbp);
-  stack_pointer_ = frame_pointer();
-  for (int i = element_count() - 1; i > stack_pointer_; i--) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      Unuse(last.reg());
-    }
-  }
-
-  EmitPop(rbp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    // The locals are initialized to a constant (the undefined value), but
-    // we sync them with the actual frame to allocate space for spilling
-    // them later.  First sync everything above the stack pointer so we can
-    // use pushes to allocate and initialize the locals.
-    SyncRange(stack_pointer_ + 1, element_count() - 1);
-    Handle<Object> undefined = FACTORY->undefined_value();
-    FrameElement initial_value =
-        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-
-      // Hope for one of the first eight registers, where the push operation
-      // takes only one byte (kScratchRegister needs the REX.W bit).
-      Result tmp = cgen()->allocator()->Allocate();
-      ASSERT(tmp.is_valid());
-      __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
-      for (int i = 0; i < count; i++) {
-        __ push(tmp.reg());
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      Result cnt = cgen()->allocator()->Allocate();
-      ASSERT(cnt.is_valid());
-      __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
-#ifdef DEBUG
-      Label loop_size;
-      __ bind(&loop_size);
-#endif
-      if (is_uint8(count)) {
-        // Loading imm8 is shorter than loading imm32.
-        // Loading only partial byte register, and using decb below.
-        __ movb(cnt.reg(), Immediate(count));
-      } else {
-        __ movl(cnt.reg(), Immediate(count));
-      }
-      __ bind(&alloc_locals_loop);
-      __ push(kScratchRegister);
-      if (is_uint8(count)) {
-        __ decb(cnt.reg());
-      } else {
-        __ decl(cnt.reg());
-      }
-      __ j(not_zero, &alloc_locals_loop);
-#ifdef DEBUG
-      CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
-#endif
-    }
-    for (int i = 0; i < count; i++) {
-      elements_.Add(initial_value);
-      stack_pointer_++;
-    }
-  }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ movq(Operand(rbp, fp_relative(context_index())), rsi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ movq(rsi, Operand(rbp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  Result temp = cgen()->allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ lea(temp.reg(), ParameterAt(-1));
-  Push(&temp);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(const Operand& operand) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(immediate);
-}
-
-
-void VirtualFrame::EmitPush(Smi* smi_value) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
-  stack_pointer_++;
-  __ Push(smi_value);
-}
-
-
-void VirtualFrame::EmitPush(Handle<Object> value) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  TypeInfo info = TypeInfo::TypeFromValue(value);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ Push(value);
-}
-
-
-void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ PushRoot(index);
-}
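-
-// Note the contrast with the Push family below (illustrative summary):
-// the EmitPush overloads emit a real push instruction and record a
-// memory element, keeping stack_pointer_ in step with rsp, e.g.:
-//   frame->EmitPush(rax);        // emits push rax; element in memory
-//   frame->Push(some_handle);    // emits nothing; constant element
-// where some_handle stands for any Handle<Object> (hypothetical name).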
-
-
-void VirtualFrame::Push(Expression* expr) {
-  ASSERT(expr->IsTrivial());
-
-  Literal* lit = expr->AsLiteral();
-  if (lit != NULL) {
-    Push(lit->handle());
-    return;
-  }
-
-  VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL) {
-    Slot* slot = proxy->var()->AsSlot();
-    if (slot->type() == Slot::LOCAL) {
-      PushLocalAt(slot->index());
-      return;
-    }
-    if (slot->type() == Slot::PARAMETER) {
-      PushParameterAt(slot->index());
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
-  if (ConstantPoolOverflowed()) {
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    if (value->IsSmi()) {
-      __ Move(temp.reg(), Smi::cast(*value));
-    } else {
-      __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
-    }
-    Push(&temp);
-  } else {
-    FrameElement element =
-        FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-    elements_.Add(element);
-  }
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ addq(rsp, Immediate(num_dropped * kPointerSize));
-  }
-
-  // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
-}
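-
-// Worked example (illustrative): with element_count() == 5 and
-// stack_pointer_ == 2, two elements are purely virtual.  Drop(3) then
-// discards the two virtual elements for free and emits a single
-//   addq rsp, 8
-// for the one element that had actually been materialized.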
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  FrameElement original = elements_[index];
-
-  // Is this element the backing store of any copies?
-  int new_backing_index = kIllegalIndex;
-  if (original.is_copied()) {
-    // Verify it is copied, and find first copy.
-    for (int i = index + 1; i < element_count(); i++) {
-      if (elements_[i].is_copy() && elements_[i].index() == index) {
-        new_backing_index = i;
-        break;
-      }
-    }
-  }
-
-  if (new_backing_index == kIllegalIndex) {
-    // No copies found, return kIllegalIndex.
-    if (original.is_register()) {
-      Unuse(original.reg());
-    }
-    elements_[index] = FrameElement::InvalidElement();
-    return kIllegalIndex;
-  }
-
-  // This is the backing store of copies.
-  Register backing_reg;
-  if (original.is_memory()) {
-    Result fresh = cgen()->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    Use(fresh.reg(), new_backing_index);
-    backing_reg = fresh.reg();
-    __ movq(backing_reg, Operand(rbp, fp_relative(index)));
-  } else {
-    // The original was in a register.
-    backing_reg = original.reg();
-    set_register_location(backing_reg, new_backing_index);
-  }
-  // Invalidate the element at index.
-  elements_[index] = FrameElement::InvalidElement();
-  // Set the new backing element.
-  if (elements_[new_backing_index].is_synced()) {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::SYNCED,
-                                      original.type_info());
-  } else {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::NOT_SYNCED,
-                                      original.type_info());
-  }
-  // Update the other copies.
-  for (int i = new_backing_index + 1; i < element_count(); i++) {
-    if (elements_[i].is_copy() && elements_[i].index() == index) {
-      elements_[i].set_index(new_backing_index);
-      elements_[new_backing_index].set_copied();
-    }
-  }
-  return new_backing_index;
-}
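-
-// Backing-store migration example (illustrative): if slot 1 is a
-// register element copied by slots 3 and 5, InvalidateFrameSlotAt(1)
-// marks slot 1 invalid, makes slot 3 the new register-backed element,
-// redirects slot 5 to index 3, and returns 3.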
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  int new_backing_store_index = InvalidateFrameSlotAt(index);
-  if (new_backing_store_index != kIllegalIndex) {
-    elements_.Add(CopyElementAt(new_backing_store_index));
-    return;
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY: {
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      Use(fresh.reg(), element_count());
-      elements_.Add(new_element);
-      __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
-      break;
-    }
-    case FrameElement::REGISTER:
-      Use(original.reg(), element_count());
-      // Fall through.
-    case FrameElement::CONSTANT:
-    case FrameElement::COPY:
-      original.clear_sync();
-      elements_.Add(original);
-      break;
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  // Store the value on top of the frame to the virtual frame slot at
-  // a given index.  The value on top of the frame is left in place.
-  // This is a duplicating operation, so it can create copies.
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  int top_index = element_count() - 1;
-  FrameElement top = elements_[top_index];
-  FrameElement original = elements_[index];
-  if (top.is_copy() && top.index() == index) return;
-  ASSERT(top.is_valid());
-
-  InvalidateFrameSlotAt(index);
-
-  // InvalidateFrameSlotAt can potentially change any frame element, due
-  // to spilling registers to allocate temporaries in order to preserve
-  // the copy-on-write semantics of aliased elements.  Reload top from
-  // the frame.
-  top = elements_[top_index];
-
-  if (top.is_copy()) {
-    // There are two cases based on the relative positions of the
-    // stored-to slot and the backing slot of the top element.
-    int backing_index = top.index();
-    ASSERT(backing_index != index);
-    if (backing_index < index) {
-      // 1. The top element is a copy of a slot below the stored-to
-      // slot.  The stored-to slot becomes an unsynced copy of that
-      // same backing slot.
-      elements_[index] = CopyElementAt(backing_index);
-    } else {
-      // 2. The top element is a copy of a slot above the stored-to
-      // slot.  The stored-to slot becomes the new (unsynced) backing
-      // slot and both the top element and the element at the former
-      // backing slot become copies of it.  The sync state of the top
-      // and former backing elements is preserved.
-      FrameElement backing_element = elements_[backing_index];
-      ASSERT(backing_element.is_memory() || backing_element.is_register());
-      if (backing_element.is_memory()) {
-        // Because sets of copies are canonicalized to be backed by
-        // their lowest frame element, and because memory frame
-        // elements are backed by the corresponding stack address, we
-        // have to move the actual value down in the stack.
-        //
-        // TODO(209): consider allocating the stored-to slot to the
-        // temp register.  Alternatively, allow copies to appear in
-        // any order in the frame and lazily move the value down to
-        // the slot.
-        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
-        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-      } else {
-        set_register_location(backing_element.reg(), index);
-        if (backing_element.is_synced()) {
-          // If the element is a register, we will not actually move
-          // anything on the stack but only update the virtual frame
-          // element.
-          backing_element.clear_sync();
-        }
-      }
-      elements_[index] = backing_element;
-
-      // The old backing element becomes a copy of the new backing
-      // element.
-      FrameElement new_element = CopyElementAt(index);
-      elements_[backing_index] = new_element;
-      if (backing_element.is_synced()) {
-        elements_[backing_index].set_sync();
-      }
-
-      // All the copies of the old backing element (including the top
-      // element) become copies of the new backing element.
-      for (int i = backing_index + 1; i < element_count(); i++) {
-        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
-          elements_[i].set_index(index);
-        }
-      }
-    }
-    return;
-  }
-
-  // Move the top element to the stored-to slot and replace it (the
-  // top element) with a copy.
-  elements_[index] = top;
-  if (top.is_memory()) {
-    // TODO(209): consider allocating the stored-to slot to the temp
-    // register.  Alternatively, allow copies to appear in any order
-    // in the frame and lazily move the value down to the slot.
-    FrameElement new_top = CopyElementAt(index);
-    new_top.set_sync();
-    elements_[top_index] = new_top;
-
-    // The sync state of the former top element is correct (synced).
-    // Emit code to move the value down in the frame.
-    __ movq(kScratchRegister, Operand(rsp, 0));
-    __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-  } else if (top.is_register()) {
-    set_register_location(top.reg(), index);
-    // The stored-to slot has the (unsynced) register reference and
-    // the top element becomes a copy.  The sync state of the top is
-    // preserved.
-    FrameElement new_top = CopyElementAt(index);
-    if (top.is_synced()) {
-      new_top.set_sync();
-      elements_[index].clear_sync();
-    }
-    elements_[top_index] = new_top;
-  } else {
-    // The stored-to slot holds the same value as the top but
-    // unsynced.  (We do not have copies of constants yet.)
-    ASSERT(top.is_constant());
-    elements_[index].clear_sync();
-  }
-}
-
-
-void VirtualFrame::MakeMergable() {
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement element = elements_[i];
-
-    // In all cases we have to reset the number type information
-    // to unknown for a mergable frame because of incoming back edges.
-    if (element.is_constant() || element.is_copy()) {
-      if (element.is_synced()) {
-        // Just spill.
-        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
-      } else {
-        // Allocate to a register.
-        FrameElement backing_element;  // Invalid if not a copy.
-        if (element.is_copy()) {
-          backing_element = elements_[element.index()];
-        }
-        Result fresh = cgen()->allocator()->Allocate();
-        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
-        elements_[i] =
-            FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED,
-                                          TypeInfo::Unknown());
-        Use(fresh.reg(), i);
-
-        // Emit a move.
-        if (element.is_constant()) {
-          __ Move(fresh.reg(), element.handle());
-        } else {
-          ASSERT(element.is_copy());
-          // Copies are only backed by register or memory locations.
-          if (backing_element.is_register()) {
-            // The backing store may have been spilled by allocating,
-            // but that's OK.  If it was, the value is right where we
-            // want it.
-            if (!fresh.reg().is(backing_element.reg())) {
-              __ movq(fresh.reg(), backing_element.reg());
-            }
-          } else {
-            ASSERT(backing_element.is_memory());
-            __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
-          }
-        }
-      }
-      // No need to set the copied flag --- there are no copies.
-    } else {
-      // Clear the copy flag of non-constant, non-copy elements.
-      // They cannot be copied because copies are not allowed.
-      // The copy flag is not relied on before the end of this loop,
-      // including when registers are spilled.
-      elements_[i].clear_copied();
-      elements_[i].set_type_info(TypeInfo::Unknown());
-    }
-  }
-}
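-
-// After MakeMergable the frame holds only memory and register elements
-// with unknown type info, so any other frame of the same height can be
-// merged to it with the three move phases below; this is what makes it
-// suitable as, e.g., the target frame of a loop back edge.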
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm(), "[ Merge frame");
-  // We should always be merging the code generator's current frame to an
-  // expected frame.
-  ASSERT(cgen()->frame() == this);
-
-  // Adjust the stack pointer upward (toward the top of the virtual
-  // frame) if necessary.
-  if (stack_pointer_ < expected->stack_pointer_) {
-    int difference = expected->stack_pointer_ - stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ subq(rsp, Immediate(difference * kPointerSize));
-  }
-
-  MergeMoveRegistersToMemory(expected);
-  MergeMoveRegistersToRegisters(expected);
-  MergeMoveMemoryToRegisters(expected);
-
-  // Adjust the stack pointer downward if necessary.
-  if (stack_pointer_ > expected->stack_pointer_) {
-    int difference = stack_pointer_ - expected->stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ addq(rsp, Immediate(difference * kPointerSize));
-  }
-
-  // At this point, the frames should be identical.
-  ASSERT(Equals(expected));
-}
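-
-// Ordering rationale (summary): the stack pointer is adjusted upward
-// (subq) before the moves so that every memory slot of the expected
-// frame is addressable, registers are freed toward memory before
-// being refilled from memory and constants, and rsp is adjusted
-// downward (addq) only after all stores have been emitted.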
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  // Move registers, constants, and copies to memory.  Perform moves
-  // from the top downward in the frame in order to leave the backing
-  // stores of copies in registers.
-  for (int i = element_count() - 1; i >= 0; i--) {
-    FrameElement target = expected->elements_[i];
-    if (target.is_register()) continue;  // Handle registers later.
-    if (target.is_memory()) {
-      FrameElement source = elements_[i];
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          // Not a legal merge move.
-          UNREACHABLE();
-          break;
-
-        case FrameElement::MEMORY:
-          // Already in place.
-          break;
-
-        case FrameElement::REGISTER:
-          Unuse(source.reg());
-          if (!source.is_synced()) {
-            __ movq(Operand(rbp, fp_relative(i)), source.reg());
-          }
-          break;
-
-        case FrameElement::CONSTANT:
-          if (!source.is_synced()) {
-            __ Move(Operand(rbp, fp_relative(i)), source.handle());
-          }
-          break;
-
-        case FrameElement::COPY:
-          if (!source.is_synced()) {
-            int backing_index = source.index();
-            FrameElement backing_element = elements_[backing_index];
-            if (backing_element.is_memory()) {
-              __ movq(kScratchRegister,
-                       Operand(rbp, fp_relative(backing_index)));
-              __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
-            } else {
-              ASSERT(backing_element.is_register());
-              __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
-            }
-          }
-          break;
-      }
-    }
-    elements_[i] = target;
-  }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
-  // We have already done X-to-memory moves.
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    // Move the right value into register i if it is currently in a register.
-    int index = expected->register_location(i);
-    int use_index = register_location(i);
-    // Skip if register i is unused in the target or else if source is
-    // not a register (this is not a register-to-register move).
-    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
-    Register target = RegisterAllocator::ToRegister(i);
-    Register source = elements_[index].reg();
-    if (index != use_index) {
-      if (use_index == kIllegalIndex) {  // Target is currently unused.
-        // Copy the contents of the source register to the target register.
-        // Set frame element register to target.
-        Use(target, index);
-        Unuse(source);
-        __ movq(target, source);
-      } else {
-        // Exchange contents of registers source and target.
-        // Nothing except the register backing use_index has changed.
-        elements_[use_index].set_reg(source);
-        set_register_location(target, index);
-        set_register_location(source, use_index);
-        __ xchg(source, target);
-      }
-    }
-
-    if (!elements_[index].is_synced() &&
-        expected->elements_[index].is_synced()) {
-      __ movq(Operand(rbp, fp_relative(index)), target);
-    }
-    elements_[index] = expected->elements_[index];
-  }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
-  // Move memory, constants, and copies to registers.  This is the
-  // final step and since it is not done from the bottom up, but in
-  // register code order, we have special code to ensure that the backing
-  // elements of copies are in their correct locations when we
-  // encounter the copies.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int index = expected->register_location(i);
-    if (index != kIllegalIndex) {
-      FrameElement source = elements_[index];
-      FrameElement target = expected->elements_[index];
-      Register target_reg = RegisterAllocator::ToRegister(i);
-      ASSERT(target.reg().is(target_reg));
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          UNREACHABLE();
-          break;
-        case FrameElement::REGISTER:
-          ASSERT(source.Equals(target));
-          // Go to next iteration.  Skips Use(target_reg) and syncing
-          // below.  It is safe to skip syncing because a target
-          // register frame element would only be synced if all source
-          // elements were.
-          continue;
-        case FrameElement::MEMORY:
-          ASSERT(index <= stack_pointer_);
-          __ movq(target_reg, Operand(rbp, fp_relative(index)));
-          break;
-
-        case FrameElement::CONSTANT:
-          __ Move(target_reg, source.handle());
-          break;
-
-        case FrameElement::COPY: {
-          int backing_index = source.index();
-          FrameElement backing = elements_[backing_index];
-          ASSERT(backing.is_memory() || backing.is_register());
-          if (backing.is_memory()) {
-            ASSERT(backing_index <= stack_pointer_);
-            // Optimization: if the backing store should also move to a
-            // register, move it to its register first.
-            if (expected->elements_[backing_index].is_register()) {
-              FrameElement new_backing = expected->elements_[backing_index];
-              Register new_backing_reg = new_backing.reg();
-              ASSERT(!is_used(new_backing_reg));
-              elements_[backing_index] = new_backing;
-              Use(new_backing_reg, backing_index);
-              __ movq(new_backing_reg,
-                      Operand(rbp, fp_relative(backing_index)));
-              __ movq(target_reg, new_backing_reg);
-            } else {
-              __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
-            }
-          } else {
-            __ movq(target_reg, backing.reg());
-          }
-        }
-      }
-      // Ensure the proper sync state.
-      if (target.is_synced() && !source.is_synced()) {
-        __ movq(Operand(rbp, fp_relative(index)), target_reg);
-      }
-      Use(target_reg, index);
-      elements_[index] = target;
-    }
-  }
-}
-
-
-Result VirtualFrame::Pop() {
-  FrameElement element = elements_.RemoveLast();
-  int index = element_count();
-  ASSERT(element.is_valid());
-
-  // Get number type information of the result.
-  TypeInfo info;
-  if (!element.is_copy()) {
-    info = element.type_info();
-  } else {
-    info = elements_[element.index()].type_info();
-  }
-
-  bool pop_needed = (stack_pointer_ == index);
-  if (pop_needed) {
-    stack_pointer_--;
-    if (element.is_memory()) {
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ pop(temp.reg());
-      temp.set_type_info(info);
-      return temp;
-    }
-
-    __ addq(rsp, Immediate(kPointerSize));
-  }
-  ASSERT(!element.is_memory());
-
-  // The top element is a register, constant, or a copy.  Unuse
-  // registers and follow copies to their backing store.
-  if (element.is_register()) {
-    Unuse(element.reg());
-  } else if (element.is_copy()) {
-    ASSERT(element.index() < index);
-    index = element.index();
-    element = elements_[index];
-  }
-  ASSERT(!element.is_copy());
-
-  // The element is memory, a register, or a constant.
-  if (element.is_memory()) {
-    // Memory elements could only be the backing store of a copy.
-    // Allocate the original to a register.
-    ASSERT(index <= stack_pointer_);
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    Use(temp.reg(), index);
-    FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(),
-                                      FrameElement::SYNCED,
-                                      element.type_info());
-    // Preserve the copy flag on the element.
-    if (element.is_copied()) new_element.set_copied();
-    elements_[index] = new_element;
-    __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
-    return Result(temp.reg(), info);
-  } else if (element.is_register()) {
-    return Result(element.reg(), info);
-  } else {
-    ASSERT(element.is_constant());
-    return Result(element.handle());
-  }
-}
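-
-// Behavioral sketch (illustrative): popping a synced memory element at
-// the stack top emits a real pop into a freshly allocated register,
-// while popping a register element emits at most an
-//   addq rsp, kPointerSize
-// (when the slot had been materialized) and returns the register
-// itself as the Result.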
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallStub(stub);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
-  PrepareForCall(0, 0);
-  arg->ToRegister(rax);
-  arg->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
-  PrepareForCall(0, 0);
-
-  if (arg0->is_register() && arg0->reg().is(rax)) {
-    if (arg1->is_register() && arg1->reg().is(rdx)) {
-      // Wrong registers.
-      __ xchg(rax, rdx);
-    } else {
-      // Register rdx is free for arg0, which frees rax for arg1.
-      arg0->ToRegister(rdx);
-      arg1->ToRegister(rax);
-    }
-  } else {
-    // Register rax is free for arg1, which guarantees rdx is free for
-    // arg0.
-    arg1->ToRegister(rax);
-    arg0->ToRegister(rdx);
-  }
-
-  arg0->Unuse();
-  arg1->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
-  Result function = Pop();
-
-  // InvokeFunction requires function in rdi.  Move it in there.
-  function.ToRegister(rdi);
-  function.Unuse();
-
-  // +1 for receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(rdi, count, CALL_FUNCTION);
-  RestoreContextRegister();
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  // Emit code to write elements below the stack pointer to their
-  // (already allocated) stack address.
-  ASSERT(index <= stack_pointer_);
-  FrameElement element = elements_[index];
-  ASSERT(!element.is_synced());
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      break;
-
-    case FrameElement::MEMORY:
-      // This function should not be called with synced elements.
-      // (memory elements are always synced).
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ movq(Operand(rbp, fp_relative(index)), element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      __ Move(Operand(rbp, fp_relative(index)), element.handle());
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing_element = elements_[backing_index];
-      if (backing_element.is_memory()) {
-        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
-        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-      } else {
-        ASSERT(backing_element.is_register());
-        __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
-  // Sync an element of the frame that is just above the stack pointer
-  // by pushing it.
-  ASSERT(index == stack_pointer_ + 1);
-  stack_pointer_++;
-  FrameElement element = elements_[index];
-
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      __ Push(Smi::FromInt(0));
-      break;
-
-    case FrameElement::MEMORY:
-      // No memory elements exist above the stack pointer.
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ push(element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      __ Move(kScratchRegister, element.handle());
-      __ push(kScratchRegister);
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing = elements_[backing_index];
-      ASSERT(backing.is_memory() || backing.is_register());
-      if (backing.is_memory()) {
-        __ push(Operand(rbp, fp_relative(backing_index)));
-      } else {
-        __ push(backing.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
-  ASSERT(begin >= 0);
-  ASSERT(end < element_count());
-  // Sync elements below the range if they have not been materialized
-  // on the stack.
-  int start = Min(begin, stack_pointer_ + 1);
-  int end_or_stack_pointer = Min(stack_pointer_, end);
-  // Emit normal push instructions for elements above stack pointer
-  // and use mov instructions if we are below stack pointer.
-  int i = start;
-
-  while (i <= end_or_stack_pointer) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
-    i++;
-  }
-  while (i <= end) {
-    SyncElementByPushing(i);
-    i++;
-  }
-}
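-
-// Example (illustrative): with stack_pointer_ == 3, SyncRange(2, 5)
-// stores unsynced elements 2 and 3 to their rbp-relative slots with
-// movq, then pushes elements 4 and 5, leaving stack_pointer_ at 5.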
-
-
-//------------------------------------------------------------------------------
-// Virtual frame stub and IC calling functions.
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  PrepareForCall(0, 0);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
-                                   int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ InvokeBuiltin(id, flag);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
-                                          Result* b,
-                                          Register a_reg,
-                                          Register b_reg) {
-  ASSERT(!a_reg.is(b_reg));
-  // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
-  ASSERT(cgen()->allocator()->count(a_reg) <= 2);
-  ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
-  ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
-  ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
-         (a->is_register() && a->reg().is(a_reg)) ||
-         (b->is_register() && b->reg().is(a_reg)));
-  // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
-  ASSERT(cgen()->allocator()->count(b_reg) <= 2);
-  ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
-  ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
-  ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
-         (a->is_register() && a->reg().is(b_reg)) ||
-         (b->is_register() && b->reg().is(b_reg)));
-
-  if (a->is_register() && a->reg().is(a_reg)) {
-    b->ToRegister(b_reg);
-  } else if (!cgen()->allocator()->is_used(a_reg)) {
-    a->ToRegister(a_reg);
-    b->ToRegister(b_reg);
-  } else if (cgen()->allocator()->is_used(b_reg)) {
-    // a must be in b_reg, b in a_reg.
-    __ xchg(a_reg, b_reg);
-    // Results a and b will be invalidated, so it is ok if they are switched.
-  } else {
-    b->ToRegister(b_reg);
-    a->ToRegister(a_reg);
-  }
-  a->Unuse();
-  b->Unuse();
-}
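-
-// Swap case (illustrative): if a is live in b_reg and b is live in
-// a_reg, both registers are in use, so the code falls into the xchg
-// branch and resolves the two-cycle with a single instruction rather
-// than spilling either result.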
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
-  // Name and receiver are on the top of the frame.  Both are dropped.
-  // The IC expects name in rcx and receiver in rax.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  Result name = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&name, &receiver, rcx, rax);
-
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame. Put them in rax and rdx.
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&key, &receiver, rax, rdx);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
-                                 bool is_contextual,
-                                 StrictModeFlag strict_mode) {
-  // Value and (if not contextual) receiver are on top of the frame.
-  // The IC expects name in rcx, value in rax, and receiver in rdx.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-  Result value = Pop();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    PrepareForCall(0, 0);
-    value.ToRegister(rax);
-    __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    value.Unuse();
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    Result receiver = Pop();
-    PrepareForCall(0, 0);
-    MoveResultsToRegisters(&value, &receiver, rax, rdx);
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ Move(rcx, name);
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in rax, key in rcx, and receiver in rdx.
-  Result value = Pop();
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  if (!cgen()->allocator()->is_used(rax) ||
-      (value.is_register() && value.reg().is(rax))) {
-    if (!cgen()->allocator()->is_used(rax)) {
-      value.ToRegister(rax);
-    }
-    MoveResultsToRegisters(&key, &receiver, rcx, rdx);
-    value.Unuse();
-  } else if (!cgen()->allocator()->is_used(rcx) ||
-             (key.is_register() && key.reg().is(rcx))) {
-    if (!cgen()->allocator()->is_used(rcx)) {
-      key.ToRegister(rcx);
-    }
-    MoveResultsToRegisters(&value, &receiver, rax, rdx);
-    key.Unuse();
-  } else if (!cgen()->allocator()->is_used(rdx) ||
-             (receiver.is_register() && receiver.reg().is(rdx))) {
-    if (!cgen()->allocator()->is_used(rdx)) {
-      receiver.ToRegister(rdx);
-    }
-    MoveResultsToRegisters(&key, &value, rcx, rax);
-    receiver.Unuse();
-  } else {
-    // All three registers are used, and no value is in the correct place.
-    // We have one of the two circular permutations of rax, rcx, rdx.
-    ASSERT(value.is_register());
-    if (value.reg().is(rcx)) {
-      __ xchg(rax, rdx);
-      __ xchg(rax, rcx);
-    } else {
-      __ xchg(rax, rcx);
-      __ xchg(rax, rdx);
-    }
-    value.Unuse();
-    key.Unuse();
-    receiver.Unuse();
-  }
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
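-
-// Permutation check (illustrative) for the circular case with value in
-// rcx, key in rdx, and receiver in rax:
-//   xchg rax, rdx   ; rax = key,   rdx = receiver
-//   xchg rax, rcx   ; rax = value, rcx = key
-// which lands value/key/receiver in rax/rcx/rdx as the IC expects; the
-// mirrored pair handles the other three-cycle.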
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
-                                int arg_count,
-                                int loop_nesting) {
-  // Function name, arguments, and receiver are found on top of the frame
-  // and dropped by the call.  The IC expects the name in rcx and the rest
-  // on the stack, and drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  name.ToRegister(rcx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
-                                     int arg_count,
-                                     int loop_nesting) {
-  // Function name, arguments, and receiver are found on top of the frame
-  // and dropped by the call.  The IC expects the name in rcx and the rest
-  // on the stack, and drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
-  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  name.ToRegister(rcx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
-  // Arguments, receiver, and function are on top of the frame.  The
-  // IC expects arg count in rax, function in rdi, and the arguments
-  // and receiver on the stack.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  // Duplicate the function before preparing the frame.
-  PushElementAt(arg_count);
-  Result function = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill function and args.
-  function.ToRegister(rdi);
-
-  // Constructors are called with the number of arguments in register
-  // rax for now. Another option would be to have a separate construct
-  // call trampoline for each argument count encountered.
-  Result num_args = cgen()->allocator()->Allocate(rax);
-  ASSERT(num_args.is_valid());
-  __ Set(num_args.reg(), arg_count);
-
-  function.Unuse();
-  num_args.Unuse();
-  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  // Grow the expression stack by handler size less one (the return
-  // address is already pushed by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
deleted file mode 100644
index aac9864..0000000
--- a/src/x64/virtual-frame-x64.h
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
-#define V8_X64_VIRTUAL_FRAME_X64_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
-  // generator is being transformed.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
-      ASSERT(cgen()->has_valid_frame());
-      cgen()->frame()->SpillAll();
-      cgen()->set_in_spilled_code(true);
-    }
-
-    ~SpilledScope() {
-      cgen()->set_in_spilled_code(previous_state_);
-    }
-
-   private:
-    bool previous_state_;
-
-    CodeGenerator* cgen() {
-      return CodeGeneratorScope::Current(Isolate::Current());
-    }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  CodeGenerator* cgen() {
-    return CodeGeneratorScope::Current(Isolate::Current());
-  }
-
-  MacroAssembler* masm() { return cgen()->masm(); }
-
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-    TypeInfo info = TypeInfo::Uninitialized());
-
-  // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
-
-  // The height of the virtual expression stack.
-  int height() {
-    return element_count() - expression_base_index();
-  }
-
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  inline int register_location(Register reg);
-
-  inline void set_register_location(Register reg, int index);
-
-  bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
-  }
-
-  inline bool is_used(Register reg);
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (e.g., the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget count elements from the top of the frame, all of them
-  // in-memory (including synced), and adjust the stack pointer downward to
-  // match an external frame effect (examples include a call removing
-  // its arguments, and exiting a try/catch removing an exception
-  // handler).  No code will be emitted.
-  void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    ForgetElements(count);
-  }
-
-  // Forget count elements from the top of the frame without adjusting
-  // the stack pointer downward.  This is used, for example, before
-  // merging frames at break, continue, and return targets.
-  void ForgetElements(int count);
-
-  // Spill all values from the frame to memory.
-  inline void SpillAll();
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (i.e., they all have frame-external references).
-  Register SpillAnyRegister();
-
-  // Spill the top element of the frame to memory.
-  void SpillTop() { SpillElementAt(element_count() - 1); }
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Make this frame mergable, so that an arbitrary frame of the same
-  // height can be merged to it.  Copies and constants are removed from
-  // the frame.
-  void MakeMergable();
-
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected);
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Use(i);
-    }
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by spilling locals.  This
-  // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
-  inline void PrepareForReturn();
-
-  // Number of local variables at or above which we use a loop for allocating.
-  static const int kLocalVarBound = 14;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // An element of the expression stack as an assembly operand.
-  Operand ElementAt(int index) const {
-    return Operand(rsp, index * kPointerSize);
-  }
-
-  // Random-access store to a frame-top relative frame element.  The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant.  The index is frame-top relative.
-  inline void SetElementAt(int index, Handle<Object> value);
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
-  void StoreToElementAt(int index) {
-    StoreToFrameSlotAt(element_count() - index - 1);
-  }
-
-  // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return Operand(rbp, kLocal0Offset - index * kPointerSize);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot.  The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // Push the function on top of the frame.
-  void PushFunction() { PushFrameSlotAt(function_index()); }
-
-  // Save the value of the rsi register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the rsi register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
-
-  // A parameter as an assembly operand.
-  Operand ParameterAt(int index) {
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count());
-    return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a parameter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
-
-  // The receiver frame slot.
-  Operand Receiver() { return ParameterAt(-1); }
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline Result CallStub(CodeStub* stub, int arg_count);
-
-  // Call stub that takes a single argument passed in rax.  The
-  // argument is given as a result which does not have to be rax or
-  // even a register.  The argument is consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg);
-
-  // Call stub that takes a pair of arguments passed in rdx (arg0) and
-  // rax (arg1).  The arguments are given as results which do not have
-  // to be in the proper registers or even in registers.  The
-  // arguments are consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  Result CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  Result CallRuntime(const Runtime::Function* f, int arg_count);
-  Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  Result InvokeBuiltin(Builtins::JavaScript id,
-                       InvokeFlag flag,
-                       int arg_count);
-
-  // Call load IC.  Name and receiver are found on top of the frame.
-  // Both are dropped.
-  Result CallLoadIC(RelocInfo::Mode mode);
-
-  // Call keyed load IC.  Key and receiver are found on top of the
-  // frame.  Both are dropped.
-  Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
-  // Call store IC.  If the load is contextual, value is found on top of the
-  // frame.  If not, value and receiver are on the frame.  Both are dropped.
-  Result CallStoreIC(Handle<String> name, bool is_contextual,
-                     StrictModeFlag strict_mode);
-
-  // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame.  All three are dropped.
-  Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call call IC.  Function name, arguments, and receiver are found on top
-  // of the frame and dropped by the call.
-  // The argument count does not include the receiver.
-  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Call keyed call IC.  Same calling convention as CallCallIC.
-  Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Allocate and call JS function as constructor.  Arguments,
-  // receiver (global object), and function are found on top of the
-  // frame.  Function is not dropped.  The argument count does not
-  // include the receiver.
-  Result CallConstructor(int arg_count);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Duplicate the top element of the frame.
-  void Dup() { PushFrameSlotAt(element_count() - 1); }
-
-  // Duplicate the n'th element from the top of the frame.
-  // Dup(1) is equivalent to Dup().
-  void Dup(int n) {
-    ASSERT(n > 0);
-    PushFrameSlotAt(element_count() - n);
-  }
-
-  // Pop an element from the top of the expression stack.  Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  void EmitPop(const Operand& operand);
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(const Operand& operand,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Heap::RootListIndex index,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Immediate immediate,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Smi* value);
-  // Uses kScratchRegister, emits appropriate relocation info.
-  void EmitPush(Handle<Object> value);
-
-  inline bool ConstantPoolOverflowed();
-
-  // Push an element on the virtual frame.
-  void Push(Handle<Object> value);
-  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
-  inline void Push(Smi* value);
-
-  // Pushing a result invalidates it (its contents become owned by the
-  // frame).
-  void Push(Result* result) {
-    if (result->is_register()) {
-      Push(result->reg(), result->type_info());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    result->Unuse();
-  }
-
-  // Pushing an expression expects that the expression is trivial (according
-  // to Expression::IsTrivial).
-  void Push(Expression* expr);
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  inline void Nip(int num_dropped);
-
-  inline void SetTypeForLocalAt(int index, TypeInfo info);
-  inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  ZoneList<FrameElement> elements_;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the rsp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count();
-  inline int local_count();
-
-  // The index of the element that is at the processor's frame pointer
-  // (the rbp register).  The parameters, receiver, and return address
-  // are below the frame pointer.
-  int frame_pointer() { return parameter_count() + 2; }
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame.  It is immediately
-  // above the frame pointer.
-  int context_index() { return frame_pointer() + 1; }
-
-  // The index of the function slot in the frame.  It is above the frame
-  // pointer and the context slot.
-  int function_index() { return frame_pointer() + 2; }
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lie the context and the function.
-  int local0_index() { return frame_pointer() + 3; }
-
-  // The index of the base of the expression stack.
-  int expression_base_index() { return local0_index() + local_count(); }
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
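-
-  // Worked example (assuming parameter_count() == 2): frame_pointer()
-  // is 4 and local0_index() is 7, so fp_relative(7) == (4 - 7) *
-  // kPointerSize == -24, i.e. the first local is Operand(rbp, -24),
-  // three slots below the saved frame pointer (context and function
-  // sit in between).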
-
-  // Record an occurrence of a register in the virtual frame.  This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame.  This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
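A toy model of this bookkeeping, with the allocator's external reference count collapsed into a single counter (not the real RegisterAllocator interface):

#include <cassert>

const int kIllegalIndex = -1;

struct RegisterBookkeeping {
  int location = kIllegalIndex;  // frame index holding this register
  int ref_count = 0;             // stands in for allocator Use/Unuse counting

  void Use(int index) {
    assert(location == kIllegalIndex);  // !is_used(reg)
    location = index;
    ++ref_count;
  }
  void Unuse() {
    assert(location != kIllegalIndex);  // is_used(reg)
    location = kIllegalIndex;
    --ref_count;
  }
};

int main() {
  RegisterBookkeeping rax;
  rax.Use(5);
  assert(rax.location == 5 && rax.ref_count == 1);
  rax.Unuse();
  assert(rax.location == kIllegalIndex && rax.ref_count == 0);
  return 0;
}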
-
-  // Spill the element at a particular index: write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index.  If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
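The split between these two helpers exists because the cases map to different machine operations: a slot at or below the stack pointer already has backing store and is synced with a plain store, while the slot just above it must be materialized with a push, which also advances the stack pointer. A toy model of the distinction (illustrative only):

#include <cassert>
#include <vector>

struct ToyFrame {
  std::vector<int> values;  // virtual frame contents (what should be on the stack)
  std::vector<int> memory;  // what is actually on the stack
  int stack_pointer() const { return static_cast<int>(memory.size()) - 1; }

  void SyncElementBelowStackPointer(int index) {
    memory[index] = values[index];         // store into the existing slot
  }
  void SyncElementByPushing(int index) {
    assert(index == stack_pointer() + 1);  // only the next slot can be pushed
    memory.push_back(values[index]);       // push advances the stack pointer
  }
};

int main() {
  ToyFrame f;
  f.values = {1, 2, 3};
  f.memory = {1, 0};  // slot 1 is stale; slot 2 has no backing store yet
  f.SyncElementBelowStackPointer(1);
  f.SyncElementByPushing(2);
  assert(f.memory == f.values);
  return 0;
}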
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
-  // Push the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
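The dropped_args adjustment is virtual bookkeeping: the upcoming call will pop those slots from the real stack, so the frame must shrink by the same amount before the call is emitted. A minimal model of just that accounting (spilling and syncing omitted; illustrative only):

#include <cassert>
#include <vector>

// The call consumes `dropped` stack slots; the virtual frame forgets them
// up front so frame and stack agree once the call returns.
void PrepareForCall(std::vector<int>* frame, int dropped) {
  frame->resize(frame->size() - dropped);
}

int main() {
  std::vector<int> frame = {7, 8, 9};  // say, receiver plus two arguments
  PrepareForCall(&frame, 2);           // the call will pop both arguments
  assert(frame.size() == 1);
  return 0;
}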
-
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
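The phase ordering matters because register-to-register moves alone cannot resolve a cycle: every destination is also a pending source. The sketch below shows the smallest such cycle, a two-register swap, broken by routing one value through memory, which mirrors the comment above about new memory-to-register moves being created:

#include <cassert>

int main() {
  // Merge targets form a cycle: r0 should end with r1's value and vice versa.
  int r0 = 10, r1 = 20;
  // No sequence of plain register-to-register moves can do this without
  // clobbering a pending source, so one value detours through memory.
  int memory_slot = r0;  // phase 1: register-to-memory spill
  r0 = r1;               // phase 2: register-to-register move
  r1 = memory_slot;      // phase 3: memory-to-register move (newly created)
  assert(r0 == 20 && r1 == 10);
  return 0;
}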
-
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
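When the invalidated slot backs one or more copies, one copy must be promoted to become the new backing store, and its index is returned. A toy version of that promotion (illustrative; real frame elements also track registers, constants, and sync state):

#include <cassert>
#include <vector>

const int kIllegalIndex = -1;

// Each slot is either its own value (backs == kIllegalIndex) or a copy of
// another slot, recorded by that slot's index.
struct Slot { bool valid = true; int backs = kIllegalIndex; };

int InvalidateFrameSlotAt(std::vector<Slot>* frame, int index) {
  int new_backing = kIllegalIndex;
  for (int i = 0; i < static_cast<int>(frame->size()); i++) {
    Slot* s = &(*frame)[i];
    if (s->backs == index) {
      if (new_backing == kIllegalIndex) {
        new_backing = i;           // first copy becomes the backing store
        s->backs = kIllegalIndex;
      } else {
        s->backs = new_backing;    // remaining copies repoint to it
      }
    }
  }
  (*frame)[index].valid = false;
  return new_backing;
}

int main() {
  std::vector<Slot> frame(4);
  frame[2].backs = 0;              // slots 2 and 3 are copies of slot 0
  frame[3].backs = 0;
  int promoted = InvalidateFrameSlotAt(&frame, 0);
  assert(promoted == 2);
  assert(frame[3].backs == 2 && !frame[0].valid);
  return 0;
}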
-
-  // This function assumes that a and b are the only results that could be in
-  // the registers a_reg or b_reg.  Other results can be live, but must not
-  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
-  void MoveResultsToRegisters(Result* a,
-                              Result* b,
-                              Register a_reg,
-                              Register b_reg);
-
-  // Call a code stub that has already been prepared for calling (via
-  // PrepareForCall).
-  Result RawCallStub(CodeStub* stub);
-
-  // Calls a code object which has already been prepared for calling
-  // (via PrepareForCall).
-  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
-  inline bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class FrameRegisterState;
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_VIRTUAL_FRAME_X64_H_