Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index c7ec6d9..d957872 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -40,7 +40,7 @@
 #include "src/ia32/assembler-ia32.h"
 
 #include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
 
 namespace v8 {
 namespace internal {
@@ -53,35 +53,25 @@
 
 
 // The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
-  bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
+void RelocInfo::apply(intptr_t delta) {
   if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
-  } else if (rmode_ == CODE_AGE_SEQUENCE) {
+  } else if (IsCodeAgeSequence(rmode_)) {
     if (*pc_ == kCallOpcode) {
       int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
       *p -= delta;  // Relocate entry.
-    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
     }
-  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
-    // Special handling of js_return when a break point is set (call
-    // instruction has been inserted).
-    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
-    *p -= delta;  // Relocate entry.
-    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
-  } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+  } else if (IsDebugBreakSlot(rmode_) && IsPatchedDebugBreakSlotSequence()) {
     // Special handling of a debug break slot when a break point is set (call
     // instruction has been inserted).
-    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+    int32_t* p = reinterpret_cast<int32_t*>(
+        pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p += delta;  // Relocate entry.
-    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   }
 }
 
@@ -114,7 +104,8 @@
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+  Assembler::set_target_address_at(isolate_, pc_, host_, target,
+                                   icache_flush_mode);
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
@@ -143,7 +134,7 @@
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CpuFeatures::FlushICache(pc_, sizeof(Address));
+    Assembler::FlushICache(isolate_, pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
@@ -154,12 +145,24 @@
 }
 
 
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
   DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
   return Memory::Address_at(pc_);
 }
 
 
+Address RelocInfo::target_internal_reference() {
+  DCHECK(rmode_ == INTERNAL_REFERENCE);
+  return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+  DCHECK(rmode_ == INTERNAL_REFERENCE);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
   DCHECK(IsRuntimeEntry(rmode_));
   return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
@@ -192,11 +195,12 @@
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
+  DCHECK(cell->IsCell());
   DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CpuFeatures::FlushICache(pc_, sizeof(Address));
+    Assembler::FlushICache(isolate_, pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
     // TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -226,22 +230,22 @@
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK(*pc_ == kCallOpcode);
   DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
-                                   icache_flush_mode);
+  Assembler::set_target_address_at(
+      isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
 }
 
 
-Address RelocInfo::call_address() {
-  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
-         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
-  return Assembler::target_address_at(pc_ + 1, host_);
+Address RelocInfo::debug_call_address() {
+  DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+  Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
+  return Assembler::target_address_at(location, host_);
 }
 
 
-void RelocInfo::set_call_address(Address target) {
-  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
-         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
-  Assembler::set_target_address_at(pc_ + 1, host_, target);
+void RelocInfo::set_debug_call_address(Address target) {
+  DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+  Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
+  Assembler::set_target_address_at(isolate_, location, host_, target);
   if (host() != NULL) {
     Object* target_code = Code::GetCodeFromTargetAddress(target);
     host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -250,29 +254,14 @@
 }
 
 
-Object* RelocInfo::call_object() {
-  return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
-  *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
-  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
-         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
-  return reinterpret_cast<Object**>(pc_ + 1);
-}
-
-
 void RelocInfo::WipeOut() {
-  if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+  if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+      IsInternalReference(rmode_)) {
     Memory::Address_at(pc_) = NULL;
   } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
     // Effectively write zero into the relocation.
-    Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+    Assembler::set_target_address_at(isolate_, pc_, host_,
+                                     pc_ + sizeof(int32_t));
   } else {
     UNREACHABLE();
   }
@@ -293,21 +282,19 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     visitor->VisitEmbeddedPointer(this);
-    CpuFeatures::FlushICache(pc_, sizeof(Address));
+    Assembler::FlushICache(isolate, pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::CELL) {
     visitor->VisitCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
-    CpuFeatures::FlushICache(pc_, sizeof(Address));
+  } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+    visitor->VisitInternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
-  } else if (((RelocInfo::IsJSReturn(mode) &&
-              IsPatchedReturnSequence()) ||
-             (RelocInfo::IsDebugBreakSlot(mode) &&
-              IsPatchedDebugBreakSlotSequence())) &&
-             isolate->debug()->has_break_points()) {
+  } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence()) {
     visitor->VisitDebugTarget(this);
   } else if (IsRuntimeEntry(mode)) {
     visitor->VisitRuntimeEntry(this);
@@ -320,21 +307,19 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitEmbeddedPointer(heap, this);
-    CpuFeatures::FlushICache(pc_, sizeof(Address));
+    Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::CELL) {
     StaticVisitor::VisitCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
-    CpuFeatures::FlushICache(pc_, sizeof(Address));
+  } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+    StaticVisitor::VisitInternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
-  } else if (heap->isolate()->debug()->has_break_points() &&
-             ((RelocInfo::IsJSReturn(mode) &&
-              IsPatchedReturnSequence()) ||
-             (RelocInfo::IsDebugBreakSlot(mode) &&
-              IsPatchedDebugBreakSlotSequence()))) {
+  } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence()) {
     StaticVisitor::VisitDebugTarget(heap, this);
   } else if (IsRuntimeEntry(mode)) {
     StaticVisitor::VisitRuntimeEntry(this);
@@ -395,6 +380,12 @@
 }
 
 
+void Assembler::emit_q(uint64_t x) {
+  *reinterpret_cast<uint64_t*>(pc_) = x;
+  pc_ += sizeof(uint64_t);
+}
+
+
 void Assembler::emit(Handle<Object> handle) {
   AllowDeferredHandleDereference heap_object_check;
   // Verify all Objects referred by code are NOT in new space.
@@ -459,20 +450,18 @@
 }
 
 
-Address Assembler::target_address_at(Address pc,
-                                     ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
   return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
 }
 
 
-void Assembler::set_target_address_at(Address pc,
-                                      ConstantPoolArray* constant_pool,
-                                      Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+                                      Address constant_pool, Address target,
                                       ICacheFlushMode icache_flush_mode) {
   int32_t* p = reinterpret_cast<int32_t*>(pc);
   *p = target - (pc + sizeof(int32_t));
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CpuFeatures::FlushICache(p, sizeof(int32_t));
+    Assembler::FlushICache(isolate, p, sizeof(int32_t));
   }
 }
 
@@ -482,11 +471,6 @@
 }
 
 
-Address Assembler::break_address_from_return_address(Address pc) {
-  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
 Displacement Assembler::disp_at(Label* L) {
   return Displacement(long_at(L->pos()));
 }
@@ -516,6 +500,12 @@
 }
 
 
+void Assembler::deserialization_set_target_internal_reference_at(
+    Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+  Memory::Address_at(pc) = target;
+}
+
+
 void Operand::set_modrm(int mod, Register rm) {
   DCHECK((mod & -4) == 0);
   buf_[0] = mod << 6 | rm.code();
@@ -571,6 +561,7 @@
   set_modrm(0, ebp);
   set_dispr(imm.x_, imm.rmode_);
 }
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 168a196..f120a62 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -36,12 +36,17 @@
 
 #include "src/ia32/assembler-ia32.h"
 
+#include <cstring>
+
+#if V8_TARGET_ARCH_IA32
+
+#if V8_LIBC_MSVCRT
+#include <intrin.h>  // _xgetbv()
+#endif
 #if V8_OS_MACOSX
 #include <sys/sysctl.h>
 #endif
 
-#if V8_TARGET_ARCH_IA32
-
 #include "src/base/bits.h"
 #include "src/base/cpu.h"
 #include "src/disassembler.h"
@@ -56,22 +61,44 @@
 
 namespace {
 
-bool EnableAVX() {
+#if !V8_LIBC_MSVCRT
+
+V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
+  unsigned eax, edx;
+  // Check xgetbv; this uses a .byte sequence instead of the instruction
+  // directly because older assemblers do not include support for xgetbv and
+  // there is no easy way to conditionally compile based on the assembler
+  // used.
+  __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+  return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
+}
+
+#define _XCR_XFEATURE_ENABLED_MASK 0
+
+#endif  // !V8_LIBC_MSVCRT
+
+
+bool OSHasAVXSupport() {
 #if V8_OS_MACOSX
-  // Mac OS X 10.9 has a bug where AVX transitions were indeed being caused by
-  // ISRs, so we detect Mac OS X 10.9 here and disable AVX in that case.
+  // Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
+  // caused by ISRs, so we detect that here and disable AVX in that case.
   char buffer[128];
   size_t buffer_size = arraysize(buffer);
-  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+  int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
   if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
     V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
   }
   // The buffer now contains a string of the form XX.YY.ZZ, where
-  // XX is the major kernel version component. 13.x.x (Mavericks) is
-  // affected by this bug, so disable AVX there.
-  if (memcmp(buffer, "13.", 3) == 0) return false;
+  // XX is the major kernel version component.
+  char* period_pos = strchr(buffer, '.');
+  DCHECK_NOT_NULL(period_pos);
+  *period_pos = '\0';
+  long kernel_version_major = strtol(buffer, nullptr, 10);  // NOLINT
+  if (kernel_version_major <= 13) return false;
 #endif  // V8_OS_MACOSX
-  return FLAG_enable_avx;
+  // Check whether OS claims to support AVX.
+  uint64_t feature_mask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+  return (feature_mask & 0x6) == 0x6;
 }
 
 }  // namespace
@@ -87,16 +114,36 @@
 
   if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
   if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
-  if (cpu.has_avx() && EnableAVX()) supported_ |= 1u << AVX;
-  if (cpu.has_fma3() && FLAG_enable_fma3) supported_ |= 1u << FMA3;
+  if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
+      OSHasAVXSupport()) {
+    supported_ |= 1u << AVX;
+  }
+  if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
+      OSHasAVXSupport()) {
+    supported_ |= 1u << FMA3;
+  }
+  if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
+  if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
+  if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
+  if (cpu.has_popcnt() && FLAG_enable_popcnt) supported_ |= 1u << POPCNT;
+  if (strcmp(FLAG_mcpu, "auto") == 0) {
+    if (cpu.is_atom()) supported_ |= 1u << ATOM;
+  } else if (strcmp(FLAG_mcpu, "atom") == 0) {
+    supported_ |= 1u << ATOM;
+  }
 }
 
 
 void CpuFeatures::PrintTarget() { }
 void CpuFeatures::PrintFeatures() {
-  printf("SSE3=%d SSE4_1=%d AVX=%d FMA3=%d\n", CpuFeatures::IsSupported(SSE3),
-         CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(AVX),
-         CpuFeatures::IsSupported(FMA3));
+  printf(
+      "SSE3=%d SSE4_1=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d POPCNT=%d "
+      "ATOM=%d\n",
+      CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
+      CpuFeatures::IsSupported(AVX), CpuFeatures::IsSupported(FMA3),
+      CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
+      CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
+      CpuFeatures::IsSupported(ATOM));
 }
 
 
@@ -121,9 +168,9 @@
 
 
 const int RelocInfo::kApplyMask =
-  RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
-    1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
-    1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
+    RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+    1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::CODE_AGE_SEQUENCE |
+    RelocInfo::kDebugBreakSlotMask;
 
 
 bool RelocInfo::IsCodedSpecially() {
@@ -140,48 +187,6 @@
 }
 
 
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
-  // Patch the code at the current address with the supplied instructions.
-  for (int i = 0; i < instruction_count; i++) {
-    *(pc_ + i) = *(instructions + i);
-  }
-
-  // Indicate that code has changed.
-  CpuFeatures::FlushICache(pc_, instruction_count);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
-  // Call instruction takes up 5 bytes and int3 takes up one byte.
-  static const int kCallCodeSize = 5;
-  int code_size = kCallCodeSize + guard_bytes;
-
-  // Create a code patcher.
-  CodePatcher patcher(pc_, code_size);
-
-  // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
-  Label check_codesize;
-  patcher.masm()->bind(&check_codesize);
-#endif
-
-  // Patch the code.
-  patcher.masm()->call(target, RelocInfo::NONE32);
-
-  // Check that the size of the code generated is as expected.
-  DCHECK_EQ(kCallCodeSize,
-            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
-  // Add the requested number of int3 instructions after the call.
-  DCHECK_GE(guard_bytes, 0);
-  for (int i = 0; i < guard_bytes; i++) {
-    patcher.masm()->int3();
-  }
-}
-
-
 // -----------------------------------------------------------------------------
 // Implementation of Operand
 
@@ -294,6 +299,7 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
+  reloc_info_writer.Finish();
   DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Set up code descriptor.
   desc->buffer = buffer_;
@@ -301,6 +307,7 @@
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
+  desc->constant_pool_size = 0;
 }
 
 
@@ -1270,6 +1277,14 @@
 }
 
 
+void Assembler::bsf(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xBC);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::hlt() {
   EnsureSpace ensure_space(this);
   EMIT(0xF4);
@@ -1301,6 +1316,13 @@
 }
 
 
+void Assembler::ud2() {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x0B);
+}
+
+
 // Labels refer to positions in the (to be) generated code.
 // There are bound, linked, and unused labels.
 //
@@ -1339,7 +1361,10 @@
   while (L->is_linked()) {
     Displacement disp = disp_at(L);
     int fixup_pos = L->pos();
-    if (disp.type() == Displacement::CODE_RELATIVE) {
+    if (disp.type() == Displacement::CODE_ABSOLUTE) {
+      long_at_put(fixup_pos, reinterpret_cast<int>(buffer_ + pos));
+      internal_reference_positions_.push_back(fixup_pos);
+    } else if (disp.type() == Displacement::CODE_RELATIVE) {
       // Relative to Code* heap object pointer.
       long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
     } else {
@@ -1540,12 +1565,12 @@
 }
 
 
-void Assembler::j(Condition cc, Handle<Code> code) {
+void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   // 0000 1111 1000 tttn #32-bit disp
   EMIT(0x0F);
   EMIT(0x80 | cc);
-  emit(code, RelocInfo::CODE_TARGET);
+  emit(code, rmode);
 }
 
 
@@ -2139,6 +2164,19 @@
 }
 
 
+void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x0A);
+  emit_sse_operand(dst, src);
+  // Mask precision exception.
+  EMIT(static_cast<byte>(mode) | 0x8);
+}
+
+
 void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
   DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
@@ -2178,6 +2216,42 @@
 }
 
 
+void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x62);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x6A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5F);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5D);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   EMIT(0xF2);
@@ -2242,26 +2316,6 @@
 }
 
 
-void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
-  DCHECK(IsEnabled(SSE4_1));
-  EnsureSpace ensure_space(this);
-  EMIT(0x66);
-  EMIT(0x0F);
-  EMIT(0x38);
-  EMIT(0x2A);
-  emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdq(const Operand& dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  EMIT(0x66);
-  EMIT(0x0F);
-  EMIT(0xE7);
-  emit_sse_operand(src, dst);
-}
-
-
 void Assembler::prefetch(const Operand& src, int level) {
   DCHECK(is_uint2(level));
   EnsureSpace ensure_space(this);
@@ -2506,6 +2560,15 @@
 }
 
 
+void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x51);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0x0f);
@@ -2514,6 +2577,24 @@
 }
 
 
+void Assembler::maxss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x5F);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x5D);
+  emit_sse_operand(dst, src);
+}
+
+
 // AVX instructions
 void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
                        const Operand& src2) {
@@ -2545,6 +2626,97 @@
 }
 
 
+void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kLIG, kF3, k0F, kWIG);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kL128, kNone, k0F, kWIG);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kL128, k66, k0F, kWIG);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
+  DCHECK(IsEnabled(BMI1));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(vreg, kLZ, kNone, k0F38, kW0);
+  EMIT(op);
+  emit_operand(reg, rm);
+}
+
+
+void Assembler::tzcnt(Register dst, const Operand& src) {
+  DCHECK(IsEnabled(BMI1));
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0xBC);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::lzcnt(Register dst, const Operand& src) {
+  DCHECK(IsEnabled(LZCNT));
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0xBD);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::popcnt(Register dst, const Operand& src) {
+  DCHECK(IsEnabled(POPCNT));
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0xB8);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
+                     const Operand& rm) {
+  DCHECK(IsEnabled(BMI2));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(vreg, kLZ, pp, k0F38, kW0);
+  EMIT(op);
+  emit_operand(reg, rm);
+}
+
+
+void Assembler::rorx(Register dst, const Operand& src, byte imm8) {
+  DCHECK(IsEnabled(BMI2));
+  DCHECK(is_uint8(imm8));
+  Register vreg = {0};  // VEX.vvvv unused
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(vreg, kLZ, kF2, k0F3A, kW0);
+  EMIT(0xF0);
+  emit_operand(dst, src);
+  EMIT(imm8);
+}
+
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
@@ -2570,7 +2742,8 @@
                                 LeadingOpcode mm, VexW w) {
   if (mm != k0F || w != kW0) {
     EMIT(0xc4);
-    EMIT(0xc0 | mm);
+    // Change RXB from "110" to "111" to align with gdb disassembler.
+    EMIT(0xe0 | mm);
     EMIT(w | ((~vreg.code() & 0xf) << 3) | l | pp);
   } else {
     EMIT(0xc5);
@@ -2579,25 +2752,10 @@
 }
 
 
-void Assembler::RecordJSReturn() {
-  positions_recorder()->WriteRecordedPositions();
-  EnsureSpace ensure_space(this);
-  RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
-  positions_recorder()->WriteRecordedPositions();
-  EnsureSpace ensure_space(this);
-  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
-  if (FLAG_code_comments || force) {
-    EnsureSpace ensure_space(this);
-    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
-  }
+void Assembler::emit_vex_prefix(Register vreg, VectorLength l, SIMDPrefix pp,
+                                LeadingOpcode mm, VexW w) {
+  XMMRegister ivreg = {vreg.code()};
+  emit_vex_prefix(ivreg, l, pp, mm, w);
 }
 
 
@@ -2618,6 +2776,7 @@
 
   // Set up new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.origin = this;
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
 
@@ -2642,15 +2801,10 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // Relocate runtime entries.
-  for (RelocIterator it(desc); !it.done(); it.next()) {
-    RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
-      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
-      if (*p != 0) {  // 0 means uninitialized.
-        *p += pc_delta;
-      }
-    }
+  // Relocate internal references.
+  for (auto pos : internal_reference_positions_) {
+    int32_t* p = reinterpret_cast<int32_t*>(buffer_ + pos);
+    *p += pc_delta;
   }
 
   DCHECK(!buffer_overflow());
@@ -2700,7 +2854,21 @@
   if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
     pc_ -= sizeof(int32_t);  // pc_ must be *at* disp32
     RecordRelocInfo(adr.rmode_);
-    pc_ += sizeof(int32_t);
+    if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) {  // Fixup for labels
+      emit_label(*reinterpret_cast<Label**>(pc_));
+    } else {
+      pc_ += sizeof(int32_t);
+    }
+  }
+}
+
+
+void Assembler::emit_label(Label* label) {
+  if (label->is_bound()) {
+    internal_reference_positions_.push_back(pc_offset());
+    emit(reinterpret_cast<uint32_t>(buffer_ + label->pos()));
+  } else {
+    emit_disp(label, Displacement::CODE_ABSOLUTE);
   }
 }
 
@@ -2725,6 +2893,19 @@
 }
 
 
+void Assembler::dq(uint64_t data) {
+  EnsureSpace ensure_space(this);
+  emit_q(data);
+}
+
+
+void Assembler::dd(Label* label) {
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+  emit_label(label);
+}
+
+
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   DCHECK(!RelocInfo::IsNone(rmode));
   // Don't record external references unless the heap will be serialized.
@@ -2732,25 +2913,11 @@
       !serializer_enabled() && !emit_debug_code()) {
     return;
   }
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
 
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
-  // No out-of-line constant pool support.
-  DCHECK(!FLAG_enable_ool_constant_pool);
-  return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
-  // No out-of-line constant pool support.
-  DCHECK(!FLAG_enable_ool_constant_pool);
-  return;
-}
-
-
 #ifdef GENERATED_CODE_COVERAGE
 static FILE* coverage_log = NULL;
 
@@ -2776,6 +2943,7 @@
 
 #endif
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index b913f7a..0b20252 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -37,12 +37,52 @@
 #ifndef V8_IA32_ASSEMBLER_IA32_H_
 #define V8_IA32_ASSEMBLER_IA32_H_
 
+#include <deque>
+
+#include "src/assembler.h"
 #include "src/isolate.h"
-#include "src/serialize.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+#define GENERAL_REGISTERS(V) \
+  V(eax)                     \
+  V(ecx)                     \
+  V(edx)                     \
+  V(ebx)                     \
+  V(esp)                     \
+  V(ebp)                     \
+  V(esi)                     \
+  V(edi)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+  V(eax)                                 \
+  V(ecx)                                 \
+  V(edx)                                 \
+  V(ebx)                                 \
+  V(esi)                                 \
+  V(edi)
+
+#define DOUBLE_REGISTERS(V) \
+  V(xmm0)                   \
+  V(xmm1)                   \
+  V(xmm2)                   \
+  V(xmm3)                   \
+  V(xmm4)                   \
+  V(xmm5)                   \
+  V(xmm6)                   \
+  V(xmm7)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+  V(xmm1)                               \
+  V(xmm2)                               \
+  V(xmm3)                               \
+  V(xmm4)                               \
+  V(xmm5)                               \
+  V(xmm6)                               \
+  V(xmm7)
+
 // CPU Registers.
 //
 // 1) We would prefer to use an enum, but enum values are assignment-
@@ -65,151 +105,86 @@
 // and best performance in optimized code.
 //
 struct Register {
-  static const int kMaxNumAllocatableRegisters = 6;
-  static int NumAllocatableRegisters() {
-    return kMaxNumAllocatableRegisters;
-  }
-  static const int kNumRegisters = 8;
+  enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+    GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+        kAfterLast,
+    kCode_no_reg = -1
+  };
 
-  static inline const char* AllocationIndexToString(int index);
-
-  static inline int ToAllocationIndex(Register reg);
-
-  static inline Register FromAllocationIndex(int index);
+  static const int kNumRegisters = Code::kAfterLast;
 
   static Register from_code(int code) {
     DCHECK(code >= 0);
     DCHECK(code < kNumRegisters);
-    Register r = { code };
+    Register r = {code};
     return r;
   }
-  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
-  bool is(Register reg) const { return code_ == reg.code_; }
-  // eax, ebx, ecx and edx are byte registers, the rest are not.
-  bool is_byte_register() const { return code_ <= 3; }
+  const char* ToString();
+  bool IsAllocatable() const;
+  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+  bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
     DCHECK(is_valid());
-    return code_;
+    return reg_code;
   }
   int bit() const {
     DCHECK(is_valid());
-    return 1 << code_;
+    return 1 << reg_code;
   }
 
+  bool is_byte_register() const { return reg_code <= 3; }
+
   // Unfortunately we can't make this private in a struct.
-  int code_;
+  int reg_code;
 };
 
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
 
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
 
 
-inline const char* Register::AllocationIndexToString(int index) {
-  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
-  // This is the mapping of allocation indices to registers.
-  const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
-  return kNames[index];
-}
+struct DoubleRegister {
+  enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+    DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+        kAfterLast,
+    kCode_no_reg = -1
+  };
 
+  static const int kMaxNumRegisters = Code::kAfterLast;
 
-inline int Register::ToAllocationIndex(Register reg) {
-  DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
-  return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index)  {
-  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
-  return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
-
-
-struct XMMRegister {
-  static const int kMaxNumAllocatableRegisters = 7;
-  static const int kMaxNumRegisters = 8;
-  static int NumAllocatableRegisters() {
-    return kMaxNumAllocatableRegisters;
-  }
-
-  // TODO(turbofan): Proper support for float32.
-  static int NumAllocatableAliasedRegisters() {
-    return NumAllocatableRegisters();
-  }
-
-  static int ToAllocationIndex(XMMRegister reg) {
-    DCHECK(reg.code() != 0);
-    return reg.code() - 1;
-  }
-
-  static XMMRegister FromAllocationIndex(int index) {
-    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
-    return from_code(index + 1);
-  }
-
-  static XMMRegister from_code(int code) {
-    XMMRegister result = { code };
+  static DoubleRegister from_code(int code) {
+    DoubleRegister result = {code};
     return result;
   }
 
-  bool is_valid() const {
-    return 0 <= code_ && code_ < kMaxNumRegisters;
-  }
+  bool IsAllocatable() const;
+  bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
 
   int code() const {
     DCHECK(is_valid());
-    return code_;
+    return reg_code;
   }
 
-  bool is(XMMRegister reg) const { return code_ == reg.code_; }
+  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
 
-  static const char* AllocationIndexToString(int index) {
-    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
-    const char* const names[] = {
-      "xmm1",
-      "xmm2",
-      "xmm3",
-      "xmm4",
-      "xmm5",
-      "xmm6",
-      "xmm7"
-    };
-    return names[index];
-  }
+  const char* ToString();
 
-  int code_;
+  int reg_code;
 };
 
+#define DECLARE_REGISTER(R) \
+  const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
 
-typedef XMMRegister DoubleRegister;
-
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister no_xmm_reg = { -1 };
-
+typedef DoubleRegister XMMRegister;
 
 enum Condition {
   // any value < 0 is considered no_condition
@@ -276,6 +251,14 @@
 }
 
 
+enum RoundingMode {
+  kRoundToNearest = 0x0,
+  kRoundDown = 0x1,
+  kRoundUp = 0x2,
+  kRoundToZero = 0x3
+};
+
+
 // -----------------------------------------------------------------------------
 // Machine instruction Immediates
 
@@ -357,6 +340,11 @@
                    int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE32);
 
+  static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
+    return Operand(index, scale, reinterpret_cast<int32_t>(table),
+                   RelocInfo::INTERNAL_REFERENCE);
+  }
+
   static Operand StaticVariable(const ExternalReference& ext) {
     return Operand(reinterpret_cast<int32_t>(ext.address()),
                    RelocInfo::EXTERNAL_REFERENCE);
@@ -430,11 +418,7 @@
 
 class Displacement BASE_EMBEDDED {
  public:
-  enum Type {
-    UNCONDITIONAL_JUMP,
-    CODE_RELATIVE,
-    OTHER
-  };
+  enum Type { UNCONDITIONAL_JUMP, CODE_RELATIVE, OTHER, CODE_ABSOLUTE };
 
   int data() const { return data_; }
   Type type() const { return TypeField::decode(data_); }
@@ -500,60 +484,53 @@
   void GetCode(CodeDesc* desc);
 
   // Read/Modify the code target in the branch/call instruction at pc.
-  inline static Address target_address_at(Address pc,
-                                          ConstantPoolArray* constant_pool);
-  inline static void set_target_address_at(Address pc,
-                                           ConstantPoolArray* constant_pool,
-                                           Address target,
-                                           ICacheFlushMode icache_flush_mode =
-                                               FLUSH_ICACHE_IF_NEEDED);
+  inline static Address target_address_at(Address pc, Address constant_pool);
+  inline static void set_target_address_at(
+      Isolate* isolate, Address pc, Address constant_pool, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   static inline Address target_address_at(Address pc, Code* code) {
-    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    Address constant_pool = code ? code->constant_pool() : NULL;
     return target_address_at(pc, constant_pool);
   }
-  static inline void set_target_address_at(Address pc,
-                                           Code* code,
-                                           Address target,
-                                           ICacheFlushMode icache_flush_mode =
-                                               FLUSH_ICACHE_IF_NEEDED) {
-    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(pc, constant_pool, target);
+  static inline void set_target_address_at(
+      Isolate* isolate, Address pc, Code* code, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+    Address constant_pool = code ? code->constant_pool() : NULL;
+    set_target_address_at(isolate, pc, constant_pool, target);
   }
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
   inline static Address target_address_from_return_address(Address pc);
 
-  // Return the code target address of the patch debug break slot
-  inline static Address break_address_from_return_address(Address pc);
-
   // This sets the branch destination (which is in the instruction on x86).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Code* code, Address target) {
-    set_target_address_at(instruction_payload, code, target);
+      Isolate* isolate, Address instruction_payload, Code* code,
+      Address target) {
+    set_target_address_at(isolate, instruction_payload, code, target);
   }
 
+  // This sets the internal reference at the pc.
+  inline static void deserialization_set_target_internal_reference_at(
+      Isolate* isolate, Address pc, Address target,
+      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
   static const int kSpecialTargetSize = kPointerSize;
 
   // Distance between the address of the code target in the call instruction
   // and the return address
   static const int kCallTargetAddressOffset = kPointerSize;
-  // Distance between start of patched return sequence and the emitted address
-  // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = 1;  // JMP imm32.
+
+  static const int kCallInstructionLength = 5;
+
+  // The debug break slot must be able to contain a call instruction.
+  static const int kDebugBreakSlotLength = kCallInstructionLength;
 
   // Distance between start of patched debug break slot and the emitted address
   // to jump to.
   static const int kPatchDebugBreakSlotAddressOffset = 1;  // JMP imm32.
 
-  static const int kCallInstructionLength = 5;
-  static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
-  static const int kJSReturnSequenceLength = 6;
-
-  // The debug break slot must be able to contain a call instruction.
-  static const int kDebugBreakSlotLength = kCallInstructionLength;
-
   // One byte opcode for test al, 0xXX.
   static const byte kTestAlByte = 0xA8;
   // One byte opcode for nop.
@@ -593,6 +570,9 @@
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+  void DataAlign(int m);
   void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
@@ -798,12 +778,15 @@
   void bts(const Operand& dst, Register src);
   void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
   void bsr(Register dst, const Operand& src);
+  void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
+  void bsf(Register dst, const Operand& src);
 
   // Miscellaneous
   void hlt();
   void int3();
   void nop();
   void ret(int imm16);
+  void ud2();
 
   // Label operations & relative jumps (PPUM Appendix D)
   //
@@ -846,7 +829,8 @@
          Label* L,
          Label::Distance distance = Label::kFar);
   void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
-  void j(Condition cc, Handle<Code> code);
+  void j(Condition cc, Handle<Code> code,
+         RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
 
   // Floating-point operations
   void fld(int i);
@@ -936,12 +920,19 @@
   void mulss(XMMRegister dst, const Operand& src);
   void divss(XMMRegister dst, XMMRegister src) { divss(dst, Operand(src)); }
   void divss(XMMRegister dst, const Operand& src);
+  void sqrtss(XMMRegister dst, XMMRegister src) { sqrtss(dst, Operand(src)); }
+  void sqrtss(XMMRegister dst, const Operand& src);
 
   void ucomiss(XMMRegister dst, XMMRegister src) { ucomiss(dst, Operand(src)); }
   void ucomiss(XMMRegister dst, const Operand& src);
   void movaps(XMMRegister dst, XMMRegister src);
   void shufps(XMMRegister dst, XMMRegister src, byte imm8);
 
+  void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
+  void maxss(XMMRegister dst, const Operand& src);
+  void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
+  void minss(XMMRegister dst, const Operand& src);
+
   void andps(XMMRegister dst, const Operand& src);
   void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
   void xorps(XMMRegister dst, const Operand& src);
@@ -997,13 +988,7 @@
   void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
   void ucomisd(XMMRegister dst, const Operand& src);
 
-  enum RoundingMode {
-    kRoundToNearest = 0x0,
-    kRoundDown      = 0x1,
-    kRoundUp        = 0x2,
-    kRoundToZero    = 0x3
-  };
-
+  void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
 
   void movmskpd(Register dst, XMMRegister src);
@@ -1012,6 +997,14 @@
   void cmpltsd(XMMRegister dst, XMMRegister src);
   void pcmpeqd(XMMRegister dst, XMMRegister src);
 
+  void punpckldq(XMMRegister dst, XMMRegister src);
+  void punpckhdq(XMMRegister dst, XMMRegister src);
+
+  void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
+  void maxsd(XMMRegister dst, const Operand& src);
+  void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
+  void minsd(XMMRegister dst, const Operand& src);
+
   void movdqa(XMMRegister dst, const Operand& src);
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqu(XMMRegister dst, const Operand& src);
@@ -1059,10 +1052,6 @@
   }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
 
-  // Parallel XMM operations.
-  void movntdqa(XMMRegister dst, const Operand& src);
-  void movntdq(const Operand& dst, XMMRegister src);
-
   // AVX instructions
   void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
     vfmadd132sd(dst, src1, Operand(src2));
@@ -1236,8 +1225,166 @@
   void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
     vsd(0x5e, dst, src1, src2);
   }
+  void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vmaxsd(dst, src1, Operand(src2));
+  }
+  void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5f, dst, src1, src2);
+  }
+  void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vminsd(dst, src1, Operand(src2));
+  }
+  void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5d, dst, src1, src2);
+  }
   void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
 
+  void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vaddss(dst, src1, Operand(src2));
+  }
+  void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vss(0x58, dst, src1, src2);
+  }
+  void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsubss(dst, src1, Operand(src2));
+  }
+  void vsubss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vss(0x5c, dst, src1, src2);
+  }
+  void vmulss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vmulss(dst, src1, Operand(src2));
+  }
+  void vmulss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vss(0x59, dst, src1, src2);
+  }
+  void vdivss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vdivss(dst, src1, Operand(src2));
+  }
+  void vdivss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vss(0x5e, dst, src1, src2);
+  }
+  void vmaxss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vmaxss(dst, src1, Operand(src2));
+  }
+  void vmaxss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vss(0x5f, dst, src1, src2);
+  }
+  void vminss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vminss(dst, src1, Operand(src2));
+  }
+  void vminss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vss(0x5d, dst, src1, src2);
+  }
+  void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
+  // BMI instructions
+  void andn(Register dst, Register src1, Register src2) {
+    andn(dst, src1, Operand(src2));
+  }
+  void andn(Register dst, Register src1, const Operand& src2) {
+    bmi1(0xf2, dst, src1, src2);
+  }
+  void bextr(Register dst, Register src1, Register src2) {
+    bextr(dst, Operand(src1), src2);
+  }
+  void bextr(Register dst, const Operand& src1, Register src2) {
+    bmi1(0xf7, dst, src2, src1);
+  }
+  void blsi(Register dst, Register src) { blsi(dst, Operand(src)); }
+  void blsi(Register dst, const Operand& src) {
+    Register ireg = {3};
+    bmi1(0xf3, ireg, dst, src);
+  }
+  void blsmsk(Register dst, Register src) { blsmsk(dst, Operand(src)); }
+  void blsmsk(Register dst, const Operand& src) {
+    Register ireg = {2};
+    bmi1(0xf3, ireg, dst, src);
+  }
+  void blsr(Register dst, Register src) { blsr(dst, Operand(src)); }
+  void blsr(Register dst, const Operand& src) {
+    Register ireg = {1};
+    bmi1(0xf3, ireg, dst, src);
+  }
+  void tzcnt(Register dst, Register src) { tzcnt(dst, Operand(src)); }
+  void tzcnt(Register dst, const Operand& src);
+
+  void lzcnt(Register dst, Register src) { lzcnt(dst, Operand(src)); }
+  void lzcnt(Register dst, const Operand& src);
+
+  void popcnt(Register dst, Register src) { popcnt(dst, Operand(src)); }
+  void popcnt(Register dst, const Operand& src);
+
+  void bzhi(Register dst, Register src1, Register src2) {
+    bzhi(dst, Operand(src1), src2);
+  }
+  void bzhi(Register dst, const Operand& src1, Register src2) {
+    bmi2(kNone, 0xf5, dst, src2, src1);
+  }
+  void mulx(Register dst1, Register dst2, Register src) {
+    mulx(dst1, dst2, Operand(src));
+  }
+  void mulx(Register dst1, Register dst2, const Operand& src) {
+    bmi2(kF2, 0xf6, dst1, dst2, src);
+  }
+  void pdep(Register dst, Register src1, Register src2) {
+    pdep(dst, src1, Operand(src2));
+  }
+  void pdep(Register dst, Register src1, const Operand& src2) {
+    bmi2(kF2, 0xf5, dst, src1, src2);
+  }
+  void pext(Register dst, Register src1, Register src2) {
+    pext(dst, src1, Operand(src2));
+  }
+  void pext(Register dst, Register src1, const Operand& src2) {
+    bmi2(kF3, 0xf5, dst, src1, src2);
+  }
+  void sarx(Register dst, Register src1, Register src2) {
+    sarx(dst, Operand(src1), src2);
+  }
+  void sarx(Register dst, const Operand& src1, Register src2) {
+    bmi2(kF3, 0xf7, dst, src2, src1);
+  }
+  void shlx(Register dst, Register src1, Register src2) {
+    shlx(dst, Operand(src1), src2);
+  }
+  void shlx(Register dst, const Operand& src1, Register src2) {
+    bmi2(k66, 0xf7, dst, src2, src1);
+  }
+  void shrx(Register dst, Register src1, Register src2) {
+    shrx(dst, Operand(src1), src2);
+  }
+  void shrx(Register dst, const Operand& src1, Register src2) {
+    bmi2(kF2, 0xf7, dst, src2, src1);
+  }
+  void rorx(Register dst, Register src, byte imm8) {
+    rorx(dst, Operand(src), imm8);
+  }
+  void rorx(Register dst, const Operand& src, byte imm8);
+
+#define PACKED_OP_LIST(V) \
+  V(and, 0x54)            \
+  V(xor, 0x57)
+
+#define AVX_PACKED_OP_DECLARE(name, opcode)                                  \
+  void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {    \
+    vps(opcode, dst, src1, Operand(src2));                                   \
+  }                                                                          \
+  void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+    vps(opcode, dst, src1, src2);                                            \
+  }                                                                          \
+  void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {    \
+    vpd(opcode, dst, src1, Operand(src2));                                   \
+  }                                                                          \
+  void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+    vpd(opcode, dst, src1, src2);                                            \
+  }
+
+  PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
+  void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+  void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+  void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+  void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
   // non-temporal
@@ -1249,21 +1396,27 @@
     return pc_offset() - label->pos();
   }
 
-  // Mark address of the ExitJSFrame code.
-  void RecordJSReturn();
+  // Mark generator continuation.
+  void RecordGeneratorContinuation();
 
   // Mark address of a debug break slot.
-  void RecordDebugBreakSlot();
+  void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --code-comments to enable, or provide "force = true" flag to always
-  // write a comment.
-  void RecordComment(const char* msg, bool force = false);
+  // Use --code-comments to enable.
+  void RecordComment(const char* msg);
+
+  // Record a deoptimization reason that can be used by a log or cpu profiler.
+  // Use --trace-deopt to enable.
+  void RecordDeoptReason(const int reason, const SourcePosition position);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
   void db(uint8_t data);
   void dd(uint32_t data);
+  void dq(uint64_t data);
+  void dp(uintptr_t data) { dd(data); }
+  void dd(Label* label);
 
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
@@ -1289,11 +1442,12 @@
   byte byte_at(int pos) { return buffer_[pos]; }
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
 
-  // Allocate a constant pool of the correct size for the generated code.
-  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
-  // Generate the constant pool for the generated code.
-  void PopulateConstantPool(ConstantPoolArray* constant_pool);
+  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+                                          ConstantPoolEntry::Access access,
+                                          ConstantPoolEntry::Type type) {
+    // No embedded constant pool support.
+    UNREACHABLE();
+  }
 
  protected:
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
@@ -1324,6 +1478,7 @@
                    TypeFeedbackId id = TypeFeedbackId::None());
   inline void emit(const Immediate& x);
   inline void emit_w(const Immediate& x);
+  inline void emit_q(uint64_t x);
 
   // Emit the code-object-relative offset of the label's position
   inline void emit_code_relative_offset(Label* label);
@@ -1339,15 +1494,19 @@
 
   void emit_operand(Register reg, const Operand& adr);
 
+  void emit_label(Label* label);
+
   void emit_farith(int b1, int b2, int i);
 
   // Emit vex prefix
   enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
-  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128 };
+  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
   enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
-  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x2 };
+  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
   inline void emit_vex_prefix(XMMRegister v, VectorLength l, SIMDPrefix pp,
                               LeadingOpcode m, VexW w);
+  inline void emit_vex_prefix(Register v, VectorLength l, SIMDPrefix pp,
+                              LeadingOpcode m, VexW w);
 
   // labels
   void print(Label* L);
@@ -1359,12 +1518,22 @@
   inline void emit_disp(Label* L, Displacement::Type type);
   inline void emit_near_disp(Label* L);
 
+  // Most BMI instructions are similar.
+  void bmi1(byte op, Register reg, Register vreg, const Operand& rm);
+  void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
+            const Operand& rm);
+
   // record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
 
   friend class CodePatcher;
   friend class EnsureSpace;
 
+  // Internal reference positions, required for (potential) patching in
+  // GrowBuffer(); contains only those internal references whose labels
+  // are already bound.
+  std::deque<int> internal_reference_positions_;
+
   // code generation
   RelocInfoWriter reloc_info_writer;
 
@@ -1400,6 +1569,7 @@
 #endif
 };
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 5767489..a2aec74 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -2,14 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ia32/frames-ia32.h"
 
 namespace v8 {
 namespace internal {
@@ -23,45 +22,62 @@
                                 BuiltinExtraArguments extra_args) {
   // ----------- S t a t e -------------
   //  -- eax                : number of arguments excluding receiver
-  //  -- edi                : called function (only guaranteed when
-  //                          extra_args requires it)
-  //  -- esi                : context
+  //  -- edi                : target
+  //  -- edx                : new.target
   //  -- esp[0]             : return address
   //  -- esp[4]             : last argument
   //  -- ...
-  //  -- esp[4 * argc]      : first argument (argc == eax)
+  //  -- esp[4 * argc]      : first argument
   //  -- esp[4 * (argc +1)] : receiver
   // -----------------------------------
+  __ AssertFunction(edi);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Insert extra arguments.
   int num_extra_args = 0;
-  if (extra_args == NEEDS_CALLED_FUNCTION) {
-    num_extra_args = 1;
-    Register scratch = ebx;
-    __ pop(scratch);  // Save return address.
-    __ push(edi);
-    __ push(scratch);  // Restore return address.
-  } else {
-    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+  if (extra_args != BuiltinExtraArguments::kNone) {
+    __ PopReturnAddressTo(ecx);
+    if (extra_args & BuiltinExtraArguments::kTarget) {
+      ++num_extra_args;
+      __ Push(edi);
+    }
+    if (extra_args & BuiltinExtraArguments::kNewTarget) {
+      ++num_extra_args;
+      __ Push(edx);
+    }
+    __ PushReturnAddressFrom(ecx);
   }
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
   __ add(eax, Immediate(num_extra_args + 1));
+
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
 
 static void CallRuntimePassFunction(
     MacroAssembler* masm, Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+
   FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the function.
+  // Push a copy of the target function and the new target.
   __ push(edi);
+  __ push(edx);
   // Function is also the parameter to the runtime call.
   __ push(edi);
 
   __ CallRuntime(function_id, 1);
-  // Restore receiver.
+  // Restore target function and new target.
+  __ pop(edx);
   __ pop(edi);
 }
 
@@ -102,303 +118,183 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_memento) {
+                                           bool create_implicit_receiver) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
   //  -- edi: constructor function
   //  -- ebx: allocation site or undefined
+  //  -- edx: new target
   // -----------------------------------
 
-  // Should never create mementos for api functions.
-  DCHECK(!is_api_function || !create_memento);
-
   // Enter a construct frame.
   {
     FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-    if (create_memento) {
-      __ AssertUndefinedOrAllocationSite(ebx);
-      __ push(ebx);
-    }
-
-    // Store a smi-tagged arguments count on the stack.
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(ebx);
+    __ push(ebx);
     __ SmiTag(eax);
     __ push(eax);
 
-    // Push the function to invoke on the stack.
-    __ push(edi);
-
-    // Try to allocate the object without transitioning into C code. If any of
-    // the preconditions is not met, the code bails out to the runtime call.
-    Label rt_call, allocated;
-    if (FLAG_inline_new) {
-      Label undo_allocation;
-      ExternalReference debug_step_in_fp =
-          ExternalReference::debug_step_in_fp_address(masm->isolate());
-      __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
-      __ j(not_equal, &rt_call);
-
-      // Verified that the constructor is a JSFunction.
-      // Load the initial map and verify that it is in fact a map.
-      // edi: constructor
-      __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-      // Will both indicate a NULL and a Smi
-      __ JumpIfSmi(eax, &rt_call);
-      // edi: constructor
-      // eax: initial map (if proven valid below)
-      __ CmpObjectType(eax, MAP_TYPE, ebx);
-      __ j(not_equal, &rt_call);
-
-      // Check that the constructor is not constructing a JSFunction (see
-      // comments in Runtime_NewObject in runtime.cc). In which case the
-      // initial map's instance type would be JS_FUNCTION_TYPE.
-      // edi: constructor
-      // eax: initial map
-      __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-      __ j(equal, &rt_call);
-
-      if (!is_api_function) {
-        Label allocate;
-        // The code below relies on these assumptions.
-        STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
-        // Check if slack tracking is enabled.
-        __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
-        __ shr(esi, Map::Counter::kShift);
-        __ cmp(esi, Map::kSlackTrackingCounterEnd);
-        __ j(less, &allocate);
-        // Decrease generous allocation count.
-        __ sub(FieldOperand(eax, Map::kBitField3Offset),
-               Immediate(1 << Map::Counter::kShift));
-
-        __ cmp(esi, Map::kSlackTrackingCounterEnd);
-        __ j(not_equal, &allocate);
-
-        __ push(eax);
-        __ push(edi);
-
-        __ push(edi);  // constructor
-        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-        __ pop(edi);
-        __ pop(eax);
-        __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
-
-        __ bind(&allocate);
-      }
-
-      // Now allocate the JSObject on the heap.
-      // edi: constructor
-      // eax: initial map
-      __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-      __ shl(edi, kPointerSizeLog2);
-      if (create_memento) {
-        __ add(edi, Immediate(AllocationMemento::kSize));
-      }
-
-      __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
-      Factory* factory = masm->isolate()->factory();
-
-      // Allocated the JSObject, now initialize the fields.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object (including memento if create_memento)
-      __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-      __ mov(ecx, factory->empty_fixed_array());
-      __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-      __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-      // Set extra fields in the newly allocated object.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object (including memento if create_memento)
-      // esi: slack tracking counter (non-API function case)
-      __ mov(edx, factory->undefined_value());
-      __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-      if (!is_api_function) {
-        Label no_inobject_slack_tracking;
-
-        // Check if slack tracking is enabled.
-        __ cmp(esi, Map::kSlackTrackingCounterEnd);
-        __ j(less, &no_inobject_slack_tracking);
-
-        // Allocate object with a slack.
-        __ movzx_b(esi,
-                   FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-        __ lea(esi,
-               Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
-        // esi: offset of first field after pre-allocated fields
-        if (FLAG_debug_code) {
-          __ cmp(esi, edi);
-          __ Assert(less_equal,
-                    kUnexpectedNumberOfPreAllocatedPropertyFields);
-        }
-        __ InitializeFieldsWithFiller(ecx, esi, edx);
-        __ mov(edx, factory->one_pointer_filler_map());
-        // Fill the remaining fields with one pointer filler map.
-
-        __ bind(&no_inobject_slack_tracking);
-      }
-
-      if (create_memento) {
-        __ lea(esi, Operand(edi, -AllocationMemento::kSize));
-        __ InitializeFieldsWithFiller(ecx, esi, edx);
-
-        // Fill in memento fields if necessary.
-        // esi: points to the allocated but uninitialized memento.
-        __ mov(Operand(esi, AllocationMemento::kMapOffset),
-               factory->allocation_memento_map());
-        // Get the cell or undefined.
-        __ mov(edx, Operand(esp, kPointerSize*2));
-        __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
-               edx);
-      } else {
-        __ InitializeFieldsWithFiller(ecx, edi, edx);
-      }
-
-      // Add the object tag to make the JSObject real, so that we can continue
-      // and jump into the continuation code at any time from now on. Any
-      // failures need to undo the allocation, so that the heap is in a
-      // consistent state and verifiable.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object
-      __ or_(ebx, Immediate(kHeapObjectTag));
-
-      // Check if a non-empty properties array is needed.
-      // Allocate and initialize a FixedArray if it is.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object
-      // Calculate the total number of properties described by the map.
-      __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-      __ movzx_b(ecx,
-                 FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-      __ add(edx, ecx);
-      // Calculate unused properties past the end of the in-object properties.
-      __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
-      __ sub(edx, ecx);
-      // Done if no extra properties are to be allocated.
-      __ j(zero, &allocated);
-      __ Assert(positive, kPropertyAllocationCountFailed);
-
-      // Scale the number of elements by pointer size and add the header for
-      // FixedArrays to the start of the next object calculation from above.
-      // ebx: JSObject
-      // edi: start of next object (will be start of FixedArray)
-      // edx: number of elements in properties array
-      __ Allocate(FixedArray::kHeaderSize,
-                  times_pointer_size,
-                  edx,
-                  REGISTER_VALUE_IS_INT32,
-                  edi,
-                  ecx,
-                  no_reg,
-                  &undo_allocation,
-                  RESULT_CONTAINS_TOP);
-
-      // Initialize the FixedArray.
-      // ebx: JSObject
-      // edi: FixedArray
-      // edx: number of elements
-      // ecx: start of next object
-      __ mov(eax, factory->fixed_array_map());
-      __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
-      __ SmiTag(edx);
-      __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
-
-      // Initialize the fields to undefined.
-      // ebx: JSObject
-      // edi: FixedArray
-      // ecx: start of next object
-      { Label loop, entry;
-        __ mov(edx, factory->undefined_value());
-        __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
-        __ jmp(&entry);
-        __ bind(&loop);
-        __ mov(Operand(eax, 0), edx);
-        __ add(eax, Immediate(kPointerSize));
-        __ bind(&entry);
-        __ cmp(eax, ecx);
-        __ j(below, &loop);
-      }
-
-      // Store the initialized FixedArray into the properties field of
-      // the JSObject
-      // ebx: JSObject
-      // edi: FixedArray
-      __ or_(edi, Immediate(kHeapObjectTag));  // add the heap tag
-      __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
-      // Continue with JSObject being successfully allocated
-      // ebx: JSObject
-      __ jmp(&allocated);
-
-      // Undo the setting of the new top so that the heap is verifiable. For
-      // example, the map's unused properties potentially do not match the
-      // allocated objects unused properties.
-      // ebx: JSObject (previous new top)
-      __ bind(&undo_allocation);
-      __ UndoAllocationInNewSpace(ebx);
-    }
-
-    // Allocate the new receiver object using the runtime call.
-    __ bind(&rt_call);
-    int offset = 0;
-    if (create_memento) {
-      // Get the cell or allocation site.
-      __ mov(edi, Operand(esp, kPointerSize * 2));
+    if (create_implicit_receiver) {
       __ push(edi);
-      offset = kPointerSize;
+      __ push(edx);
+
+      // Try to allocate the object without transitioning into C code. If any of
+      // the preconditions is not met, the code bails out to the runtime call.
+      Label rt_call, allocated;
+      if (FLAG_inline_new) {
+        // Verify that the new target is a JSFunction.
+        __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+        __ j(not_equal, &rt_call);
+
+        // Load the initial map and verify that it is in fact a map.
+        // edx: new target
+        __ mov(eax,
+               FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+        // Will both indicate a NULL and a Smi
+        __ JumpIfSmi(eax, &rt_call);
+        // edi: constructor
+        // eax: initial map (if proven valid below)
+        __ CmpObjectType(eax, MAP_TYPE, ebx);
+        __ j(not_equal, &rt_call);
+
+        // Fall back to the runtime if the expected base constructor and the
+        // actual base constructor differ.
+        __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+        __ j(not_equal, &rt_call);
+
+        // Check that the constructor is not constructing a JSFunction (see
+        // comments in Runtime_NewObject in runtime.cc). In which case the
+        // initial map's instance type would be JS_FUNCTION_TYPE.
+        // edi: constructor
+        // eax: initial map
+        __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+        __ j(equal, &rt_call);
+
+        // Now allocate the JSObject on the heap.
+        // edi: constructor
+        // eax: initial map
+        __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+        __ shl(edi, kPointerSizeLog2);
+
+        __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+        Factory* factory = masm->isolate()->factory();
+
+        // Allocated the JSObject, now initialize the fields.
+        // eax: initial map
+        // ebx: JSObject (not HeapObject tagged - the actual address).
+        // edi: start of next object
+        __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+        __ mov(ecx, factory->empty_fixed_array());
+        __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+        __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+        __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+
+        // Add the object tag to make the JSObject real, so that we can continue
+        // and jump into the continuation code at any time from now on.
+        __ or_(ebx, Immediate(kHeapObjectTag));
+
+        // Fill all the in-object properties with the appropriate filler.
+        // ebx: JSObject (tagged)
+        // ecx: First in-object property of JSObject (not tagged)
+        __ mov(edx, factory->undefined_value());
+
+        if (!is_api_function) {
+          Label no_inobject_slack_tracking;
+
+          // The code below relies on these assumptions.
+          STATIC_ASSERT(Map::kNoSlackTracking == 0);
+          STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+          // Check if slack tracking is enabled.
+          __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+          __ shr(esi, Map::ConstructionCounter::kShift);
+          __ j(zero, &no_inobject_slack_tracking);  // Map::kNoSlackTracking
+          __ push(esi);  // Save allocation count value.
+          // Decrease generous allocation count.
+          __ sub(FieldOperand(eax, Map::kBitField3Offset),
+                 Immediate(1 << Map::ConstructionCounter::kShift));
+
+          // Allocate object with a slack.
+          __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+          __ neg(esi);
+          __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
+          // esi: address of the first field after the pre-allocated fields
+          if (FLAG_debug_code) {
+            __ cmp(ecx, esi);
+            __ Assert(less_equal,
+                      kUnexpectedNumberOfPreAllocatedPropertyFields);
+          }
+          __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+          // To allow truncation fill the remaining fields with one pointer
+          // filler map.
+          __ mov(edx, factory->one_pointer_filler_map());
+          __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+          __ pop(esi);  // Restore the allocation count saved before decrementing.
+          __ cmp(esi, Map::kSlackTrackingCounterEnd);
+          __ j(not_equal, &allocated);
+
+          // Push the object to the stack, and then the initial map as
+          // an argument to the runtime call.
+          __ push(ebx);
+          __ push(eax);  // initial map
+          __ CallRuntime(Runtime::kFinalizeInstanceSize);
+          __ pop(ebx);
+
+          // Continue with JSObject being successfully allocated
+          // ebx: JSObject (tagged)
+          __ jmp(&allocated);
+
+          __ bind(&no_inobject_slack_tracking);
+        }
+
+        __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+        // Continue with JSObject being successfully allocated
+        // ebx: JSObject (tagged)
+        __ jmp(&allocated);
+      }
+
+      // Allocate the new receiver object using the runtime call.
+      // edx: new target
+      __ bind(&rt_call);
+      int offset = kPointerSize;
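+      // edx was pushed on top of edi above, so edi sits at esp + kPointerSize.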
+
+      // Must restore esi (context) and edi (constructor) before calling
+      // runtime.
+      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+      __ mov(edi, Operand(esp, offset));
+      __ push(edi);  // constructor function
+      __ push(edx);  // new target
+      __ CallRuntime(Runtime::kNewObject);
+      __ mov(ebx, eax);  // store result in ebx
+
+      // New object allocated.
+      // ebx: newly allocated object
+      __ bind(&allocated);
+
+      // Restore the parameters.
+      __ pop(edx);  // new.target
+      __ pop(edi);  // Constructor function.
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ mov(eax, Operand(esp, 0));
     }
 
-    // Must restore esi (context) and edi (constructor) before calling runtime.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ mov(edi, Operand(esp, offset));
-    // edi: function (constructor)
-    __ push(edi);
-    if (create_memento) {
-      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
-    } else {
-      __ CallRuntime(Runtime::kNewObject, 1);
-    }
-    __ mov(ebx, eax);  // store result in ebx
-
-    // If we ended up using the runtime, and we want a memento, then the
-    // runtime call made it for us, and we shouldn't do create count
-    // increment.
-    Label count_incremented;
-    if (create_memento) {
-      __ jmp(&count_incremented);
-    }
-
-    // New object allocated.
-    // ebx: newly allocated object
-    __ bind(&allocated);
-
-    if (create_memento) {
-      __ mov(ecx, Operand(esp, kPointerSize * 2));
-      __ cmp(ecx, masm->isolate()->factory()->undefined_value());
-      __ j(equal, &count_incremented);
-      // ecx is an AllocationSite. We are creating a memento from it, so we
-      // need to increment the memento create count.
-      __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
-             Immediate(Smi::FromInt(1)));
-      __ bind(&count_incremented);
-    }
-
-    // Retrieve the function from the stack.
-    __ pop(edi);
-
-    // Retrieve smi-tagged arguments count from the stack.
-    __ mov(eax, Operand(esp, 0));
     __ SmiUntag(eax);
 
-    // Push the allocated receiver to the stack. We need two copies
-    // because we may have to return the original one and the calling
-    // conventions dictate that the called function pops the receiver.
-    __ push(ebx);
-    __ push(ebx);
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ push(ebx);
+      __ push(ebx);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
 
     // Set up pointer to last argument.
     __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -421,39 +317,44 @@
       __ call(code, RelocInfo::CODE_TARGET);
     } else {
       ParameterCount actual(eax);
-      __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                        NullCallWrapper());
+      __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+                        CheckDebugStepCallWrapper());
     }
 
     // Store offset of return address for deoptimizer.
-    if (!is_api_function) {
+    if (create_implicit_receiver && !is_api_function) {
       masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
     }
 
     // Restore context from the frame.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 
-    // If the result is an object (in the ECMA sense), we should get rid
-    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-    // on page 74.
-    Label use_receiver, exit;
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
 
-    // If the result is a smi, it is *not* an object in the ECMA sense.
-    __ JumpIfSmi(eax, &use_receiver);
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      __ JumpIfSmi(eax, &use_receiver);
 
-    // If the type of the result (stored in its map) is less than
-    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(above_equal, &exit);
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+      __ j(above_equal, &exit);
 
-    // Throw away the result of the constructor invocation and use the
-    // on-stack receiver as the result.
-    __ bind(&use_receiver);
-    __ mov(eax, Operand(esp, 0));
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ mov(eax, Operand(esp, 0));
 
-    // Restore the arguments count and leave the construct frame.
-    __ bind(&exit);
-    __ mov(ebx, Operand(esp, kPointerSize));  // Get arguments count.
+      // Restore the arguments count and leave the construct frame. The
+      // arguments count is stored below the receiver.
+      __ bind(&exit);
+      __ mov(ebx, Operand(esp, 1 * kPointerSize));
+    } else {
+      __ mov(ebx, Operand(esp, 0));
+    }
 
     // Leave construct frame.
   }
@@ -463,18 +364,67 @@
   __ pop(ecx);
   __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
   __ push(ecx);
-  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+  }
   __ ret(0);
 }
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+  Generate_JSConstructStubHelper(masm, false, true);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, false);
+  Generate_JSConstructStubHelper(masm, true, true);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false);
+}
+
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ push(edi);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+
+enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
+
+
+// Clobbers ecx, edx, edi; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+                                        IsTagged eax_is_tagged) {
+  // eax   : the number of items to be pushed to the stack
+  //
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the space we have left. The stack might already be overflowed
+  // here which will cause ecx to become negative.
+  __ mov(ecx, esp);
+  __ sub(ecx, edi);
+  // Make edx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edx, eax);
+  int smi_tag = eax_is_tagged == kEaxIsSmiTagged ? kSmiTagSize : 0;
+  __ shl(edx, kPointerSizeLog2 - smi_tag);
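+  // (A smi-tagged count is already scaled by two, hence the smaller shift.)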
+  // Check if the arguments will overflow the stack.
+  __ cmp(ecx, edx);
+  __ j(greater, &okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
 }
 
 
@@ -488,25 +438,30 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Set up the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(esi, Operand::StaticVariable(context_address));
+
     // Load the previous frame pointer (ebx) to access C arguments
     __ mov(ebx, Operand(ebp, 0));
 
-    // Get the function from the frame and setup the context.
-    __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-    __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
     // Push the function and the receiver onto the stack.
-    __ push(ecx);
+    __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
     __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
 
     // Load the number of arguments and setup pointer to the arguments.
     __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
     __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
 
+    // Check if we have enough stack space to push all arguments.
+    // Expects argument count in eax. Clobbers ecx, edx, edi.
+    Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
+
     // Copy arguments to the stack in a loop.
     Label loop, entry;
     __ Move(ecx, Immediate(0));
-    __ jmp(&entry);
+    __ jmp(&entry, Label::kNear);
     __ bind(&loop);
     __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
     __ push(Operand(edx, 0));  // dereference handle
@@ -515,21 +470,18 @@
     __ cmp(ecx, eax);
     __ j(not_equal, &loop);
 
-    // Get the function from the stack and call it.
-    // kPointerSize for the receiver.
-    __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
+
+    // Get the new.target and function from the frame.
+    __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
+    __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
 
     // Invoke the code.
-    if (is_construct) {
-      // No type feedback cell is available
-      __ mov(ebx, masm->isolate()->factory()->undefined_value());
-      CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
-      __ CallStub(&stub);
-    } else {
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                        NullCallWrapper());
-    }
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
 
     // Exit the internal frame. Notice that this also removes the empty
     // context and the function left on the stack by the code
@@ -549,36 +501,342 @@
 }
 
 
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o edi: the JS function object being called
+//   o edx: the new target
+//   o esi: our context
+//   o ebp: the caller's frame pointer
+//   o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
+  __ push(edx);  // Callee's new target.
+
+  // Push zero for bytecode array offset.
+  __ push(Immediate(0));
+
+  // Get the bytecode array from the function object and load it into
+  // kInterpreterBytecodeArrayRegister (edi).
+  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     eax);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+                             BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ mov(ecx, esp);
+    __ sub(ecx, ebx);
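+    // ecx is now the lowest address the new frame would reach.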
+    ExternalReference stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ cmp(ecx, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+    __ jmp(&loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(eax);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ sub(ebx, Immediate(kPointerSize));
+    __ j(greater_equal, &loop_header);
+  }
+
+  // TODO(rmcilroy): List of things not currently dealt with here but done in
+  // fullcodegen's prologue:
+  //  - Support profiler (specifically profiling_counter).
+  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+  //  - Allow simulator stop operations if FLAG_stop_at is set.
+  //  - Code aging of the BytecodeArray object.
+
+  // Perform stack guard check.
+  {
+    Label ok;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(masm->isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok);
+    __ push(kInterpreterBytecodeArrayRegister);
+    __ CallRuntime(Runtime::kStackGuard);
+    __ pop(kInterpreterBytecodeArrayRegister);
+    __ bind(&ok);
+  }
+
+  // Load accumulator, register file, bytecode offset, dispatch table into
+  // registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ mov(kInterpreterRegisterFileRegister, ebp);
+  __ add(kInterpreterRegisterFileRegister,
+         Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  // Since the dispatch table root might be set after builtins are generated,
+  // load directly from the roots table.
+  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
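+  // ebx now points at the first entry of the dispatch table.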
+
+  // Push dispatch table as a stack located parameter to the bytecode handler.
+  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+  __ push(ebx);
+
+  // Dispatch to the first bytecode handler for the function.
+  __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
+  // Restore undefined_value in accumulator (eax)
+  // TODO(rmcilroy): Remove this once we move the dispatch table back into a
+  // register.
+  __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
+  // and header removal.
+  __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
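+  // ebx now holds the address of the handler's first instruction.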
+  __ call(ebx);
+  __ nop();  // Ensure that return address still counts as interpreter entry
+             // trampoline.
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+  // TODO(rmcilroy): List of things not currently dealt with here but done in
+  // fullcodegen's EmitReturnSequence.
+  //  - Supporting FLAG_trace for Runtime::TraceExit.
+  //  - Support profiler (specifically decrementing profiling_counter
+  //    appropriately and calling out to HandleInterrupts if necessary).
+
+  // The return value is in the accumulator, which is already in eax.
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments and return.
+  __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+                           BytecodeArray::kParameterSizeOffset));
+  __ pop(ecx);
+  __ add(esp, ebx);
+  __ push(ecx);
+  __ ret(0);
+}
+
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register array_limit) {
+  // ----------- S t a t e -------------
+  //  -- ebx : Pointer to the last argument in the args array.
+  //  -- array_limit : Pointer to one before the first argument in the
+  //                   args array.
+  // -----------------------------------
+  Label loop_header, loop_check;
+  __ jmp(&loop_check);
+  __ bind(&loop_header);
+  __ Push(Operand(ebx, 0));
+  __ sub(ebx, Immediate(kPointerSize));
+  __ bind(&loop_check);
+  __ cmp(ebx, array_limit);
+  __ j(greater, &loop_header, Label::kNear);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  //  -- edi : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ Pop(edx);
+
+  // Find the address of the last argument.
+  __ mov(ecx, eax);
+  __ add(ecx, Immediate(1));  // Add one for receiver.
+  __ shl(ecx, kPointerSizeLog2);
+  __ neg(ecx);
+  __ add(ecx, ebx);
+
+  Generate_InterpreterPushArgs(masm, ecx);
+
+  // Call the target.
+  __ Push(edx);  // Re-push return address.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target
+  //  -- edi : the constructor
+  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+
+  // Save number of arguments on the stack below where arguments are going
+  // to be pushed.
+  __ mov(ecx, eax);
+  __ neg(ecx);
+  __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
+  __ mov(eax, ecx);
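+  // eax now holds the negated argument count.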
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ Pop(ecx);
+
+  // Find the address of the last argument.
+  __ shl(eax, kPointerSizeLog2);
+  __ add(eax, ebx);
+
+  // Push padding for receiver.
+  __ Push(Immediate(0));
+
+  Generate_InterpreterPushArgs(masm, eax);
+
+  // Restore number of arguments from slot on stack.
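+  // The slot lies one word below the current stack top, just past the last
+  // value pushed above.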
+  __ mov(eax, Operand(esp, -kPointerSize));
+
+  // Re-push return address.
+  __ Push(ecx);
+
+  // Call the constructor with unmodified eax, edx, and edi values.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);  // Save accumulator register.
+
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
+
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+    __ Pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
+    // Tear down internal frame.
+  }
+
+  // Initialize register file register.
+  __ mov(kInterpreterRegisterFileRegister, ebp);
+  __ add(kInterpreterRegisterFileRegister,
+         Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+
+  // Get the bytecode array pointer from the frame.
+  __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
+                      InterpreterFrameConstants::kFunctionFromRegisterPointer));
+  __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     ebx);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ mov(
+      kInterpreterBytecodeOffsetRegister,
+      Operand(kInterpreterRegisterFileRegister,
+              InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Push dispatch table as a stack located parameter to the bytecode handler -
+  // overwrite the state slot (we don't use these for interpreter deopts).
+  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+  __ mov(Operand(esp, kPointerSize), ebx);
+
+  // Dispatch to the target bytecode.
+  __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
+
+  // Get the context from the frame.
+  // TODO(rmcilroy): Update interpreter frame to expect current context at the
+  // context slot instead of the function context.
+  __ mov(kContextRegister,
+         Operand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kContextFromRegisterPointer));
+
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
+  // and header removal.
+  __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(ebx);
+}
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
 
-
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the function.
-  __ push(edi);
-  // Function is also the parameter to the runtime call.
-  __ push(edi);
-  // Whether to compile in a background thread.
-  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
-
-  __ CallRuntime(Runtime::kCompileOptimized, 2);
-  // Restore receiver.
-  __ pop(edi);
-}
-
-
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallCompileOptimized(masm, false);
+  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
   GenerateTailCallToReturnedCode(masm);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallCompileOptimized(masm, true);
+  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -659,6 +917,11 @@
 }
 
 
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+
 static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
                                              SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
@@ -669,7 +932,7 @@
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -696,7 +959,7 @@
 
     // Pass deoptimization type to the runtime system.
     __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
 
     // Tear down internal frame.
   }
@@ -737,323 +1000,318 @@
 }
 
 
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
-  Factory* factory = masm->isolate()->factory();
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
 
-  // 1. Make sure we have at least one argument.
-  { Label done;
+  // 1. Load receiver into eax and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ mov(eax, Operand(esp, kPointerSize));
+    __ JumpIfSmi(eax, &receiver_not_date);
+    __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+    __ j(not_equal, &receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ mov(edx, Operand::StaticVariable(
+                      ExternalReference::date_cache_stamp(masm->isolate())));
+      __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+      __ j(not_equal, &stamp_mismatch, Label::kNear);
+      __ mov(eax, FieldOperand(
+                      eax, JSDate::kValueOffset + field_index * kPointerSize));
+      __ ret(1 * kPointerSize);
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 0), eax);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(Smi::FromInt(field_index)));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
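+  // Either way the field value is now in eax; return and drop the receiver.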
+  __ ret(1 * kPointerSize);
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ EnterFrame(StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : argArray
+  //  -- esp[8]  : thisArg
+  //  -- esp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into edi, argArray into eax (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label no_arg_array, no_this_arg;
+    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ mov(ebx, edx);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
     __ test(eax, eax);
-    __ j(not_zero, &done);
-    __ pop(ebx);
-    __ push(Immediate(factory->undefined_value()));
-    __ push(ebx);
+    __ j(zero, &no_this_arg, Label::kNear);
+    {
+      __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+      __ cmp(eax, Immediate(1));
+      __ j(equal, &no_arg_array, Label::kNear);
+      __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+      __ bind(&no_arg_array);
+    }
+    __ bind(&no_this_arg);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ Push(edx);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
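+    // eax now holds the argArray (or undefined if it was absent).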
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argArray
+  //  -- edi    : receiver
+  //  -- esp[0] : return address
+  //  -- esp[4] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ j(zero, &receiver_not_callable, Label::kNear);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+  __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+                Label::kNear);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ Set(eax, 0);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // Stack Layout:
+  // esp[0]           : Return address
+  // esp[4]           : Argument n
+  // esp[8]           : Argument n-1
+  //  ...
+  // esp[4 * n]       : Argument 1
+  // esp[4 * (n + 1)] : Receiver (callable to call)
+  //
+  // eax contains the number of arguments, n, not counting the receiver.
+  //
+  // 1. Make sure we have at least one argument.
+  {
+    Label done;
+    __ test(eax, eax);
+    __ j(not_zero, &done, Label::kNear);
+    __ PopReturnAddressTo(ebx);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(ebx);
     __ inc(eax);
     __ bind(&done);
   }
 
-  // 2. Get the function to call (passed as receiver) from the stack, check
-  //    if it is a function.
-  Label slow, non_function;
-  // 1 ~ return address.
-  __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
-  __ JumpIfSmi(edi, &non_function);
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &slow);
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
 
-
-  // 3a. Patch the first argument if necessary when calling a function.
-  Label shift_arguments;
-  __ Move(edx, Immediate(0));  // indicate regular JS_FUNCTION
-  { Label convert_to_object, use_global_proxy, patch_receiver;
-    // Change context eagerly in case we need the global receiver.
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-    // Do not transform the receiver for strict mode functions.
-    __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
-              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-    __ j(not_equal, &shift_arguments);
-
-    // Do not transform the receiver for natives (shared already in ebx).
-    __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
-              1 << SharedFunctionInfo::kNativeBitWithinByte);
-    __ j(not_equal, &shift_arguments);
-
-    // Compute the receiver in sloppy mode.
-    __ mov(ebx, Operand(esp, eax, times_4, 0));  // First argument.
-
-    // Call ToObject on the receiver if it is not an object, or use the
-    // global object if it is null or undefined.
-    __ JumpIfSmi(ebx, &convert_to_object);
-    __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_proxy);
-    __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_proxy);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(above_equal, &shift_arguments);
-
-    __ bind(&convert_to_object);
-
-    { // In order to preserve argument count.
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(eax);
-      __ push(eax);
-
-      __ push(ebx);
-      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-      __ mov(ebx, eax);
-      __ Move(edx, Immediate(0));  // restore
-
-      __ pop(eax);
-      __ SmiUntag(eax);
-    }
-
-    // Restore the function to edi.
-    __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
-    __ jmp(&patch_receiver);
-
-    __ bind(&use_global_proxy);
-    __ mov(ebx,
-           Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
-
-    __ bind(&patch_receiver);
-    __ mov(Operand(esp, eax, times_4, 0), ebx);
-
-    __ jmp(&shift_arguments);
-  }
-
-  // 3b. Check for function proxy.
-  __ bind(&slow);
-  __ Move(edx, Immediate(1));  // indicate function proxy
-  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &shift_arguments);
-  __ bind(&non_function);
-  __ Move(edx, Immediate(2));  // indicate non-function
-
-  // 3c. Patch the first argument when calling a non-function.  The
-  //     CALL_NON_FUNCTION builtin expects the non-function callee as
-  //     receiver, so overwrite the first argument which will ultimately
-  //     become the receiver.
-  __ mov(Operand(esp, eax, times_4, 0), edi);
-
-  // 4. Shift arguments and return address one slot down on the stack
+  // 3. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
   //    the original first argument the new receiver.
-  __ bind(&shift_arguments);
-  { Label loop;
+  {
+    Label loop;
     __ mov(ecx, eax);
     __ bind(&loop);
-    __ mov(ebx, Operand(esp, ecx, times_4, 0));
-    __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+    __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
     __ dec(ecx);
     __ j(not_sign, &loop);  // While non-negative (to copy return address).
-    __ pop(ebx);  // Discard copy of return address.
+    __ pop(ebx);            // Discard copy of return address.
     __ dec(eax);  // One fewer argument (first argument is new receiver).
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
-  //     or a function proxy via CALL_FUNCTION_PROXY.
-  { Label function, non_proxy;
-    __ test(edx, edx);
-    __ j(zero, &function);
-    __ Move(ebx, Immediate(0));
-    __ cmp(edx, Immediate(1));
-    __ j(not_equal, &non_proxy);
-
-    __ pop(edx);   // return address
-    __ push(edi);  // re-add proxy object as additional argument
-    __ push(edx);
-    __ inc(eax);
-    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-    __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-           RelocInfo::CODE_TARGET);
-
-    __ bind(&non_proxy);
-    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-    __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-           RelocInfo::CODE_TARGET);
-    __ bind(&function);
-  }
-
-  // 5b. Get the code to call from the function and check that the number of
-  //     expected arguments matches what we're providing.  If so, jump
-  //     (tail-call) to the code in register edx without checking arguments.
-  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ebx,
-         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-  __ SmiUntag(ebx);
-  __ cmp(eax, ebx);
-  __ j(not_equal,
-       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
-
-  ParameterCount expected(0);
-  __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
 }
 
 
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
-  static const int kArgumentsOffset = 2 * kPointerSize;
-  static const int kReceiverOffset = 3 * kPointerSize;
-  static const int kFunctionOffset = 4 * kPointerSize;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : argumentsList
+  //  -- esp[8]  : thisArgument
+  //  -- esp[12] : target
+  //  -- esp[16] : receiver
+  // -----------------------------------
+
+  // 1. Load target into edi (if present), argumentsList into eax (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
   {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
-    __ push(Operand(ebp, kFunctionOffset));  // push this
-    __ push(Operand(ebp, kArgumentsOffset));  // push arguments
-    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
-    // Check the stack for overflow. We are not trying to catch
-    // interruptions (e.g. debug break and preemption) here, so the "real stack
-    // limit" is checked.
-    Label okay;
-    ExternalReference real_stack_limit =
-        ExternalReference::address_of_real_stack_limit(masm->isolate());
-    __ mov(edi, Operand::StaticVariable(real_stack_limit));
-    // Make ecx the space we have left. The stack might already be overflowed
-    // here which will cause ecx to become negative.
-    __ mov(ecx, esp);
-    __ sub(ecx, edi);
-    // Make edx the space we need for the array when it is unrolled onto the
-    // stack.
-    __ mov(edx, eax);
-    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
-    // Check if the arguments will overflow the stack.
-    __ cmp(ecx, edx);
-    __ j(greater, &okay);  // Signed comparison.
-
-    // Out of stack space.
-    __ push(Operand(ebp, 4 * kPointerSize));  // push this
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
-    __ bind(&okay);
-    // End of stack check.
-
-    // Push current index and limit.
-    const int kLimitOffset =
-        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-    __ push(eax);  // limit
-    __ push(Immediate(0));  // index
-
-    // Get the receiver.
-    __ mov(ebx, Operand(ebp, kReceiverOffset));
-
-    // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver, use_global_proxy;
-    __ mov(edi, Operand(ebp, kFunctionOffset));
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &push_receiver);
-
-    // Change context eagerly to get the right global object if necessary.
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-    // Compute the receiver.
-    // Do not transform the receiver for strict mode functions.
-    Label call_to_object;
-    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-    __ j(not_equal, &push_receiver);
-
-    Factory* factory = masm->isolate()->factory();
-
-    // Do not transform the receiver for natives (shared already in ecx).
-    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-              1 << SharedFunctionInfo::kNativeBitWithinByte);
-    __ j(not_equal, &push_receiver);
-
-    // Compute the receiver in sloppy mode.
-    // Call ToObject on the receiver if it is not an object, or use the
-    // global object if it is null or undefined.
-    __ JumpIfSmi(ebx, &call_to_object);
-    __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_proxy);
-    __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_proxy);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(above_equal, &push_receiver);
-
-    __ bind(&call_to_object);
-    __ push(ebx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(ebx, eax);
-    __ jmp(&push_receiver);
-
-    __ bind(&use_global_proxy);
-    __ mov(ebx,
-           Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
-
-    // Push the receiver.
-    __ bind(&push_receiver);
-    __ push(ebx);
-
-    // Copy all arguments from the array to the stack.
-    Label entry, loop;
-    Register receiver = LoadDescriptor::ReceiverRegister();
-    Register key = LoadDescriptor::NameRegister();
-    __ mov(key, Operand(ebp, kIndexOffset));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(receiver, Operand(ebp, kArgumentsOffset));  // load arguments
-
-    if (FLAG_vector_ics) {
-      // TODO(mvstanton): Vector-based ics need additional infrastructure to
-      // be embedded here. For now, just call the runtime.
-      __ push(receiver);
-      __ push(key);
-      __ CallRuntime(Runtime::kGetProperty, 2);
-    } else {
-      // Use inline caching to speed up access to arguments.
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // It is important that we do not have a test instruction after the
-      // call.  A test instruction after the call is used to indicate that
-      // we have generated an inline version of the keyed load.  In this
-      // case, we know that we are not generating a test instruction next.
-    }
-
-    // Push the nth argument.
-    __ push(eax);
-
-    // Update the index on the stack and in register key.
-    __ mov(key, Operand(ebp, kIndexOffset));
-    __ add(key, Immediate(1 << kSmiTagSize));
-    __ mov(Operand(ebp, kIndexOffset), key);
-
-    __ bind(&entry);
-    __ cmp(key, Operand(ebp, kLimitOffset));
-    __ j(not_equal, &loop);
-
-    // Call the function.
-    Label call_proxy;
-    ParameterCount actual(eax);
-    __ Move(eax, key);
-    __ SmiUntag(eax);
-    __ mov(edi, Operand(ebp, kFunctionOffset));
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &call_proxy);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
-    frame_scope.GenerateLeaveFrame();
-    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
-
-    // Call the function proxy.
-    __ bind(&call_proxy);
-    __ push(edi);  // add function proxy as last argument
-    __ inc(eax);
-    __ Move(ebx, Immediate(0));
-    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
-
-    // Leave internal frame.
+    Label done;
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ mov(edx, edi);
+    __ mov(ebx, edi);
+    __ cmp(eax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+    __ j(equal, &done, Label::kNear);
+    __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+    __ cmp(eax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ Push(edx);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
   }
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edi    : target
+  //  -- esp[0] : return address
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ j(zero, &target_not_callable, Label::kNear);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
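+
+// Illustrative ES6 semantics of Reflect.apply as implemented above
+// (a sketch, not generated code):
+//   Reflect.apply(Math.max, undefined, [1, 3, 2]);  // 3
+//   Reflect.apply(42, undefined, []);  // TypeError (target is not callable)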
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : new.target (optional)
+  //  -- esp[8]  : argumentsList
+  //  -- esp[12] : target
+  //  -- esp[16] : receiver
+  // -----------------------------------
+
+  // 1. Load target into edi (if present), argumentsList into eax (if present),
+  // new.target into edx (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push undefined as
+  // the receiver instead (Reflect.construct takes no thisArgument).
+  {
+    Label done;
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ mov(edx, edi);
+    __ mov(ebx, edi);
+    __ cmp(eax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+    __ mov(edx, edi);
+    __ j(equal, &done, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+    __ cmp(eax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edx    : new.target
+  //  -- edi    : target
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ j(zero, &target_not_constructor, Label::kNear);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ j(zero, &new_target_not_constructor, Label::kNear);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ mov(Operand(esp, kPointerSize), edx);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
 }
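+
+// Illustrative ES6 semantics of Reflect.construct as implemented above
+// (a sketch, not generated code):
+//   class Point { constructor(x, y) { this.x = x; this.y = y; } }
+//   Reflect.construct(Point, [1, 2]);  // Point { x: 1, y: 2 }
+//   Reflect.construct(Math.max, []);   // TypeError (target not a constructor)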
 
 
@@ -1096,6 +1354,7 @@
 
   // Get the Array function.
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+  __ mov(edx, edi);
 
   if (FLAG_debug_code) {
     // Initial map for the builtin Array function should be a map.
@@ -1115,7 +1374,8 @@
 }
 
 
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax                 : number of arguments
   //  -- edi                 : constructor function
@@ -1123,120 +1383,235 @@
   //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->string_ctor_calls(), 1);
 
-  if (FLAG_debug_code) {
-    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
-    __ cmp(edi, ecx);
-    __ Assert(equal, kUnexpectedStringFunction);
+  // 1. Load the first argument into eax and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ mov(eax, ebx);
   }
 
-  // Load the first argument into eax and get rid of the rest
-  // (including the receiver).
-  Label no_arguments;
-  __ test(eax, eax);
-  __ j(zero, &no_arguments);
-  __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
-  __ pop(ecx);
-  __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
-  __ push(ecx);
-  __ mov(eax, ebx);
+  // 2a. Convert the first argument to a number.
+  ToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 
-  // Lookup the argument in the number to string cache.
-  Label not_cached, argument_is_string;
-  __ LookupNumberStringCache(eax,  // Input.
-                             ebx,  // Result.
-                             ecx,  // Scratch 1.
-                             edx,  // Scratch 2.
-                             &not_cached);
-  __ IncrementCounter(counters->string_ctor_cached_number(), 1);
-  __ bind(&argument_is_string);
+  // 2b. No arguments, return +0 (already in eax).
+  __ bind(&no_arguments);
+  __ ret(1 * kPointerSize);
+}
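+
+// JS-level behavior of the [[Call]] path of the Number constructor above
+// (a sketch):
+//   Number("0x2A");  // 42  (argument converted via ToNumber)
+//   Number();        // +0  (the no_arguments path)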
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- ebx    : argument converted to string
-  //  -- edi    : constructor function
-  //  -- esp[0] : return address
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- edx                 : new target
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  // Allocate a JSValue and put the tagged pointer into eax.
-  Label gc_required;
-  __ Allocate(JSValue::kSize,
-              eax,  // Result.
-              ecx,  // New allocation top (we ignore it).
-              no_reg,
-              &gc_required,
-              TAG_OBJECT);
+  // 1. Make sure we operate in the context of the called function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-  // Set the map.
-  __ LoadGlobalFunctionInitialMap(edi, ecx);
-  if (FLAG_debug_code) {
-    __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
-            JSValue::kSize >> kPointerSizeLog2);
-    __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
-    __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
-    __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+  // 2. Load the first argument into ebx and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ Move(ebx, Smi::FromInt(0));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
   }
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
 
-  // Set properties and elements.
-  Factory* factory = masm->isolate()->factory();
-  __ Move(ecx, Immediate(factory->empty_fixed_array()));
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+  // 3. Make sure ebx is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(ebx, &done_convert);
+    __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(equal, &done_convert);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(edi);
+      __ Push(edx);
+      __ Move(eax, ebx);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ Move(ebx, eax);
+      __ Pop(edx);
+      __ Pop(edi);
+    }
+    __ bind(&done_convert);
+  }
 
-  // Set the value.
-  __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+  // 4. Check if new target and constructor differ.
+  Label new_object;
+  __ cmp(edx, edi);
+  __ j(not_equal, &new_object);
 
-  // Ensure the object is fully initialized.
-  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+  __ Ret();
 
-  // We're done. Return.
-  __ ret(0);
-
-  // The argument was not found in the number to string cache. Check
-  // if it's a string already before calling the conversion builtin.
-  Label convert_argument;
-  __ bind(&not_cached);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfSmi(eax, &convert_argument);
-  Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
-  __ j(NegateCondition(is_string), &convert_argument);
-  __ mov(ebx, eax);
-  __ IncrementCounter(counters->string_ctor_string_value(), 1);
-  __ jmp(&argument_is_string);
-
-  // Invoke the conversion builtin and put the result into ebx.
-  __ bind(&convert_argument);
-  __ IncrementCounter(counters->string_ctor_conversions(), 1);
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(edi);  // Preserve the function.
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-    __ pop(edi);
+    __ Push(ebx);  // the first argument
+    __ Push(edi);  // constructor function
+    __ Push(edx);  // new target
+    __ CallRuntime(Runtime::kNewObject);
+    __ Pop(FieldOperand(eax, JSValue::kValueOffset));
   }
-  __ mov(ebx, eax);
-  __ jmp(&argument_is_string);
+  __ Ret();
+}
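+
+// JS-level behavior of the [[Construct]] path above (a sketch):
+//   var n = new Number("42");
+//   typeof n;     // "object" -- a JSValue wrapper
+//   n.valueOf();  // 42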
 
-  // Load the empty string into ebx, remove the receiver from the
-  // stack, and jump back to the case where the argument is a string.
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into eax and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ mov(eax, ebx);
+  }
+
+  // 2a. At least one argument, return eax if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(eax, &to_string, Label::kNear);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+    __ j(above, &to_string, Label::kNear);
+    __ j(equal, &symbol_descriptive_string, Label::kNear);
+    __ Ret();
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
   __ bind(&no_arguments);
-  __ Move(ebx, Immediate(factory->empty_string()));
-  __ pop(ecx);
-  __ lea(esp, Operand(esp, kPointerSize));
-  __ push(ecx);
-  __ jmp(&argument_is_string);
+  {
+    __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+    __ ret(1 * kPointerSize);
+  }
 
-  // At this point the argument is already a string. Call runtime to
-  // create a string wrapper.
-  __ bind(&gc_required);
-  __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+  // 3a. Convert eax to a string.
+  __ bind(&to_string);
+  {
+    ToStringStub stub(masm->isolate());
+    __ TailCallStub(&stub);
+  }
+
+  // 3b. Convert symbol in eax to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ PopReturnAddressTo(ecx);
+    __ Push(eax);
+    __ PushReturnAddressFrom(ecx);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+}
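+
+// JS-level behavior of the [[Call]] path of the String constructor above
+// (a sketch):
+//   String(42);           // "42"
+//   String();             // ""  (the no_arguments path)
+//   String(Symbol("x"));  // "Symbol(x)"  (the symbol_descriptive_string path)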
+
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- edx                 : new target
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into ebx and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ LoadRoot(ebx, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+  }
+
+  // 3. Make sure ebx is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(ebx, &convert, Label::kNear);
+    __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
+    __ j(below, &done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      ToStringStub stub(masm->isolate());
+      __ Push(edi);
+      __ Push(edx);
+      __ Move(eax, ebx);
+      __ CallStub(&stub);
+      __ Move(ebx, eax);
+      __ Pop(edx);
+      __ Pop(edi);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label new_object;
+  __ cmp(edx, edi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+  __ Ret();
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(ebx);
-    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+    __ Push(ebx);  // the first argument
+    __ Push(edi);  // constructor function
+    __ Push(edx);  // new target
+    __ CallRuntime(Runtime::kNewObject);
+    __ Pop(FieldOperand(eax, JSValue::kValueOffset));
   }
-  __ ret(0);
+  __ Ret();
 }
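+
+// JS-level behavior of the [[Construct]] path above (a sketch):
+//   var s = new String(42);
+//   typeof s;     // "object" -- a JSValue wrapper
+//   s.valueOf();  // "42"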
 
 
@@ -1245,24 +1620,24 @@
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
   //  -- ebx : expected number of arguments
-  //  -- edi : function (passed through to callee)
+  //  -- edx : new target (passed through to callee)
   // -----------------------------------
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   ExternalReference real_stack_limit =
       ExternalReference::address_of_real_stack_limit(masm->isolate());
-  __ mov(edx, Operand::StaticVariable(real_stack_limit));
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
   // Make ecx the space we have left. The stack might already be overflowed
   // here which will cause ecx to become negative.
   __ mov(ecx, esp);
-  __ sub(ecx, edx);
-  // Make edx the space we need for the array when it is unrolled onto the
+  __ sub(ecx, edi);
+  // Make edi the space we need for the array when it is unrolled onto the
   // stack.
-  __ mov(edx, ebx);
-  __ shl(edx, kPointerSizeLog2);
+  __ mov(edi, ebx);
+  __ shl(edi, kPointerSizeLog2);
   // Check if the arguments will overflow the stack.
-  __ cmp(ecx, edx);
+  __ cmp(ecx, edi);
   __ j(less_equal, stack_overflow);  // Signed comparison.
 }
 
@@ -1301,21 +1676,549 @@
 }
 
 
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edi    : target
+  //  -- edx    : new.target (checked to be constructor or undefined)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(eax, &create_runtime);
+
+    // Load the map of argumentsList into ecx.
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+    // Load native context into ebx.
+    __ mov(ebx, NativeContextOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+    __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+    __ j(equal, &create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(edi);
+      __ Push(edx);
+      __ Push(eax);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(edx);
+      __ Pop(edi);
+      __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ SmiUntag(ebx);
+    }
+    __ jmp(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ mov(ebx,
+           FieldOperand(eax, JSObject::kHeaderSize +
+                                 Heap::kArgumentsLengthIndex * kPointerSize));
+    __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+    __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+    __ j(not_equal, &create_runtime);
+    __ SmiUntag(ebx);
+    __ mov(eax, ecx);
+    __ jmp(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(ecx);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ cmp(ecx, Immediate(FAST_ELEMENTS));
+    __ j(above, &create_runtime);
+    __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+    __ j(equal, &create_runtime);
+    __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+    __ SmiUntag(ebx);
+    __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    ExternalReference real_stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already be overflowed
+    // here which will cause ecx to become negative.
+    __ neg(ecx);
+    __ add(ecx, esp);
+    __ sar(ecx, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ecx, ebx);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- edi    : target
+  //  -- eax    : args (a FixedArray built from argumentsList)
+  //  -- ebx    : len (number of elements to push from args)
+  //  -- edx    : new.target (checked to be constructor or undefined)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    __ movd(xmm0, edx);
+    __ PopReturnAddressTo(edx);
+    __ Move(ecx, Immediate(0));
+    Label done, loop;
+    __ bind(&loop);
+    __ cmp(ecx, ebx);
+    __ j(equal, &done, Label::kNear);
+    __ Push(
+        FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    __ inc(ecx);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(edx);
+    __ movd(edx, xmm0);
+    __ Move(eax, ebx);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
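+
+// The Apply builtin above is shared by Reflect.apply and Reflect.construct
+// (and, in this V8 version, Function.prototype.apply); a sketch of the
+// JS-level dispatch:
+//   Math.max.apply(null, [1, 3, 2]);      // new.target undefined -> Call
+//   Reflect.construct(Array, [1, 2, 3]);  // new.target set       -> Construct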
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+            SharedFunctionInfo::kClassConstructorBitsWithinByte);
+  __ j(not_zero, &class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
+  Label done_convert;
+  __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
+            (1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+  __ j(not_zero, &done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- eax : the number of arguments (not including the receiver)
+    //  -- edx : the shared function info.
+    //  -- edi : the function to call (checked to be a JSFunction)
+    //  -- esi : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(ecx);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+      __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+      __ j(above_equal, &done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy, Label::kNear);
+        __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+                         Label::kNear);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(ecx);
+        }
+        __ jmp(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(eax);
+        __ Push(eax);
+        __ Push(edi);
+        __ mov(eax, ecx);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ mov(ecx, eax);
+        __ Pop(edi);
+        __ Pop(eax);
+        __ SmiUntag(eax);
+      }
+      __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the shared function info.
+  //  -- edi : the function to call (checked to be a JSFunction)
+  //  -- esi : the function context.
+  // -----------------------------------
+
+  __ mov(ebx,
+         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(ebx);
+  ParameterCount actual(eax);
+  ParameterCount expected(ebx);
+  __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor"; we need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ push(edi);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
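+
+// Receiver conversion done above, as observable from JS (a sketch):
+//   function f() { return typeof this; }
+//   f.call(null);  // "object" -- null/undefined becomes the global proxy
+//   f.call(42);    // "object" -- primitives are boxed via ToObject
+//   (function() { "use strict"; return typeof this; }).call(42);  // "number"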
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : new.target (only in case of [[Construct]])
+  //  -- edi : target (checked to be a JSBoundFunction)
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into ecx and length of that into ebx.
+  Label no_bound_arguments;
+  __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+  __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ SmiUntag(ebx);
+  __ test(ebx, ebx);
+  __ j(zero, &no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- eax : the number of arguments (not including the receiver)
+    //  -- edx : new.target (only in case of [[Construct]])
+    //  -- edi : target (checked to be a JSBoundFunction)
+    //  -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- ebx : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+      __ sub(esp, ecx);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+      __ j(greater, &done, Label::kNear);  // Signed comparison.
+      // Restore the stack pointer.
+      __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Adjust effective number of arguments to include return address.
+    __ inc(eax);
+
+    // Relocate arguments and return address down the stack.
+    {
+      Label loop;
+      __ Set(ecx, 0);
+      __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+      __ bind(&loop);
+      __ movd(xmm0, Operand(ebx, ecx, times_pointer_size, 0));
+      __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm0);
+      __ inc(ecx);
+      __ cmp(ecx, eax);
+      __ j(less, &loop);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+      __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+      __ SmiUntag(ebx);
+      __ bind(&loop);
+      __ dec(ebx);
+      __ movd(xmm0, FieldOperand(ecx, ebx, times_pointer_size,
+                                 FixedArray::kHeaderSize));
+      __ movd(Operand(esp, eax, times_pointer_size, 0), xmm0);
+      __ lea(eax, Operand(eax, 1));
+      __ j(greater, &loop);
+    }
+
+    // Adjust effective number of arguments (eax contains the number of
+    // arguments from the call plus return address plus the number of
+    // [[BoundArguments]]), so we need to subtract one for the return address.
+    __ dec(eax);
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(edi);
+
+  // Patch the receiver to [[BoundThis]].
+  __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ecx, Operand::StaticVariable(ExternalReference(
+                  Builtins::kCall_ReceiverIsAny, masm->isolate())));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
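+
+// JS-level behavior of the bound-function call path above (a sketch):
+//   function add(a, b, c) { return a + b + c; }
+//   var bound = add.bind({unused: true}, 1, 2);
+//   bound(3);  // 6 -- receiver patched to [[BoundThis]], and the bound
+//              // arguments (1, 2) precede the call-site argument 3.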
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(edi, &non_callable);
+  __ bind(&non_smi);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+       RelocInfo::CODE_TARGET);
+  __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+       RelocInfo::CODE_TARGET);
+  __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+  __ j(not_equal, &non_function);
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ PushReturnAddressFrom(ecx);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ add(eax, Immediate(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Check if target has a [[Call]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ j(zero, &non_callable, Label::kNear);
+  // Overwrite the original receiver with the (original) target.
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
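+
+// JS-level dispatch performed by the generic Call builtin above (a sketch):
+//   (function() {})();               // JSFunction      -> CallFunction
+//   (function() {}).bind(null)();    // JSBoundFunction -> CallBoundFunction
+//   new Proxy(function() {}, {})();  // JSProxy         -> Runtime::kJSProxyCall
+//   ({})();                          // not callable    -> TypeError via
+//                                    //                    kThrowCalledNonCallable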
+
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (checked to be a constructor)
+  //  -- edi : the constructor to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // Calling convention for function specific ConstructStubs require
+  // ebx to contain either an AllocationSite or undefined.
+  __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (checked to be a constructor)
+  //  -- edi : the constructor to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(edi);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label done;
+    __ cmp(edi, edx);
+    __ j(not_equal, &done, Label::kNear);
+    __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ bind(&done);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ecx, Operand::StaticVariable(
+                  ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the constructor to call (checked to be a JSProxy)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  // Include the pushed new_target, constructor and the receiver.
+  __ add(eax, Immediate(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- edi : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(edi, &non_constructor, Label::kNear);
+
+  // Dispatch based on instance type.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Construct]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ j(zero, &non_constructor, Label::kNear);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+       RelocInfo::CODE_TARGET);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
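+
+// JS-level dispatch performed by the generic Construct builtin above
+// (a sketch):
+//   new (class {})();                  // JSFunction -> ConstructFunction
+//   new (function() {}.bind(null))();  // bound      -> ConstructBoundFunction
+//   new (new Proxy(class {}, {}))();   // JSProxy    -> ConstructProxy
+//   new (() => {})();                  // no [[Construct]] -> TypeError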
+
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
   //  -- ebx : expected number of arguments
+  //  -- edx : new target (passed through to callee)
   //  -- edi : function (passed through to callee)
   // -----------------------------------
 
-  Label invoke, dont_adapt_arguments;
+  Label invoke, dont_adapt_arguments, stack_overflow;
   __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
 
-  Label stack_overflow;
-  ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
   Label enough, too_few;
-  __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ cmp(eax, ebx);
   __ j(less, &too_few);
   __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1324,25 +2227,52 @@
   {  // Enough parameters: Actual >= expected.
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
 
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
-    __ lea(eax, Operand(ebp, eax, times_4, offset));
-    __ mov(edi, -1);  // account for receiver
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    __ mov(eax, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ inc(edi);
-    __ push(Operand(eax, 0));
-    __ sub(eax, Immediate(kPointerSize));
-    __ cmp(edi, ebx);
+    __ inc(eax);
+    __ push(Operand(edi, 0));
+    __ sub(edi, Immediate(kPointerSize));
+    __ cmp(eax, ebx);
     __ j(less, &copy);
+    // eax now contains the expected number of arguments.
     __ jmp(&invoke);
   }
 
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
+
+    // If the function is strong we need to throw an error.
+    Label no_strong_error;
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
+              1 << SharedFunctionInfo::kStrongModeBitWithinByte);
+    __ j(equal, &no_strong_error, Label::kNear);
+
+    // What we really care about is the required number of arguments.
+    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
+    __ SmiUntag(ecx);
+    __ cmp(eax, ecx);
+    __ j(greater_equal, &no_strong_error, Label::kNear);
+
+    {
+      FrameScope frame(masm, StackFrame::MANUAL);
+      EnterArgumentsAdaptorFrame(masm);
+      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
+    }
+
+    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Remember expected arguments in ecx.
+    __ mov(ecx, ebx);
 
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1368,13 +2298,20 @@
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
     __ cmp(eax, ebx);
     __ j(less, &fill);
+
+    // Restore expected arguments.
+    __ mov(eax, ecx);
   }
 
   // Call the entry point.
   __ bind(&invoke);
   // Restore function pointer.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ call(edx);
+  // eax : expected number of arguments
+  // edx : new target (passed through to callee)
+  // edi : function (passed through to callee)
+  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  __ call(ecx);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1387,18 +2324,128 @@
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ jmp(edx);
+  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  __ jmp(ecx);
 
   __ bind(&stack_overflow);
   {
     FrameScope frame(masm, StackFrame::MANUAL);
-    EnterArgumentsAdaptorFrame(masm);
-    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
     __ int3();
   }
 }
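+
+// Observable JS behavior of the arguments adaptor above (a sketch):
+//   function f(a, b, c) { return [a, b, c]; }
+//   f(1);           // too few:  padded with undefined -> [1, undefined, undefined]
+//   f(1, 2, 3, 4);  // enough:   expected count copied -> [1, 2, 3]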
 
 
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Register scratch0, Register scratch1,
+                                    Label* receiver_check_failed) {
+  // If there is no signature, return the holder.
+  __ CompareRoot(FieldOperand(function_template_info,
+                              FunctionTemplateInfo::kSignatureOffset),
+                 Heap::kUndefinedValueRootIndex);
+  Label receiver_check_passed;
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // Walk the prototype chain.
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(scratch0, scratch0, scratch1);
+  __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+  Label next_prototype;
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Get the constructor's signature.
+  __ mov(scratch0,
+         FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(scratch0,
+         FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ cmp(scratch0, FieldOperand(function_template_info,
+                                FunctionTemplateInfo::kSignatureOffset));
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+  __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Otherwise load the parent function template and iterate.
+  __ mov(scratch0,
+         FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+  __ jmp(&function_template_loop, Label::kNear);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+  // End if the prototype is null or not hidden.
+  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+  __ j(equal, receiver_check_failed);
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ test(FieldOperand(scratch0, Map::kBitField3Offset),
+          Immediate(Map::IsHiddenPrototype::kMask));
+  __ j(zero, receiver_check_failed);
+  // Iterate.
+  __ jmp(&prototype_loop_start, Label::kNear);
+
+  __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                : number of arguments (not including the receiver)
+  //  -- edi                : callee
+  //  -- esi                : context
+  //  -- esp[0]             : return address
+  //  -- esp[4]             : last argument
+  //  -- ...
+  //  -- esp[eax * 4]       : first argument
+  //  -- esp[(eax + 1) * 4] : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+  __ Push(eax);
+  CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+  __ Pop(eax);
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+  __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(edx);
+
+  // Compatible receiver check failed: pop the return address, the arguments
+  // and the receiver, and throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  __ Pop(eax);
+  __ PopReturnAddressTo(ebx);
+  __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+  __ add(esp, eax);
+  __ PushReturnAddressFrom(ebx);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+  }
+}
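+
+// Context for the receiver check above (a summary, not generated code):
+// API functions created from a v8::FunctionTemplate may carry a
+// v8::Signature; the fast path only invokes the C++ handler when the
+// receiver's (hidden) prototype chain was instantiated from a matching
+// template, and otherwise throws via Runtime::kThrowIllegalInvocation.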
+
+
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   // Lookup the function in the JavaScript frame.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1406,7 +2453,7 @@
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Pass function as argument.
     __ push(eax);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
   }
 
   Label skip;
@@ -1445,7 +2492,7 @@
   __ j(above_equal, &ok, Label::kNear);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard);
   }
   __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
          RelocInfo::CODE_TARGET);
@@ -1455,7 +2502,7 @@
 }
 
 #undef __
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index b75ae3a..6e597e2 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2,19 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/ia32/code-stubs-ia32.h"
+#include "src/ia32/frames-ia32.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 #include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -36,7 +37,7 @@
                            JS_FUNCTION_STUB_MODE);
   } else {
     descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE);
   }
 }
 
@@ -55,7 +56,7 @@
                            JS_FUNCTION_STUB_MODE);
   } else {
     descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE);
   }
 }
 
@@ -105,15 +106,15 @@
   isolate()->counters()->code_stubs()->Increment();
 
   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
-  int param_count = descriptor.GetEnvironmentParameterCount();
+  int param_count = descriptor.GetRegisterParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
     DCHECK(param_count == 0 ||
-           eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+           eax.is(descriptor.GetRegisterParameter(param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
-      __ push(descriptor.GetEnvironmentParameterRegister(i));
+      __ push(descriptor.GetRegisterParameter(i));
     }
     __ CallExternalReference(miss, param_count);
   }
@@ -616,7 +617,7 @@
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+    __ TailCallRuntime(Runtime::kMathPowRT);
 
     // The stub is called from non-optimized code, which expects the result
     // as heap number in exponent.
@@ -652,19 +653,14 @@
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
-  if (FLAG_vector_ics) {
-    // With careful management, we won't have to save slot and vector on
-    // the stack. Simply handle the possibly missing case first.
-    // TODO(mvstanton): this code can be more efficient.
-    __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
-           Immediate(isolate()->factory()->the_hole_value()));
-    __ j(equal, &miss);
-    __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
-    __ ret(0);
-  } else {
-    NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
-                                                            ebx, &miss);
-  }
+  // With careful management, we won't have to save slot and vector on
+  // the stack. Simply handle the possibly missing case first.
+  // TODO(mvstanton): this code can be more efficient.
+  __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+         Immediate(isolate()->factory()->the_hole_value()));
+  __ j(equal, &miss);
+  __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+  __ ret(0);
 
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
@@ -692,9 +688,7 @@
   __ push(scratch);   // return address
 
   // Perform tail call to the entry.
-  ExternalReference ref = ExternalReference(
-      IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
 
   __ bind(&slow);
   PropertyAccessCompiler::TailCallBuiltin(
@@ -712,9 +706,8 @@
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   Register result = eax;
   DCHECK(!result.is(scratch));
-  DCHECK(!FLAG_vector_ics ||
-         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
-          result.is(VectorLoadICDescriptor::SlotRegister())));
+  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+         result.is(LoadDescriptor::SlotRegister()));
 
   // StringCharAtGenerator doesn't use the result register until it's passed
   // the different miss possibilities. If it did, we would have a conflict
@@ -729,7 +722,7 @@
   __ ret(0);
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
+  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
 
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
@@ -794,77 +787,83 @@
   __ pop(ebx);  // Return address.
   __ push(edx);
   __ push(ebx);
-  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+  __ TailCallRuntime(Runtime::kArguments);
 }
 
 
 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+  // ecx : number of parameters (tagged)
+  // edx : parameters pointer
+  // edi : function
   // esp[0] : return address
-  // esp[4] : number of parameters
-  // esp[8] : receiver displacement
-  // esp[12] : function
+
+  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
 
   // Check if the calling frame is an arguments adaptor frame.
   Label runtime;
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ mov(Operand(esp, 1 * kPointerSize), ecx);
-  __ lea(edx, Operand(edx, ecx, times_2,
-              StandardFrameConstants::kCallerSPOffset));
-  __ mov(Operand(esp, 2 * kPointerSize), edx);
+  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lea(edx,
+         Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+  __ pop(eax);   // Pop return address.
+  __ push(edi);  // Push function.
+  __ push(edx);  // Push parameters pointer.
+  __ push(ecx);  // Push parameter count.
+  __ push(eax);  // Push return address.
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
 }
 
 
 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+  // ecx : number of parameters (tagged)
+  // edx : parameters pointer
+  // edi : function
   // esp[0] : return address
-  // esp[4] : number of parameters (tagged)
-  // esp[8] : receiver displacement
-  // esp[12] : function
 
-  // ebx = parameter count (tagged)
-  __ mov(ebx, Operand(esp, 1 * kPointerSize));
+  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
 
   // Check if the calling frame is an arguments adaptor frame.
-  // TODO(rossberg): Factor out some of the bits that are shared with the other
-  // Generate* functions.
-  Label runtime;
-  Label adaptor_frame, try_allocate;
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  Label adaptor_frame, try_allocate, runtime;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
-  __ mov(ecx, ebx);
+  __ mov(ebx, ecx);
+  __ push(ecx);
   __ jmp(&try_allocate, Label::kNear);
 
   // We have an adaptor frame. Patch the parameters pointer.
   __ bind(&adaptor_frame);
+  __ mov(ebx, ecx);
+  __ push(ecx);
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ lea(edx, Operand(edx, ecx, times_2,
                       StandardFrameConstants::kCallerSPOffset));
-  __ mov(Operand(esp, 2 * kPointerSize), edx);
 
   // ebx = parameter count (tagged)
   // ecx = argument count (smi-tagged)
-  // esp[4] = parameter count (tagged)
-  // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
   __ cmp(ebx, ecx);
   __ j(less_equal, &try_allocate, Label::kNear);
   __ mov(ebx, ecx);
 
+  // Save mapped parameter count and function.
   __ bind(&try_allocate);
-
-  // Save mapped parameter count.
+  __ push(edi);
   __ push(ebx);
 
   // Compute the sizes of backing store, parameter map, and arguments object.
@@ -884,17 +883,16 @@
   __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
 
   // eax = address of new object(s) (tagged)
   // ecx = argument count (smi-tagged)
   // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
   // esp[8] = parameter count (tagged)
-  // esp[12] = address of receiver argument
   // Get the arguments map from the current native context into edi.
   Label has_mapped_parameters, instantiate;
-  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+  __ mov(edi, NativeContextOperand());
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
   __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -904,9 +902,8 @@
   __ jmp(&instantiate, Label::kNear);
 
   __ bind(&has_mapped_parameters);
-  __ mov(
-      edi,
-      Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX)));
+  __ mov(edi, Operand(edi, Context::SlotOffset(
+                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
   __ bind(&instantiate);
 
   // eax = address of new object (tagged)
@@ -914,8 +911,8 @@
   // ecx = argument count (smi-tagged)
   // edi = address of arguments map (tagged)
   // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
   // esp[8] = parameter count (tagged)
-  // esp[12] = address of receiver argument
   // Copy the JS object part.
   __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
   __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -925,11 +922,11 @@
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ mov(edx, Operand(esp, 4 * kPointerSize));
-  __ AssertNotSmi(edx);
+  __ mov(edi, Operand(esp, 1 * kPointerSize));
+  __ AssertNotSmi(edi);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                      Heap::kArgumentsCalleeIndex * kPointerSize),
-         edx);
+                               Heap::kArgumentsCalleeIndex * kPointerSize),
+         edi);
 
   // Use the length (smi tagged) and set that as an in-object property too.
   __ AssertSmi(ecx);
@@ -947,11 +944,13 @@
   // eax = address of new object (tagged)
   // ebx = mapped parameter count (tagged)
   // ecx = argument count (tagged)
+  // edx = address of receiver argument
   // edi = address of parameter map or backing store (tagged)
   // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
   // esp[8] = parameter count (tagged)
-  // esp[12] = address of receiver argument
-  // Free a register.
+  // Free two registers.
+  __ push(edx);
   __ push(eax);
 
   // Initialize parameter map. If there are no mapped arguments, we're done.
@@ -977,9 +976,9 @@
   // We loop from right to left.
   Label parameters_loop, parameters_test;
   __ push(ecx);
-  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
   __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ add(ebx, Operand(esp, 4 * kPointerSize));
+  __ add(ebx, Operand(esp, 5 * kPointerSize));
   __ sub(ebx, eax);
   __ mov(ecx, isolate()->factory()->the_hole_value());
   __ mov(edx, edi);
@@ -991,9 +990,10 @@
   // edi = address of backing store (tagged)
   // esp[0] = argument count (tagged)
   // esp[4] = address of new object (tagged)
-  // esp[8] = mapped parameter count (tagged)
-  // esp[16] = parameter count (tagged)
-  // esp[20] = address of receiver argument
+  // esp[8] = address of receiver argument
+  // esp[12] = mapped parameter count (tagged)
+  // esp[16] = function
+  // esp[20] = parameter count (tagged)
   __ jmp(&parameters_test, Label::kNear);
 
   __ bind(&parameters_loop);
@@ -1011,17 +1011,18 @@
   // ecx = argument count (tagged)
   // edi = address of backing store (tagged)
   // esp[0] = address of new object (tagged)
-  // esp[4] = mapped parameter count (tagged)
-  // esp[12] = parameter count (tagged)
-  // esp[16] = address of receiver argument
+  // esp[4] = address of receiver argument
+  // esp[8] = mapped parameter count (tagged)
+  // esp[12] = function
+  // esp[16] = parameter count (tagged)
   // Copy arguments header and remaining slots (if there are any).
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
          Immediate(isolate()->factory()->fixed_array_map()));
   __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
 
   Label arguments_loop, arguments_test;
-  __ mov(ebx, Operand(esp, 1 * kPointerSize));
-  __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
   __ sub(edx, ebx);
   __ jmp(&arguments_test, Label::kNear);
@@ -1038,62 +1039,64 @@
 
   // Restore.
   __ pop(eax);  // Address of arguments object.
-  __ pop(ebx);  // Parameter count.
+  __ Drop(4);
 
-  // Return and remove the on-stack parameters.
-  __ ret(3 * kPointerSize);
+  // Return.
+  __ ret(0);
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ pop(eax);  // Remove saved parameter count.
-  __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
-  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+  __ pop(eax);   // Remove saved mapped parameter count.
+  __ pop(edi);   // Pop saved function.
+  __ pop(eax);   // Remove saved parameter count.
+  __ pop(eax);   // Pop return address.
+  __ push(edi);  // Push function.
+  __ push(edx);  // Push parameters pointer.
+  __ push(ecx);  // Push parameter count.
+  __ push(eax);  // Push return address.
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
 }
 
 
 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // ecx : number of parameters (tagged)
+  // edx : parameters pointer
+  // edi : function
   // esp[0] : return address
-  // esp[4] : number of parameters
-  // esp[8] : receiver displacement
-  // esp[12] : function
+
+  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
 
   // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor_frame, Label::kNear);
-
-  // Get the length from the frame.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));
-  __ jmp(&try_allocate, Label::kNear);
+  Label try_allocate, runtime;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &try_allocate, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ mov(Operand(esp, 1 * kPointerSize), ecx);
-  __ lea(edx, Operand(edx, ecx, times_2,
-                      StandardFrameConstants::kCallerSPOffset));
-  __ mov(Operand(esp, 2 * kPointerSize), edx);
+  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lea(edx,
+         Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ test(ecx, ecx);
+  __ mov(eax, ecx);
+  __ test(eax, eax);
   __ j(zero, &add_arguments_object, Label::kNear);
-  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+  __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
+  __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
 
   // Do the allocation of both objects in one go.
-  __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+  __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
 
   // Get the arguments map from the current native context.
-  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
-  const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
-  __ mov(edi, Operand(edi, offset));
+  __ mov(edi, NativeContextOperand());
+  __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
 
   __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
   __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -1103,7 +1106,6 @@
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));
   __ AssertSmi(ecx);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
@@ -1114,17 +1116,14 @@
   __ test(ecx, ecx);
   __ j(zero, &done, Label::kNear);
 
-  // Get the parameters pointer from the stack.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-
   // Set up the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
   __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
          Immediate(isolate()->factory()->fixed_array_map()));
-
   __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
   // Untag the length for the loop below.
   __ SmiUntag(ecx);
 
@@ -1138,13 +1137,46 @@
   __ dec(ecx);
   __ j(not_zero, &loop);
 
-  // Return and remove the on-stack parameters.
+  // Return.
   __ bind(&done);
-  __ ret(3 * kPointerSize);
+  __ ret(0);
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+  __ pop(eax);   // Pop return address.
+  __ push(edi);  // Push function.
+  __ push(edx);  // Push parameters pointer.
+  __ push(ecx);  // Push parameter count.
+  __ push(eax);  // Push return address.
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+  // ecx : number of parameters (tagged)
+  // edx : parameters pointer
+  // ebx : rest parameter index (tagged)
+  // esp[0] : return address
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &runtime);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
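+  // ecx is a Smi (value << 1), so the times_2 scale below turns the tagged
+  // length directly into a byte offset of length * kPointerSize.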
+  __ lea(edx,
+         Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+  __ bind(&runtime);
+  __ pop(eax);   // Save return address.
+  __ push(ecx);  // Push number of parameters.
+  __ push(edx);  // Push parameters pointer.
+  __ push(ebx);  // Push rest parameter index.
+  __ push(eax);  // Push return address.
+  __ TailCallRuntime(Runtime::kNewRestParam);
 }
 
 
@@ -1153,7 +1185,7 @@
   // time or if regexp entry in generated code is turned off runtime switch or
   // at compilation.
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExec);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -1429,22 +1461,9 @@
   __ mov(eax, Operand::StaticVariable(pending_exception));
   __ cmp(edx, eax);
   __ j(equal, &runtime);
+
   // For exception, throw the exception again.
-
-  // Clear the pending exception variable.
-  __ mov(Operand::StaticVariable(pending_exception), edx);
-
-  // Special handling of termination exceptions which are uncatchable
-  // by javascript code.
-  __ cmp(eax, factory->termination_exception());
-  Label throw_termination_exception;
-  __ j(equal, &throw_termination_exception, Label::kNear);
-
-  // Handle normal exception by following handler chain.
-  __ Throw(eax);
-
-  __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(eax);
+  __ TailCallRuntime(Runtime::kRegExpExecReThrow);
 
   __ bind(&failure);
   // For failure to match, return null.
@@ -1536,7 +1555,7 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
   // (7) Not a long external string?  If yes, go to (10).
@@ -1635,7 +1654,7 @@
 
 
 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
-  Label check_unequal_objects;
+  Label runtime_call, check_unequal_objects;
   Condition cc = GetCondition();
 
   Label miss;
@@ -1669,23 +1688,42 @@
     if (cc != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      Label check_for_nan;
       __ cmp(edx, isolate()->factory()->undefined_value());
-      __ j(not_equal, &check_for_nan, Label::kNear);
-      __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
-      __ ret(0);
-      __ bind(&check_for_nan);
+      if (is_strong(strength())) {
+        // In strong mode, this comparison must throw, so call the runtime.
+        __ j(equal, &runtime_call, Label::kFar);
+      } else {
+        Label check_for_nan;
+        __ j(not_equal, &check_for_nan, Label::kNear);
+        __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+        __ ret(0);
+        __ bind(&check_for_nan);
+      }
     }
 
     // Test for NaN. Compare heap numbers in a general way,
-    // to hanlde NaNs correctly.
+    // to handle NaNs correctly.
     __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
            Immediate(isolate()->factory()->heap_number_map()));
     __ j(equal, &generic_heap_number_comparison, Label::kNear);
     if (cc != equal) {
+      __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
       // Call runtime on identical JSObjects.  Otherwise return equal.
-      __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-      __ j(above_equal, &not_identical);
+      __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
+      __ j(above_equal, &runtime_call, Label::kFar);
+      // Call runtime on identical symbols since we need to throw a TypeError.
+      __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+      __ j(equal, &runtime_call, Label::kFar);
+      // Call runtime on identical SIMD values since we must throw a TypeError.
+      __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+      __ j(equal, &runtime_call, Label::kFar);
+      if (is_strong(strength())) {
+        // We have already tested for smis and heap numbers, so if both
+        // arguments are not strings we must proceed to the slow case.
+        __ test(ecx, Immediate(kIsNotStringMask));
+        __ j(not_zero, &runtime_call, Label::kFar);
+      }
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
     __ ret(0);
@@ -1706,7 +1744,7 @@
     // If either is a Smi (we know that not both are), then they can only
     // be equal if the other is a HeapNumber. If so, use the slow case.
     STATIC_ASSERT(kSmiTag == 0);
-    DCHECK_EQ(0, Smi::FromInt(0));
+    DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, eax);
     __ test(ecx, edx);
@@ -1740,8 +1778,8 @@
     // Get the type of the first operand.
     // If the first object is a JS object, we have done pointer comparison.
     Label first_non_object;
-    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
     __ j(below, &first_non_object, Label::kNear);
 
     // Return non-zero (eax is not zero)
@@ -1755,7 +1793,7 @@
     __ CmpInstanceType(ecx, ODDBALL_TYPE);
     __ j(equal, &return_not_equal);
 
-    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
     __ j(above_equal, &return_not_equal);
 
     // Check for oddballs: true, false, null, undefined.
@@ -1830,7 +1868,6 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label not_both_objects;
     Label return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
@@ -1839,11 +1876,11 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_both_objects, Label::kNear);
-    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(below, &not_both_objects, Label::kNear);
-    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
-    __ j(below, &not_both_objects, Label::kNear);
+    __ j(not_zero, &runtime_call, Label::kNear);
+    __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+    __ j(below, &runtime_call, Label::kNear);
+    __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
+    __ j(below, &runtime_call, Label::kNear);
     // We do not bail out after this point.  Both are JSObjects, and
     // they are equal if and only if both are undetectable.
     // The and of the undetectable flags is 1 if and only if they are equal.
@@ -1860,8 +1897,8 @@
     // Return non-equal by returning the non-zero object pointer in eax,
     // or return equal if we fell through to here.
     __ ret(0);  // eax, edx were pushed
-    __ bind(&not_both_objects);
   }
+  __ bind(&runtime_call);
 
   // Push arguments below the return address.
   __ pop(ecx);
@@ -1869,32 +1906,59 @@
   __ push(eax);
 
   // Figure out which native to call and setup the arguments.
-  Builtins::JavaScript builtin;
   if (cc == equal) {
-    builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+    __ push(ecx);
+    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
   } else {
-    builtin = Builtins::COMPARE;
     __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+
+    // Restore return address on the stack.
+    __ push(ecx);
+
+    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+    // tagged as a small integer.
+    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+                                             : Runtime::kCompare);
   }
 
-  // Restore return address on the stack.
-  __ push(ecx);
-
-  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
 
 
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+  // eax : number of arguments to the construct function
+  // ebx : feedback vector
+  // edx : slot in feedback vector (Smi)
+  // edi : the function to call
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Number-of-arguments register must be smi-tagged to call out.
+    __ SmiTag(eax);
+    __ push(eax);
+    __ push(edi);
+    __ push(edx);
+    __ push(ebx);
+
+    __ CallStub(stub);
+
+    __ pop(ebx);
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+}
+
+
 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // Cache the called function in a feedback vector slot.  Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
   // eax : number of arguments to the construct function
-  // ebx : Feedback vector
+  // ebx : feedback vector
   // edx : slot in feedback vector (Smi)
   // edi : the function to call
   Isolate* isolate = masm->isolate();
@@ -1906,32 +1970,41 @@
 
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
-  __ cmp(ecx, edi);
+  // We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
+  // at this position in a symbol (see static asserts in
+  // type-feedback-vector.h).
+  Label check_allocation_site;
+  __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
   __ j(equal, &done, Label::kFar);
-  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+  __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
   __ j(equal, &done, Label::kFar);
+  __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
+                 Heap::kWeakCellMapRootIndex);
+  __ j(not_equal, &check_allocation_site);
 
-  if (!FLAG_pretenuring_call_new) {
-    // If we came here, we need to see if we are the array function.
-    // If we didn't have a matching function, and we didn't find the megamorph
-    // sentinel, then we have in the slot either some other function or an
-    // AllocationSite. Do a map check on the object in ecx.
-    Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
-    __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
-    __ j(not_equal, &miss);
+  // If the weak cell is cleared, we have a new chance to become monomorphic.
+  __ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
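+  // Otherwise the cell still holds some other (non-cleared) target, so the
+  // call site must go megamorphic.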
+  __ jmp(&megamorphic);
 
-    // Make sure the function is the Array() function
-    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
-    __ cmp(edi, ecx);
-    __ j(not_equal, &megamorphic);
-    __ jmp(&done, Label::kFar);
-  }
+  __ bind(&check_allocation_site);
+  // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorph
+  // sentinel, then we have in the slot either some other function or an
+  // AllocationSite.
+  __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is the Array() function
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+  __ cmp(edi, ecx);
+  __ j(not_equal, &megamorphic);
+  __ jmp(&done, Label::kFar);
 
   __ bind(&miss);
 
   // A monomorphic miss (i.e, here the cache is not uninitialized) goes
   // megamorphic.
-  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
+  __ CompareRoot(ecx, Heap::kuninitialized_symbolRootIndex);
   __ j(equal, &initialize);
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
@@ -1944,342 +2017,154 @@
   // An uninitialized cache is patched with the function or sentinel to
   // indicate the ElementsKind if function is the Array constructor.
   __ bind(&initialize);
-  if (!FLAG_pretenuring_call_new) {
-    // Make sure the function is the Array() function
-    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
-    __ cmp(edi, ecx);
-    __ j(not_equal, &not_array_function);
+  // Make sure the function is the Array() function
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+  __ cmp(edi, ecx);
+  __ j(not_equal, &not_array_function);
 
-    // The target function is the Array constructor,
-    // Create an AllocationSite if we don't already have it, store it in the
-    // slot.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
+  // The target function is the Array constructor. Create an AllocationSite
+  // if we don't already have it and store it in the slot.
+  CreateAllocationSiteStub create_stub(isolate);
+  CallStubInRecordCallTarget(masm, &create_stub);
+  __ jmp(&done);
 
-      // Arguments register must be smi-tagged to call out.
-      __ SmiTag(eax);
-      __ push(eax);
-      __ push(edi);
-      __ push(edx);
-      __ push(ebx);
-
-      CreateAllocationSiteStub create_stub(isolate);
-      __ CallStub(&create_stub);
-
-      __ pop(ebx);
-      __ pop(edx);
-      __ pop(edi);
-      __ pop(eax);
-      __ SmiUntag(eax);
-    }
-    __ jmp(&done);
-
-    __ bind(&not_array_function);
-  }
-
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize),
-         edi);
-  // We won't need edx or ebx anymore, just save edi
-  __ push(edi);
-  __ push(ebx);
-  __ push(edx);
-  __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ pop(edx);
-  __ pop(ebx);
-  __ pop(edi);
-
+  __ bind(&not_array_function);
+  CreateWeakCellStub weak_cell_stub(isolate);
+  CallStubInRecordCallTarget(masm, &weak_cell_stub);
   __ bind(&done);
 }
 
 
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
-  // Do not transform the receiver for strict mode functions.
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, cont);
-
-  // Do not transform the receiver for natives (shared already in ecx).
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-            1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(Isolate* isolate,
-                         MacroAssembler* masm,
-                         int argc,
-                         Label* non_function) {
-  // Check for function proxy.
-  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
-  __ j(not_equal, non_function);
-  __ pop(ecx);
-  __ push(edi);  // put proxy as additional argument under return address
-  __ push(ecx);
-  __ Move(eax, Immediate(argc + 1));
-  __ Move(ebx, Immediate(0));
-  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-  {
-    Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
-    __ jmp(adaptor, RelocInfo::CODE_TARGET);
-  }
-
-  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
-  // of the original receiver from the call site).
-  __ bind(non_function);
-  __ mov(Operand(esp, (argc + 1) * kPointerSize), edi);
-  __ Move(eax, Immediate(argc));
-  __ Move(ebx, Immediate(0));
-  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
-  __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
-  // Wrap the receiver and patch it back onto the stack.
-  { FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    __ push(edi);
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ pop(edi);
-  }
-  __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
-  __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
-                                   int argc, bool needs_checks,
-                                   bool call_as_method) {
-  // edi : the function to call
-  Label slow, non_function, wrap, cont;
-
-  if (needs_checks) {
-    // Check that the function really is a JavaScript function.
-    __ JumpIfSmi(edi, &non_function);
-
-    // Goto slow case if we do not have a function.
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &slow);
-  }
-
-  // Fast-case: Just invoke the function.
-  ParameterCount actual(argc);
-
-  if (call_as_method) {
-    if (needs_checks) {
-      EmitContinueIfStrictOrNative(masm, &cont);
-    }
-
-    // Load the receiver from the stack.
-    __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
-    if (needs_checks) {
-      __ JumpIfSmi(eax, &wrap);
-
-      __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-      __ j(below, &wrap);
-    } else {
-      __ jmp(&wrap);
-    }
-
-    __ bind(&cont);
-  }
-
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
-  if (needs_checks) {
-    // Slow-case: Non-function called.
-    __ bind(&slow);
-    // (non_function is bound in EmitSlowCase)
-    EmitSlowCase(masm->isolate(), masm, argc, &non_function);
-  }
-
-  if (call_as_method) {
-    __ bind(&wrap);
-    EmitWrapCase(masm, argc, &cont);
-  }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
-  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
 void CallConstructStub::Generate(MacroAssembler* masm) {
   // eax : number of arguments
   // ebx : feedback vector
-  // edx : (only if ebx is not the megamorphic symbol) slot in feedback
-  //       vector (Smi)
+  // edx : slot in feedback vector (Smi, for RecordCallTarget)
   // edi : constructor function
-  Label slow, non_function_call;
 
+  Label non_function;
   // Check that function is not a smi.
-  __ JumpIfSmi(edi, &non_function_call);
+  __ JumpIfSmi(edi, &non_function);
   // Check that function is a JSFunction.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &slow);
+  __ j(not_equal, &non_function);
 
-  if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
+  GenerateRecordCallTarget(masm);
 
-    if (FLAG_pretenuring_call_new) {
-      // Put the AllocationSite from the feedback vector into ebx.
-      // By adding kPointerSize we encode that we know the AllocationSite
-      // entry is at the feedback vector slot given by edx + 1.
-      __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-    } else {
-      Label feedback_register_initialized;
-      // Put the AllocationSite from the feedback vector into ebx, or undefined.
-      __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-      Handle<Map> allocation_site_map =
-          isolate()->factory()->allocation_site_map();
-      __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
-      __ j(equal, &feedback_register_initialized);
-      __ mov(ebx, isolate()->factory()->undefined_value());
-      __ bind(&feedback_register_initialized);
-    }
+  Label feedback_register_initialized;
+  // Put the AllocationSite from the feedback vector into ebx, or undefined.
+  __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+  Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
+  __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+  __ j(equal, &feedback_register_initialized);
+  __ mov(ebx, isolate()->factory()->undefined_value());
+  __ bind(&feedback_register_initialized);
 
-    __ AssertUndefinedOrAllocationSite(ebx);
-  }
+  __ AssertUndefinedOrAllocationSite(ebx);
 
-  // Jump to the function-specific construct stub.
-  Register jmp_reg = ecx;
-  __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(jmp_reg, FieldOperand(jmp_reg,
-                               SharedFunctionInfo::kConstructStubOffset));
-  __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
-  __ jmp(jmp_reg);
+  // Pass new target to construct stub.
+  __ mov(edx, edi);
 
-  // edi: called object
-  // eax: number of arguments
-  // ecx: object map
-  Label do_call;
-  __ bind(&slow);
-  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
-  __ j(not_equal, &non_function_call);
-  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
-  __ jmp(&do_call);
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
 
-  __ bind(&non_function_call);
-  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ bind(&do_call);
-  // Set expected number of arguments to zero (not changing eax).
-  __ Move(ebx, Immediate(0));
-  Handle<Code> arguments_adaptor =
-      isolate()->builtins()->ArgumentsAdaptorTrampoline();
-  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+  __ bind(&non_function);
+  __ mov(edx, edi);
+  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
 
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
-  __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(vector, FieldOperand(vector,
-                              SharedFunctionInfo::kFeedbackVectorOffset));
-}
-
-
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // edi - function
   // edx - slot id
-  Label miss;
-  int argc = arg_count();
-  ParameterCount actual(argc);
-
-  EmitLoadTypeFeedbackVector(masm, ebx);
-
+  // ebx - vector
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
   __ cmp(edi, ecx);
-  __ j(not_equal, &miss);
+  __ j(not_equal, miss);
 
   __ mov(eax, arg_count());
+  // Reload ecx.
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
 
-  // Verify that ecx contains an AllocationSite
-  Factory* factory = masm->isolate()->factory();
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         factory->allocation_site_map());
-  __ j(not_equal, &miss);
+  // Increment the call count for monomorphic function calls.
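+  // The count is a Smi kept in the slot immediately after the feedback
+  // entry, hence the extra kPointerSize in the offset.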
+  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
 
   __ mov(ebx, ecx);
+  __ mov(edx, edi);
   ArrayConstructorStub stub(masm->isolate(), arg_count());
   __ TailCallStub(&stub);
 
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  // The slow case, we need this no matter what to complete a call after a miss.
-  CallFunctionNoFeedback(masm,
-                         arg_count(),
-                         true,
-                         CallAsMethod());
-
   // Unreachable.
-  __ int3();
 }
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
   // edi - function
   // edx - slot id
+  // ebx - vector
   Isolate* isolate = masm->isolate();
-  const int with_types_offset =
-      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-  const int generic_offset =
-      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-  Label extra_checks_or_miss, slow_start;
-  Label slow, non_function, wrap, cont;
-  Label have_js_function;
+  Label extra_checks_or_miss, call, call_function;
   int argc = arg_count();
   ParameterCount actual(argc);
 
-  EmitLoadTypeFeedbackVector(masm, ebx);
-
   // The checks. First, does edi match the recorded monomorphic target?
-  __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-  __ j(not_equal, &extra_checks_or_miss);
-
-  __ bind(&have_js_function);
-  if (CallAsMethod()) {
-    EmitContinueIfStrictOrNative(masm, &cont);
-
-    // Load the receiver from the stack.
-    __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
-    __ JumpIfSmi(eax, &wrap);
-
-    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(below, &wrap);
-
-    __ bind(&cont);
-  }
-
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
-  __ bind(&slow);
-  EmitSlowCase(isolate, masm, argc, &non_function);
-
-  if (CallAsMethod()) {
-    __ bind(&wrap);
-    EmitWrapCase(masm, argc, &cont);
-  }
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss;
-
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
+
+  // We don't know that we have a weak cell. We might have a private symbol
+  // or an AllocationSite, but the memory is safe to examine.
+  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+  // FixedArray.
+  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+  // computed, meaning that it can't appear to be a pointer. If the low bit is
+  // 0, then hash is computed, but the 0 bit prevents the field from appearing
+  // to be a pointer.
+  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+                    WeakCell::kValueOffset &&
+                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+  __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
+  __ j(not_equal, &extra_checks_or_miss);
+
+  // The compare above could have been a SMI/SMI comparison. Guard against this
+  // convincing us that we have a monomorphic JSFunction.
+  __ JumpIfSmi(edi, &extra_checks_or_miss);
+
+  // Increment the call count for monomorphic function calls.
+  __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
+  __ bind(&call_function);
+  __ Set(eax, argc);
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&extra_checks_or_miss);
+  Label uninitialized, miss, not_allocation_site;
+
   __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-  __ j(equal, &slow_start);
+  __ j(equal, &call);
+
+  // Check if we have an allocation site.
+  __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
+                 Heap::kAllocationSiteMapRootIndex);
+  __ j(not_equal, &not_allocation_site);
+
+  // We have an allocation site.
+  HandleArrayCase(masm, &miss);
+
+  __ bind(&not_allocation_site);
 
   // The following cases attempt to handle MISS cases without going to the
   // runtime.
@@ -2298,10 +2183,11 @@
   __ mov(
       FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
       Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-  // We have to update statistics for runtime profiling.
-  __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
-  __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
-  __ jmp(&slow_start);
+
+  __ bind(&call);
+  __ Set(eax, argc);
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+          RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
 
@@ -2318,35 +2204,37 @@
   __ cmp(edi, ecx);
   __ j(equal, &miss);
 
-  // Update stats.
-  __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+  // Make sure the function belongs to the same native context.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
+  __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
+  __ cmp(ecx, NativeContextOperand());
+  __ j(not_equal, &miss);
 
-  // Store the function.
-  __ mov(
-      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
-      edi);
+  // Initialize the call counter.
+  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize),
+         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
 
-  // Update the write barrier.
-  __ mov(eax, edi);
-  __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&have_js_function);
+  // Store the function. Use a stub since we need a frame for allocation.
+  // ebx - vector
+  // edx - slot
+  // edi - function
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    CreateWeakCellStub create_stub(isolate);
+    __ push(edi);
+    __ CallStub(&create_stub);
+    __ pop(edi);
+  }
+
+  __ jmp(&call_function);
 
   // We are here because tracing is on or we encountered a MISS case we can't
   // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
-  // the slow case
-  __ bind(&slow_start);
-
-  // Check that the function really is a JavaScript function.
-  __ JumpIfSmi(edi, &non_function);
-
-  // Goto slow case if we do not have a function.
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &slow);
-  __ jmp(&have_js_function);
+  __ jmp(&call);
 
   // Unreachable
   __ int3();
@@ -2354,29 +2242,18 @@
 
 
 void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  // Get the receiver of the function from the stack; 1 ~ return address.
-  __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
+  FrameScope scope(masm, StackFrame::INTERNAL);
 
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push the function and feedback info.
+  __ push(edi);
+  __ push(ebx);
+  __ push(edx);
 
-    // Push the receiver and the function and feedback info.
-    __ push(ecx);
-    __ push(edi);
-    __ push(ebx);
-    __ push(edx);
+  // Call the entry.
+  __ CallRuntime(Runtime::kCallIC_Miss);
 
-    // Call the entry.
-    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
-                                               : IC::kCallIC_Customization_Miss;
-
-    ExternalReference miss = ExternalReference(IC_Utility(id),
-                                               masm->isolate());
-    __ CallExternalReference(miss, 4);
-
-    // Move result to edi and exit the internal frame.
-    __ mov(edi, eax);
-  }
+  // Move result to edi and exit the internal frame.
+  __ mov(edi, eax);
 }
 
 
@@ -2392,8 +2269,11 @@
   // It is important that the store buffer overflow stubs are generated first.
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+  StoreFastElementStub::GenerateAheadOfTime(isolate);
+  TypeofStub::GenerateAheadOfTime(isolate);
 }
 
 
@@ -2417,11 +2297,23 @@
   // esp: stack pointer  (restored after C call)
   // esi: current context (C callee-saved)
   // edi: JS function of the caller (C callee-saved)
+  //
+  // If argv_in_register():
+  // ecx: pointer to the first argument
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(save_doubles());
+  if (argv_in_register()) {
+    DCHECK(!save_doubles());
+    __ EnterApiExitFrame(3);
+
+    // Move argc and argv into the correct registers.
+    __ mov(esi, ecx);
+    __ mov(edi, eax);
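+    // esi now holds argv (from ecx) and edi holds argc (from eax).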
+  } else {
+    __ EnterExitFrame(save_doubles());
+  }
 
   // ebx: pointer to C function  (C callee-saved)
   // ebp: frame pointer  (restored after C call)
@@ -2444,30 +2336,19 @@
   __ call(ebx);
   // Result is in eax or edx:eax - do not destroy these registers!
 
-  // Runtime functions should not return 'the hole'.  Allowing it to escape may
-  // lead to crashes in the IC code later.
-  if (FLAG_debug_code) {
-    Label okay;
-    __ cmp(eax, isolate()->factory()->the_hole_value());
-    __ j(not_equal, &okay, Label::kNear);
-    __ int3();
-    __ bind(&okay);
-  }
-
   // Check result for exception sentinel.
   Label exception_returned;
   __ cmp(eax, isolate()->factory()->exception());
   __ j(equal, &exception_returned);
 
-  ExternalReference pending_exception_address(
-      Isolate::kPendingExceptionAddress, isolate());
-
   // Check that there is no pending exception, otherwise we
   // should have returned the exception sentinel.
   if (FLAG_debug_code) {
     __ push(edx);
     __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
     Label okay;
+    ExternalReference pending_exception_address(
+        Isolate::kPendingExceptionAddress, isolate());
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
     __ j(equal, &okay, Label::kNear);
@@ -2477,30 +2358,55 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles());
+  __ LeaveExitFrame(save_doubles(), !argv_in_register());
   __ ret(0);
 
   // Handling of exception.
   __ bind(&exception_returned);
 
-  // Retrieve the pending exception.
-  __ mov(eax, Operand::StaticVariable(pending_exception_address));
+  ExternalReference pending_handler_context_address(
+      Isolate::kPendingHandlerContextAddress, isolate());
+  ExternalReference pending_handler_code_address(
+      Isolate::kPendingHandlerCodeAddress, isolate());
+  ExternalReference pending_handler_offset_address(
+      Isolate::kPendingHandlerOffsetAddress, isolate());
+  ExternalReference pending_handler_fp_address(
+      Isolate::kPendingHandlerFPAddress, isolate());
+  ExternalReference pending_handler_sp_address(
+      Isolate::kPendingHandlerSPAddress, isolate());
 
-  // Clear the pending exception.
-  __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
-  __ mov(Operand::StaticVariable(pending_exception_address), edx);
+  // Ask the runtime for help to determine the handler. This will set eax to
+  // contain the current pending exception, don't clobber it.
+  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
+                                 isolate());
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(3, eax);
+    __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));  // argc.
+    __ mov(Operand(esp, 1 * kPointerSize), Immediate(0));  // argv.
+    __ mov(Operand(esp, 2 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(isolate())));
+    __ CallCFunction(find_handler, 3);
+  }
 
-  // Special handling of termination exceptions which are uncatchable
-  // by javascript code.
-  Label throw_termination_exception;
-  __ cmp(eax, isolate()->factory()->termination_exception());
-  __ j(equal, &throw_termination_exception);
+  // Retrieve the handler context, SP and FP.
+  __ mov(esi, Operand::StaticVariable(pending_handler_context_address));
+  __ mov(esp, Operand::StaticVariable(pending_handler_sp_address));
+  __ mov(ebp, Operand::StaticVariable(pending_handler_fp_address));
 
-  // Handle normal exception.
-  __ Throw(eax);
+  // If the handler is a JS frame, restore the context to the frame. Note
+  // that the context (esi) will be zero for non-JS frames.
+  Label skip;
+  __ test(esi, esi);
+  __ j(zero, &skip, Label::kNear);
+  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+  __ bind(&skip);
 
-  __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(eax);
+  // Compute the handler entry address and jump to it.
+  __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
+  __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
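+  // The entry address is the code object's instruction start (header size
+  // past the tagged object pointer) plus the handler offset.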
+  __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+  __ jmp(edi);
 }
 
 
@@ -2550,10 +2456,9 @@
   __ mov(eax, Immediate(isolate()->factory()->exception()));
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.  There's only one
-  // handler block in this code object, so its index is 0.
+  // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
-  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  __ PushStackHandler();
 
   // Clear any pending exceptions.
   __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
@@ -2579,7 +2484,7 @@
   __ call(edx);
 
   // Unlink this frame from the handler chain.
-  __ PopTryHandler();
+  __ PopStackHandler();
 
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
@@ -2605,227 +2510,120 @@
 }
 
 
-// Generate stub code for instanceof.
-// This code can patch a call site inlined cache of the instance of check,
-// which looks like this.
-//
-//   81 ff XX XX XX XX   cmp    edi, <the hole, patched to a map>
-//   75 0a               jne    <some near label>
-//   b8 XX XX XX XX      mov    eax, <the hole, patched to either true or false>
-//
-// If call site patching is requested the stack will have the delta from the
-// return address to the cmp instruction just below the return address. This
-// also means that call site patching can only take place with arguments in
-// registers. TOS looks like this when call site patching is requested
-//
-//   esp[0] : return address
-//   esp[4] : delta from return address to cmp instruction
-//
-void InstanceofStub::Generate(MacroAssembler* masm) {
-  // Call site inlining and patching implies arguments in registers.
-  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+  Register const object = edx;                       // Object (lhs).
+  Register const function = eax;                     // Function (rhs).
+  Register const object_map = ecx;                   // Map of {object}.
+  Register const function_map = ebx;                 // Map of {function}.
+  Register const function_prototype = function_map;  // Prototype of {function}.
+  Register const scratch = edi;
 
-  // Fixed register usage throughout the stub.
-  Register object = eax;  // Object (lhs).
-  Register map = ebx;  // Map of the object.
-  Register function = edx;  // Function (rhs).
-  Register prototype = edi;  // Prototype of the function.
-  Register scratch = ecx;
+  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
 
-  // Constants describing the call site code to patch.
-  static const int kDeltaToCmpImmediate = 2;
-  static const int kDeltaToMov = 8;
-  static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
-  static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
-  static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
+  // Check if {object} is a smi.
+  Label object_is_smi;
+  __ JumpIfSmi(object, &object_is_smi, Label::kNear);
 
-  DCHECK_EQ(object.code(), InstanceofStub::left().code());
-  DCHECK_EQ(function.code(), InstanceofStub::right().code());
+  // Lookup the {function} and the {object} map in the global instanceof cache.
+  // Note: This is safe because we clear the global instanceof cache whenever
+  // we change the prototype of any object.
+  Label fast_case, slow_case;
+  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+  __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+  __ j(not_equal, &fast_case, Label::kNear);
+  __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
+  __ j(not_equal, &fast_case, Label::kNear);
+  __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
+  __ ret(0);
 
-  // Get the object and function - they are always both needed.
-  Label slow, not_js_object;
-  if (!HasArgsInRegisters()) {
-    __ mov(object, Operand(esp, 2 * kPointerSize));
-    __ mov(function, Operand(esp, 1 * kPointerSize));
-  }
+  // If {object} is a smi we can safely return false if {function} is a JS
+  // function; otherwise we have to miss to the runtime and throw an exception.
+  __ bind(&object_is_smi);
+  __ JumpIfSmi(function, &slow_case);
+  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+  __ j(not_equal, &slow_case);
+  __ LoadRoot(eax, Heap::kFalseValueRootIndex);
+  __ ret(0);
 
-  // Check that the left hand is a JS object.
-  __ JumpIfSmi(object, &not_js_object);
-  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+  // Fast-case: The {function} must be a valid JSFunction.
+  __ bind(&fast_case);
+  __ JumpIfSmi(function, &slow_case);
+  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+  __ j(not_equal, &slow_case);
 
-  // If there is a call site cache don't look in the global cache, but do the
-  // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
-    // Look up the function and the map in the instanceof cache.
-    Label miss;
-    __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-    __ j(not_equal, &miss, Label::kNear);
-    __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
-    __ j(not_equal, &miss, Label::kNear);
-    __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
-    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-    __ bind(&miss);
-  }
+  // Ensure that {function} has an instance prototype.
+  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+            static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+  __ j(not_zero, &slow_case);
 
-  // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+  // Get the "prototype" (or initial map) of the {function}.
+  __ mov(function_prototype,
+         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  __ AssertNotSmi(function_prototype);
 
-  // Check that the function prototype is a JS object.
-  __ JumpIfSmi(prototype, &slow);
-  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+  // Resolve the prototype if the {function} has an initial map.  Afterwards the
+  // {function_prototype} will be either the JSReceiver prototype object or the
+  // hole value, which means that no instances of the {function} were created so
+  // far and hence we should return false.
+  Label function_prototype_valid;
+  Register const function_prototype_map = scratch;
+  __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
+  __ j(not_equal, &function_prototype_valid, Label::kNear);
+  __ mov(function_prototype,
+         FieldOperand(function_prototype, Map::kPrototypeOffset));
+  __ bind(&function_prototype_valid);
+  __ AssertNotSmi(function_prototype);
 
-  // Update the global instanceof or call site inlined cache with the current
-  // map and function. The cached answer will be set when it is known below.
-  if (!HasCallSiteInlineCheck()) {
-    __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
-    __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  } else {
-    // The constants for the code patching are based on no push instructions
-    // at the call site.
-    DCHECK(HasArgsInRegisters());
-    // Get return address and delta to inlined map check.
-    __ mov(scratch, Operand(esp, 0 * kPointerSize));
-    __ sub(scratch, Operand(esp, 1 * kPointerSize));
-    if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
-      __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
-    }
-    __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
-    __ mov(Operand(scratch, 0), map);
-  }
+  // Update the global instanceof cache with the current {object} map and
+  // {function}.  The cached answer will be set when it is known below.
+  __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+  __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
 
-  // Loop through the prototype chain of the object looking for the function
-  // prototype.
-  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
-  Label loop, is_instance, is_not_instance;
+  // Loop through the prototype chain looking for the {function} prototype.
+  // Assume true, and change to false if not found.
+  Label done, loop, fast_runtime_fallback;
+  __ mov(eax, isolate()->factory()->true_value());
   __ bind(&loop);
-  __ cmp(scratch, prototype);
-  __ j(equal, &is_instance, Label::kNear);
-  Factory* factory = isolate()->factory();
-  __ cmp(scratch, Immediate(factory->null_value()));
-  __ j(equal, &is_not_instance, Label::kNear);
-  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
-  __ jmp(&loop);
 
-  __ bind(&is_instance);
-  if (!HasCallSiteInlineCheck()) {
-    __ mov(eax, Immediate(0));
-    __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
-    if (ReturnTrueFalseObject()) {
-      __ mov(eax, factory->true_value());
-    }
-  } else {
-    // Get return address and delta to inlined map check.
-    __ mov(eax, factory->true_value());
-    __ mov(scratch, Operand(esp, 0 * kPointerSize));
-    __ sub(scratch, Operand(esp, 1 * kPointerSize));
-    if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
-    }
-    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
-    if (!ReturnTrueFalseObject()) {
-      __ Move(eax, Immediate(0));
-    }
-  }
-  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+  // Check if the object needs an access check.
+  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+            1 << Map::kIsAccessCheckNeeded);
+  __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+  // Check if the current object is a Proxy.
+  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+  __ j(equal, &fast_runtime_fallback, Label::kNear);
 
-  __ bind(&is_not_instance);
-  if (!HasCallSiteInlineCheck()) {
-    __ mov(eax, Immediate(Smi::FromInt(1)));
-    __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
-    if (ReturnTrueFalseObject()) {
-      __ mov(eax, factory->false_value());
-    }
-  } else {
-    // Get return address and delta to inlined map check.
-    __ mov(eax, factory->false_value());
-    __ mov(scratch, Operand(esp, 0 * kPointerSize));
-    __ sub(scratch, Operand(esp, 1 * kPointerSize));
-    if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
-      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
-    }
-    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
-    if (!ReturnTrueFalseObject()) {
-      __ Move(eax, Immediate(Smi::FromInt(1)));
-    }
-  }
-  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
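+  // Walk up the prototype chain: load the prototype from the current map,
+  // compare it against {function_prototype}, and reload the map for the next
+  // iteration; a null prototype terminates the chain.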
+  __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
+  __ cmp(object, function_prototype);
+  __ j(equal, &done, Label::kNear);
+  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+  __ cmp(object, isolate()->factory()->null_value());
+  __ j(not_equal, &loop);
+  __ mov(eax, isolate()->factory()->false_value());
 
-  Label object_not_null, object_not_null_or_smi;
-  __ bind(&not_js_object);
-  // Before null, smi and string value checks, check that the rhs is a function
-  // as for a non-function rhs an exception needs to be thrown.
-  __ JumpIfSmi(function, &slow, Label::kNear);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
-  __ j(not_equal, &slow, Label::kNear);
+  __ bind(&done);
+  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+  __ ret(0);
 
-  // Null is not instance of anything.
-  __ cmp(object, factory->null_value());
-  __ j(not_equal, &object_not_null, Label::kNear);
-  if (ReturnTrueFalseObject()) {
-    __ mov(eax, factory->false_value());
-  } else {
-    __ Move(eax, Immediate(Smi::FromInt(1)));
-  }
-  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+  // Found Proxy or access check needed: Call the runtime.
+  __ bind(&fast_runtime_fallback);
+  __ PopReturnAddressTo(scratch);
+  __ Push(object);
+  __ Push(function_prototype);
+  __ PushReturnAddressFrom(scratch);
+  // Invalidate the instanceof cache.
+  __ Move(eax, Immediate(Smi::FromInt(0)));
+  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
 
-  __ bind(&object_not_null);
-  // Smi values is not instance of anything.
-  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
-  if (ReturnTrueFalseObject()) {
-    __ mov(eax, factory->false_value());
-  } else {
-    __ Move(eax, Immediate(Smi::FromInt(1)));
-  }
-  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
-  __ bind(&object_not_null_or_smi);
-  // String values is not instance of anything.
-  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
-  __ j(NegateCondition(is_string), &slow, Label::kNear);
-  if (ReturnTrueFalseObject()) {
-    __ mov(eax, factory->false_value());
-  } else {
-    __ Move(eax, Immediate(Smi::FromInt(1)));
-  }
-  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
-  // Slow-case: Go through the JavaScript implementation.
-  __ bind(&slow);
-  if (!ReturnTrueFalseObject()) {
-    // Tail call the builtin which returns 0 or 1.
-    if (HasArgsInRegisters()) {
-      // Push arguments below return address.
-      __ pop(scratch);
-      __ push(object);
-      __ push(function);
-      __ push(scratch);
-    }
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-  } else {
-    // Call the builtin and convert 0/1 to true/false.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(object);
-      __ push(function);
-      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    }
-    Label true_value, done;
-    __ test(eax, eax);
-    __ j(zero, &true_value, Label::kNear);
-    __ mov(eax, factory->false_value());
-    __ jmp(&done, Label::kNear);
-    __ bind(&true_value);
-    __ mov(eax, factory->true_value());
-    __ bind(&done);
-    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-  }
+  // Slow-case: Call the %InstanceOf runtime function.
+  __ bind(&slow_case);
+  __ PopReturnAddressTo(scratch);
+  __ Push(object);
+  __ Push(function);
+  __ PushReturnAddressFrom(scratch);
+  __ TailCallRuntime(Runtime::kInstanceOf);
 }
 
 
@@ -2867,7 +2665,7 @@
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
+    MacroAssembler* masm, EmbedMode embed_mode,
     const RuntimeCallHelper& call_helper) {
   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
 
@@ -2879,14 +2677,18 @@
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
+  if (embed_mode == PART_OF_IC_HANDLER) {
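+    // Preserve the vector and slot registers across the runtime call below;
+    // the IC handler still needs them afterwards.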
+    __ push(LoadWithVectorDescriptor::VectorRegister());
+    __ push(LoadDescriptor::SlotRegister());
+  }
   __ push(object_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
   } else {
     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi);
   }
   if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
@@ -2894,6 +2696,10 @@
     __ mov(index_, eax);
   }
   __ pop(object_);
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ pop(LoadDescriptor::SlotRegister());
+    __ pop(LoadWithVectorDescriptor::VectorRegister());
+  }
   // Reload the instance type.
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -2912,7 +2718,7 @@
   __ push(object_);
   __ SmiTag(index_);
   __ push(index_);
-  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT);
   if (!result_.is(eax)) {
     __ mov(result_, eax);
   }
@@ -2930,10 +2736,9 @@
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
-  __ test(code_,
-          Immediate(kSmiTagMask |
-                    ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+  __ test(code_, Immediate(kSmiTagMask |
+                           ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
   __ j(not_zero, &slow_case_);
 
   Factory* factory = masm->isolate()->factory();
@@ -2959,7 +2764,7 @@
   __ bind(&slow_case_);
   call_helper.BeforeCall(masm);
   __ push(code_);
-  __ CallRuntime(Runtime::kCharFromCode, 1);
+  __ CallRuntime(Runtime::kStringCharFromCode);
   if (!result_.is(eax)) {
     __ mov(result_, eax);
   }
@@ -3209,7 +3014,7 @@
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString);
 
   __ bind(&single_char);
   // eax: string
@@ -3254,7 +3059,7 @@
   __ pop(ecx);   // Pop return address.
   __ push(eax);  // Push argument.
   __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ TailCallRuntime(Runtime::kStringToNumber);
   __ bind(&not_string);
 
   Label not_oddball;
@@ -3267,7 +3072,61 @@
   __ pop(ecx);   // Pop return address.
   __ push(eax);  // Push argument.
   __ push(ecx);  // Push return address.
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+  __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+  // The ToLength stub takes one argument in eax.
+  Label not_smi, positive_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(eax, eax);
+  __ j(greater_equal, &positive_smi, Label::kNear);
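+  // A negative Smi clamps to zero.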
+  __ xor_(eax, eax);
+  __ bind(&positive_smi);
+  __ Ret();
+  __ bind(&not_smi);
+
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kToLength);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+  // The ToString stub takes one argument in eax.
+  Label is_number;
+  __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+  Label not_string;
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+  // eax: receiver
+  // edi: receiver map
+  __ j(above_equal, &not_string, Label::kNear);
+  __ Ret();
+  __ bind(&not_string);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kToString);
 }
 
 
@@ -3397,42 +3256,40 @@
 
 
 void StringCompareStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  //  esp[0]: return address
-  //  esp[4]: right string
-  //  esp[8]: left string
-
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
-  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
+  // ----------- S t a t e -------------
+  //  -- edx    : left string
+  //  -- eax    : right string
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertString(edx);
+  __ AssertString(eax);
 
   Label not_same;
   __ cmp(edx, eax);
   __ j(not_equal, &not_same, Label::kNear);
-  STATIC_ASSERT(EQUAL == 0);
-  STATIC_ASSERT(kSmiTag == 0);
   __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  __ ret(2 * kPointerSize);
+  __ Ret();
 
   __ bind(&not_same);
 
   // Check that both objects are sequential one-byte strings.
+  Label runtime;
   __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
 
   // Compare flat one-byte strings.
-  // Drop arguments from the stack.
-  __ pop(ecx);
-  __ add(esp, Immediate(2 * kPointerSize));
-  __ push(ecx);
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
   StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
                                                   edi);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ Push(eax);
+  __ PushReturnAddressFrom(ecx);
+  __ TailCallRuntime(Runtime::kStringCompare);
 }
 
 
@@ -3464,6 +3321,39 @@
 }
 
 
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+  DCHECK_EQ(CompareICState::BOOLEAN, state());
+  Label miss;
+  Label::Distance const miss_distance =
+      masm->emit_debug_code() ? Label::kFar : Label::kNear;
+
+  __ JumpIfSmi(edx, &miss, miss_distance);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ JumpIfSmi(eax, &miss, miss_distance);
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+  __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+  if (op() != Token::EQ_STRICT && is_strong(strength())) {
+    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
+  } else {
+    if (!Token::IsEqualityOp(op())) {
+      __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+      __ AssertSmi(eax);
+      __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+      __ AssertSmi(edx);
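+      // Swap eax and edx so the subtraction below computes left - right.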
+      __ push(eax);
+      __ mov(eax, edx);
+      __ pop(edx);
+    }
+    __ sub(eax, edx);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   DCHECK(state() == CompareICState::SMI);
   Label miss;
@@ -3547,7 +3437,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3742,9 +3632,9 @@
   __ push(right);
   __ push(tmp1);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+    __ TailCallRuntime(Runtime::kStringEquals);
   } else {
-    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare);
   }
 
   __ bind(&miss);
@@ -3752,19 +3642,20 @@
 }
 
 
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
-  DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+  DCHECK_EQ(CompareICState::RECEIVER, state());
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &miss, Label::kNear);
 
-  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
-  __ j(not_equal, &miss, Label::kNear);
-  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
-  __ j(not_equal, &miss, Label::kNear);
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+  __ j(below, &miss, Label::kNear);
+  __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
+  __ j(below, &miss, Label::kNear);
 
-  DCHECK(GetCondition() == equal);
+  DCHECK_EQ(equal, GetCondition());
   __ sub(eax, edx);
   __ ret(0);
 
@@ -3773,21 +3664,32 @@
 }
 
 
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   Label miss;
+  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   __ mov(ecx, edx);
   __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &miss, Label::kNear);
 
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ cmp(ecx, known_map_);
+  __ GetWeakValue(edi, cell);
+  __ cmp(edi, FieldOperand(eax, HeapObject::kMapOffset));
   __ j(not_equal, &miss, Label::kNear);
-  __ cmp(ebx, known_map_);
+  __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ j(not_equal, &miss, Label::kNear);
 
-  __ sub(eax, edx);
-  __ ret(0);
+  if (Token::IsEqualityOp(op())) {
+    __ sub(eax, edx);
+    __ ret(0);
+  } else if (is_strong(strength())) {
+    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
+  } else {
+    __ PopReturnAddressTo(ecx);
+    __ Push(edx);
+    __ Push(eax);
+    __ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
+    __ PushReturnAddressFrom(ecx);
+    __ TailCallRuntime(Runtime::kCompare);
+  }
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -3797,15 +3699,13 @@
 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
-    ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                               isolate());
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ push(edx);  // Preserve edx and eax.
     __ push(eax);
     __ push(edx);  // And also use them as the arguments.
     __ push(eax);
     __ push(Immediate(Smi::FromInt(op())));
-    __ CallExternalReference(miss, 3);
+    __ CallRuntime(Runtime::kCompareIC_Miss);
     // Compute the entry point of the rewritten stub.
     __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
     __ pop(eax);
@@ -3846,11 +3746,11 @@
                                    NameDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
-    DCHECK(NameDictionary::kEntrySize == 3);
+    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
     Register entity_name = r0;
     // Having undefined at this place means the name is not contained.
-    DCHECK_EQ(kSmiTagSize, 1);
+    STATIC_ASSERT(kSmiTagSize == 1);
     __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                 kElementsStartOffset - kHeapObjectTag));
     __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
@@ -3918,7 +3818,7 @@
     __ and_(r0, r1);
 
     // Scale the index by multiplying by the entry size.
-    DCHECK(NameDictionary::kEntrySize == 3);
+    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
 
     // Check if the key is identical to the name.
@@ -3981,11 +3881,11 @@
     __ and_(scratch, Operand(esp, 0));
 
     // Scale the index by multiplying by the entry size.
-    DCHECK(NameDictionary::kEntrySize == 3);
+    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     __ lea(index(), Operand(scratch, scratch, times_2, 0));  // index *= 3.
 
     // Having undefined at this place means the name is not contained.
-    DCHECK_EQ(kSmiTagSize, 1);
+    STATIC_ASSERT(kSmiTagSize == 1);
     __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
                             kElementsStartOffset - kHeapObjectTag));
     __ cmp(scratch, isolate()->factory()->undefined_value());
@@ -4196,11 +4096,10 @@
   // We need an extra register for this, so we push the object register
   // temporarily.
   __ push(regs_.object());
-  __ EnsureNotWhite(regs_.scratch0(),  // The value.
-                    regs_.scratch1(),  // Scratch.
-                    regs_.object(),  // Scratch.
-                    &need_incremental_pop_object,
-                    Label::kNear);
+  __ JumpIfWhite(regs_.scratch0(),  // The value.
+                 regs_.scratch1(),  // Scratch.
+                 regs_.object(),    // Scratch.
+                 &need_incremental_pop_object, Label::kNear);
   __ pop(regs_.object());
 
   regs_.Restore(masm);
@@ -4220,91 +4119,6 @@
 }
 
 
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : element value to store
-  //  -- ecx    : element index as smi
-  //  -- esp[0] : return address
-  //  -- esp[4] : array literal index in function
-  //  -- esp[8] : array literal
-  // clobbers ebx, edx, edi
-  // -----------------------------------
-
-  Label element_done;
-  Label double_elements;
-  Label smi_element;
-  Label slow_elements;
-  Label slow_elements_from_double;
-  Label fast_elements;
-
-  // Get array literal index, array literal and its map.
-  __ mov(edx, Operand(esp, 1 * kPointerSize));
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
-  __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-
-  __ CheckFastElements(edi, &double_elements);
-
-  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
-  __ JumpIfSmi(eax, &smi_element);
-  __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
-  // Store into the array literal requires a elements transition. Call into
-  // the runtime.
-
-  __ bind(&slow_elements);
-  __ pop(edi);  // Pop return address and remember to put back later for tail
-                // call.
-  __ push(ebx);
-  __ push(ecx);
-  __ push(eax);
-  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
-  __ push(edx);
-  __ push(edi);  // Return return address so that tail call returns to right
-                 // place.
-  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
-  __ bind(&slow_elements_from_double);
-  __ pop(edx);
-  __ jmp(&slow_elements);
-
-  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
-  __ bind(&fast_elements);
-  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
-                           FixedArrayBase::kHeaderSize));
-  __ mov(Operand(ecx, 0), eax);
-  // Update the write barrier for the array store.
-  __ RecordWrite(ebx, ecx, eax,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ ret(0);
-
-  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
-  // and value is Smi.
-  __ bind(&smi_element);
-  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
-                      FixedArrayBase::kHeaderSize), eax);
-  __ ret(0);
-
-  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
-  __ bind(&double_elements);
-
-  __ push(edx);
-  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
-  __ StoreNumberToDoubleElements(eax,
-                                 edx,
-                                 ecx,
-                                 edi,
-                                 xmm0,
-                                 &slow_elements_from_double);
-  __ pop(edx);
-  __ ret(0);
-}
-
-
 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4321,15 +4135,621 @@
 
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
-  VectorLoadStub stub(isolate(), state());
-  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+  LoadICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
-  VectorKeyedLoadStub stub(isolate());
+  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+  KeyedLoadICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+                             Register key, Register vector, Register slot,
+                             Register feedback, bool is_polymorphic,
+                             Label* miss) {
+  // {feedback} initially contains the feedback array.
+  Label next, next_loop, prepare_next;
+  Label load_smi_map, compare_map;
+  Label start_polymorphic;
+
+  __ push(receiver);
+  __ push(vector);
+
+  Register receiver_map = receiver;
+  Register cached_map = vector;
+
+  // Receiver might not be a heap object.
+  __ JumpIfSmi(receiver, &load_smi_map);
+  __ mov(receiver_map, FieldOperand(receiver, 0));
+  __ bind(&compare_map);
+  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+
+  // A named keyed load might have a two-element array; all other cases can
+  // count on an array with at least two {map, handler} pairs, so they can go
+  // right into polymorphic array handling.
+  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
+
+  // Found, now call the handler.
+  Register handler = feedback;
+  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+  __ pop(vector);
+  __ pop(receiver);
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ jmp(handler);
+
+  if (!is_polymorphic) {
+    __ bind(&next);
+    __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
+           Immediate(Smi::FromInt(2)));
+    __ j(not_equal, &start_polymorphic);
+    __ pop(vector);
+    __ pop(receiver);
+    __ jmp(miss);
+  }
+
+  // Polymorphic; we have to loop from 2 to N.
+  __ bind(&start_polymorphic);
+  __ push(key);
+  Register counter = key;
+  __ mov(counter, Immediate(Smi::FromInt(2)));
+  __ bind(&next_loop);
+  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+                                  FixedArray::kHeaderSize));
+  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &prepare_next);
+  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+                               FixedArray::kHeaderSize + kPointerSize));
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ jmp(handler);
+
+  __ bind(&prepare_next);
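+  // Feedback entries are {map, handler} pairs, so advance the counter by two.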
+  __ add(counter, Immediate(Smi::FromInt(2)));
+  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+  __ j(less, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ jmp(miss);
+
+  __ bind(&load_smi_map);
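+  // Smi receivers are represented by the heap number map in the feedback.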
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
+}
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+                                  Register key, Register vector, Register slot,
+                                  Register weak_cell, Label* miss) {
+  // feedback initially contains the feedback array
+  Label compare_smi_map;
+
+  // Move the weak map into the weak_cell register.
+  Register ic_map = weak_cell;
+  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+  // Receiver might not be a heap object.
+  __ JumpIfSmi(receiver, &compare_smi_map);
+  __ cmp(ic_map, FieldOperand(receiver, 0));
+  __ j(not_equal, miss);
+  Register handler = weak_cell;
+  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize + kPointerSize));
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ jmp(handler);
+
+  // In microbenchmarks, it made sense to unroll this code so that the call to
+  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+  __ bind(&compare_smi_map);
+  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, miss);
+  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize + kPointerSize));
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ jmp(handler);
+}
+
+
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
+
+
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
+  Register name = LoadWithVectorDescriptor::NameRegister();          // ecx
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
+  Register scratch = edi;
+  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  // Is it a weak cell?
+  Label try_array;
+  Label not_array, smi_key, key_okay, miss;
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+  __ j(not_equal, &try_array);
+  HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &miss);
+  __ push(slot);
+  __ push(vector);
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+                                               receiver, name, vector, scratch);
+  __ pop(vector);
+  __ pop(slot);
+
+  __ bind(&miss);
+  LoadIC::GenerateMiss(masm);
+}
+
+
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
+  Register key = LoadWithVectorDescriptor::NameRegister();           // ecx
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
+  Register feedback = edi;
+  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
+                                FixedArray::kHeaderSize));
+  // Is it a weak cell?
+  Label try_array;
+  Label not_array, smi_key, key_okay, miss;
+  __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
+  __ j(not_equal, &try_array);
+  HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
+
+  __ bind(&try_array);
+  // Is it a fixed array?
+  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+
+  // We have a polymorphic element handler.
+  Label polymorphic, try_poly_name;
+  __ bind(&polymorphic);
+  HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
+
+  __ bind(&not_array);
+  // Is it generic?
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &try_poly_name);
+  Handle<Code> megamorphic_stub =
+      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ cmp(key, feedback);
+  __ j(not_equal, &miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
+                                FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
+
+  __ bind(&miss);
+  KeyedLoadIC::GenerateMiss(masm);
+}
+
+
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+  VectorStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+  VectorKeyedStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+
+// The value is on the stack already.
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
+                                       Register key, Register vector,
+                                       Register slot, Register feedback,
+                                       bool is_polymorphic, Label* miss) {
+  // {feedback} initially contains the feedback array.
+  Label next, next_loop, prepare_next;
+  Label load_smi_map, compare_map;
+  Label start_polymorphic;
+  Label pop_and_miss;
+  ExternalReference virtual_register =
+      ExternalReference::virtual_handler_register(masm->isolate());
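+  // There is no spare register for the handler address here (the value is
+  // still live), so it is staged in this external "virtual register" and
+  // jumped through indirectly.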
+
+  __ push(receiver);
+  __ push(vector);
+
+  Register receiver_map = receiver;
+  Register cached_map = vector;
+
+  // Receiver might not be a heap object.
+  __ JumpIfSmi(receiver, &load_smi_map);
+  __ mov(receiver_map, FieldOperand(receiver, 0));
+  __ bind(&compare_map);
+  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+
+  // A named keyed store might have a two-element array; all other cases can
+  // count on an array with at least two {map, handler} pairs, so they can go
+  // right into polymorphic array handling.
+  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &start_polymorphic);
+
+  // Found, now call the handler.
+  Register handler = feedback;
+  DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister()));
+  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+  __ pop(vector);
+  __ pop(receiver);
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ mov(Operand::StaticVariable(virtual_register), handler);
+  __ pop(handler);  // Pop "value".
+  __ jmp(Operand::StaticVariable(virtual_register));
+
+  // Polymorphic; we have to loop from 2 to N.
+  __ bind(&start_polymorphic);
+  __ push(key);
+  Register counter = key;
+  __ mov(counter, Immediate(Smi::FromInt(2)));
+
+  if (!is_polymorphic) {
+    // If is_polymorphic is false, we may only have a two-element array.
+    // Check against the length now in that case.
+    __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+    __ j(greater_equal, &pop_and_miss);
+  }
+
+  __ bind(&next_loop);
+  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+                                  FixedArray::kHeaderSize));
+  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &prepare_next);
+  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+                               FixedArray::kHeaderSize + kPointerSize));
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ mov(Operand::StaticVariable(virtual_register), handler);
+  __ pop(handler);  // Pop "value".
+  __ jmp(Operand::StaticVariable(virtual_register));
+
+  __ bind(&prepare_next);
+  __ add(counter, Immediate(Smi::FromInt(2)));
+  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+  __ j(less, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ bind(&pop_and_miss);
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ jmp(miss);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
+}
+
+
+static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
+                                       Register key, Register vector,
+                                       Register slot, Register weak_cell,
+                                       Label* miss) {
+  // The store ic value is on the stack.
+  DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
+  ExternalReference virtual_register =
+      ExternalReference::virtual_handler_register(masm->isolate());
+
+  // feedback initially contains the feedback array
+  Label compare_smi_map;
+
+  // Move the weak map into the weak_cell register.
+  Register ic_map = weak_cell;
+  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+  // Receiver might not be a heap object.
+  __ JumpIfSmi(receiver, &compare_smi_map);
+  __ cmp(ic_map, FieldOperand(receiver, 0));
+  __ j(not_equal, miss);
+  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize));
+  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+  // Stash the handler, then put the store ic value back in its register.
+  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
+  __ pop(weak_cell);  // Pop "value".
+  // Jump to the handler.
+  __ jmp(Operand::StaticVariable(virtual_register));
+
+  // In microbenchmarks, it made sense to unroll this code so that the call to
+  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+  __ bind(&compare_smi_map);
+  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, miss);
+  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize));
+  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
+  __ pop(weak_cell);  // Pop "value".
+  // Jump to the handler.
+  __ jmp(Operand::StaticVariable(virtual_register));
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // edx
+  Register key = VectorStoreICDescriptor::NameRegister();           // ecx
+  Register value = VectorStoreICDescriptor::ValueRegister();        // eax
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // ebx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // edi
+  Label miss;
+
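+  // Spill the value to free its register as a scratch; it is restored before
+  // dispatching to a handler or to the miss path.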
+  __ push(value);
+
+  Register scratch = value;
+  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  // Is it a weak cell?
+  Label try_array;
+  Label not_array, smi_key, key_okay;
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+  __ j(not_equal, &try_array);
+  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
+                             &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &miss);
+
+  __ pop(value);
+  __ push(slot);
+  __ push(vector);
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+                                               receiver, key, slot, no_reg);
+  __ pop(vector);
+  __ pop(slot);
+  Label no_pop_miss;
+  __ jmp(&no_pop_miss);
+
+  __ bind(&miss);
+  __ pop(value);
+  __ bind(&no_pop_miss);
+  StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+
+static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
+                                            Register receiver, Register key,
+                                            Register vector, Register slot,
+                                            Register feedback, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next, next_loop, prepare_next;
+  Label load_smi_map, compare_map;
+  Label transition_call;
+  Label pop_and_miss;
+  ExternalReference virtual_register =
+      ExternalReference::virtual_handler_register(masm->isolate());
+  ExternalReference virtual_slot =
+      ExternalReference::virtual_slot_register(masm->isolate());
+
+  __ push(receiver);
+  __ push(vector);
+
+  Register receiver_map = receiver;
+  Register cached_map = vector;
+  Register value = StoreDescriptor::ValueRegister();
+
+  // Receiver might not be a heap object.
+  __ JumpIfSmi(receiver, &load_smi_map);
+  __ mov(receiver_map, FieldOperand(receiver, 0));
+  __ bind(&compare_map);
+
+  // Polymorphic; we have to loop from 0 to N - 1.
+  __ push(key);
+  // Current stack layout:
+  // - esp[0]    -- key
+  // - esp[4]    -- vector
+  // - esp[8]    -- receiver
+  // - esp[12]   -- value
+  // - esp[16]   -- return address
+  //
+  // Required stack layout for handler call:
+  // - esp[0]    -- return address
+  // - receiver, key, value, vector, slot in registers.
+  // - handler in virtual register.
+  Register counter = key;
+  __ mov(counter, Immediate(Smi::FromInt(0)));
+  __ bind(&next_loop);
+  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+                                  FixedArray::kHeaderSize));
+  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &prepare_next);
+  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+                                  FixedArray::kHeaderSize + kPointerSize));
+  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &transition_call);
+  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
+                                FixedArray::kHeaderSize + 2 * kPointerSize));
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+  __ mov(Operand::StaticVariable(virtual_register), feedback);
+  __ pop(value);
+  __ jmp(Operand::StaticVariable(virtual_register));
+
+  __ bind(&transition_call);
+  // Current stack layout:
+  // - esp[0]    -- key
+  // - esp[4]    -- vector
+  // - esp[8]    -- receiver
+  // - esp[12]   -- value
+  // - esp[16]   -- return address
+  //
+  // Required stack layout for handler call:
+  // - esp[0]    -- return address
+  // - receiver, key, value, map, vector in registers.
+  // - handler and slot in virtual registers.
+  __ mov(Operand::StaticVariable(virtual_slot), slot);
+  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
+                                FixedArray::kHeaderSize + 2 * kPointerSize));
+  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+  __ mov(Operand::StaticVariable(virtual_register), feedback);
+
+  __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  // The weak cell may have been cleared.
+  __ JumpIfSmi(cached_map, &pop_and_miss);
+  DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+
+  // Pop key into place.
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ pop(value);
+  __ jmp(Operand::StaticVariable(virtual_register));
+
+  __ bind(&prepare_next);
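+  // Transition feedback entries are {map, transition map, handler} triples,
+  // hence the step of three.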
+  __ add(counter, Immediate(Smi::FromInt(3)));
+  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+  __ j(less, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ bind(&pop_and_miss);
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ jmp(miss);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // edx
+  Register key = VectorStoreICDescriptor::NameRegister();           // ecx
+  Register value = VectorStoreICDescriptor::ValueRegister();        // eax
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // ebx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // edi
+  Label miss;
+
+  __ push(value);
+
+  Register scratch = value;
+  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  // Is it a weak cell?
+  Label try_array;
+  Label not_array, smi_key, key_okay;
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+  __ j(not_equal, &try_array);
+  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
+                                  &miss);
+
+  __ bind(&not_array);
+  Label try_poly_name;
+  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &try_poly_name);
+
+  __ pop(value);
+
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ cmp(key, scratch);
+  __ j(not_equal, &miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize + kPointerSize));
+  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
+                             &miss);
+
+  __ bind(&miss);
+  __ pop(value);
+  KeyedStoreIC::GenerateMiss(masm);
+}
+
+
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(ebx);
+  CallICStub stub(isolate(), state());
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
 
@@ -4413,12 +4833,12 @@
   // esp[4] - last argument
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
-    DCHECK(FAST_SMI_ELEMENTS == 0);
-    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
-    DCHECK(FAST_ELEMENTS == 2);
-    DCHECK(FAST_HOLEY_ELEMENTS == 3);
-    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
-    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // is the low bit set? If so, we are holey and that is good.
     __ test_b(edx, 1);
@@ -4555,9 +4975,10 @@
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax : argc (only if argument_count() == ANY)
+  //  -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
   //  -- ebx : AllocationSite or undefined
   //  -- edi : constructor
+  //  -- edx : original constructor
   //  -- esp[0] : return address
   //  -- esp[4] : last argument
   // -----------------------------------
@@ -4577,6 +4998,14 @@
     __ AssertUndefinedOrAllocationSite(ebx);
   }
 
+  Label subclassing;
+
+  // Enter the context of the Array function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  __ cmp(edx, edi);
+  __ j(not_equal, &subclassing);
+
   Label no_info;
   // If the feedback vector is the undefined value call an array constructor
   // that doesn't use AllocationSites.
@@ -4592,6 +5021,29 @@
 
   __ bind(&no_info);
   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+  // Subclassing.
+  __ bind(&subclassing);
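+  // Amend the arguments: pass the constructor, the original constructor and
+  // the AllocationSite to the runtime call below, adjusting argc to match.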
+  switch (argument_count()) {
+    case ANY:
+    case MORE_THAN_ONE:
+      __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+      __ add(eax, Immediate(3));
+      break;
+    case NONE:
+      __ mov(Operand(esp, 1 * kPointerSize), edi);
+      __ mov(eax, Immediate(3));
+      break;
+    case ONE:
+      __ mov(Operand(esp, 2 * kPointerSize), edi);
+      __ mov(eax, Immediate(4));
+      break;
+  }
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ Push(ebx);
+  __ PushReturnAddressFrom(ecx);
+  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
 
 
@@ -4681,13 +5133,355 @@
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+  Register context_reg = esi;
+  Register slot_reg = ebx;
+  Register result_reg = eax;
+  Label slow_case;
+
+  // Go up context chain to the script context.
+  for (int i = 0; i < depth(); ++i) {
+    __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+    context_reg = result_reg;
+  }
+
+  // Load the PropertyCell value at the specified slot.
+  __ mov(result_reg, ContextOperand(context_reg, slot_reg));
+  __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
+
+  // Check that value is not the_hole.
+  __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &slow_case, Label::kNear);
+  __ Ret();
+
+  // Fallback to the runtime.
+  __ bind(&slow_case);
+  __ SmiTag(slot_reg);
+  __ Pop(result_reg);  // Pop return address.
+  __ Push(slot_reg);
+  __ Push(result_reg);  // Push return address.
+  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+  Register context_reg = esi;
+  Register slot_reg = ebx;
+  Register value_reg = eax;
+  Register cell_reg = edi;
+  Register cell_details_reg = edx;
+  Register cell_value_reg = ecx;
+  Label fast_heapobject_case, fast_smi_case, slow_case;
+
+  if (FLAG_debug_code) {
+    __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
+    __ Check(not_equal, kUnexpectedValue);
+  }
+
+  // Go up context chain to the script context.
+  for (int i = 0; i < depth(); ++i) {
+    __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+    context_reg = cell_reg;
+  }
+
+  // Load the PropertyCell at the specified slot.
+  __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
+
+  // Load PropertyDetails for the cell (actually only the cell_type and kind).
+  __ mov(cell_details_reg,
+         FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
+  __ SmiUntag(cell_details_reg);
+  __ and_(cell_details_reg,
+          Immediate(PropertyDetails::PropertyCellTypeField::kMask |
+                    PropertyDetails::KindField::kMask |
+                    PropertyDetails::kAttributesReadOnlyMask));
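+  // Only the cell type, kind and READ_ONLY attribute bits are kept here;
+  // masking off the remaining PropertyDetails bits makes the equality
+  // comparisons below against fully-encoded constants exact.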
+
+  // Check if PropertyCell holds mutable data.
+  Label not_mutable_data;
+  __ cmp(cell_details_reg,
+         Immediate(PropertyDetails::PropertyCellTypeField::encode(
+                       PropertyCellType::kMutable) |
+                   PropertyDetails::KindField::encode(kData)));
+  __ j(not_equal, &not_mutable_data);
+  __ JumpIfSmi(value_reg, &fast_smi_case);
+  __ bind(&fast_heapobject_case);
+  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
+                      cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // RecordWriteField clobbers the value register, so we need to reload.
+  __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+  __ Ret();
+  __ bind(&not_mutable_data);
+
+  // Check if PropertyCell value matches the new value (relevant for Constant,
+  // ConstantType and Undefined cells).
+  Label not_same_value;
+  __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+  __ cmp(cell_value_reg, value_reg);
+  __ j(not_equal, &not_same_value,
+       FLAG_debug_code ? Label::kFar : Label::kNear);
+  // Make sure the PropertyCell is not marked READ_ONLY.
+  __ test(cell_details_reg,
+          Immediate(PropertyDetails::kAttributesReadOnlyMask));
+  __ j(not_zero, &slow_case);
+  if (FLAG_debug_code) {
+    Label done;
+    // This can only be true for Constant, ConstantType and Undefined cells,
+    // because we never store the_hole via this stub.
+    __ cmp(cell_details_reg,
+           Immediate(PropertyDetails::PropertyCellTypeField::encode(
+                         PropertyCellType::kConstant) |
+                     PropertyDetails::KindField::encode(kData)));
+    __ j(equal, &done);
+    __ cmp(cell_details_reg,
+           Immediate(PropertyDetails::PropertyCellTypeField::encode(
+                         PropertyCellType::kConstantType) |
+                     PropertyDetails::KindField::encode(kData)));
+    __ j(equal, &done);
+    __ cmp(cell_details_reg,
+           Immediate(PropertyDetails::PropertyCellTypeField::encode(
+                         PropertyCellType::kUndefined) |
+                     PropertyDetails::KindField::encode(kData)));
+    __ Check(equal, kUnexpectedValue);
+    __ bind(&done);
+  }
+  __ Ret();
+  __ bind(&not_same_value);
+
+  // Check if PropertyCell contains data with constant type (and is not
+  // READ_ONLY).
+  __ cmp(cell_details_reg,
+         Immediate(PropertyDetails::PropertyCellTypeField::encode(
+                       PropertyCellType::kConstantType) |
+                   PropertyDetails::KindField::encode(kData)));
+  __ j(not_equal, &slow_case, Label::kNear);
+
+  // Now either both old and new values must be SMIs or both must be heap
+  // objects with the same map.
+  Label value_is_heap_object;
+  __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
+  __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
+  // Old and new values are SMIs, no need for a write barrier here.
+  __ bind(&fast_smi_case);
+  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+  __ Ret();
+  __ bind(&value_is_heap_object);
+  __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
+  Register cell_value_map_reg = cell_value_reg;
+  __ mov(cell_value_map_reg,
+         FieldOperand(cell_value_reg, HeapObject::kMapOffset));
+  __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
+  __ j(equal, &fast_heapobject_case);
+
+  // Fall back to the runtime.
+  __ bind(&slow_case);
+  __ SmiTag(slot_reg);
+  __ Pop(cell_reg);  // Pop return address.
+  __ Push(slot_reg);
+  __ Push(value_reg);
+  __ Push(cell_reg);  // Push return address.
+  __ TailCallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreGlobalViaContext_Strict
+                         : Runtime::kStoreGlobalViaContext_Sloppy);
+}
+
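+// To summarize the dispatch above: mutable data cells are written in place
+// (with a write barrier for heap objects); storing the value a cell already
+// holds succeeds for any non-READ_ONLY cell; a kConstantType cell also
+// accepts a new value of the same representation (both Smis, or both heap
+// objects with the same map); everything else defers to the runtime.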
+
+// Generates an Operand for saving parameters after PrepareCallApiFunction.
+static Operand ApiParameterOperand(int index) {
+  return Operand(esp, index * kPointerSize);
+}
+
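+// For example, ApiParameterOperand(0) is Operand(esp, 0) and
+// ApiParameterOperand(3) is Operand(esp, 3 * kPointerSize); after
+// PrepareCallApiFunction below, these slots hold the outgoing C arguments.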
+
+// Prepares the stack for passing arguments (aligns it and so on). Reserves
+// space for the return value if needed (assumes the return value is a
+// handle). Arguments must be stored in ApiParameterOperand(0),
+// ApiParameterOperand(1) etc. Saves context (esi). If space was reserved
+// for the return value, the pointer to the reserved slot is stored into
+// esi.
+static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
+  __ EnterApiExitFrame(argc);
+  if (__ emit_debug_code()) {
+    __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
+  }
+}
+
+
+// Calls an API function. Allocates a HandleScope, extracts the returned
+// value from the handle and propagates exceptions. Clobbers ebx, edi and
+// caller-save registers. Restores context. On return, removes
+// stack_space * kPointerSize (GCed).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+                                     Register function_address,
+                                     ExternalReference thunk_ref,
+                                     Operand thunk_last_arg, int stack_space,
+                                     Operand* stack_space_operand,
+                                     Operand return_value_operand,
+                                     Operand* context_restore_operand) {
+  Isolate* isolate = masm->isolate();
+
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address(isolate);
+  ExternalReference limit_address =
+      ExternalReference::handle_scope_limit_address(isolate);
+  ExternalReference level_address =
+      ExternalReference::handle_scope_level_address(isolate);
+
+  DCHECK(edx.is(function_address));
+  // Allocate HandleScope in callee-save registers.
+  __ mov(ebx, Operand::StaticVariable(next_address));
+  __ mov(edi, Operand::StaticVariable(limit_address));
+  __ add(Operand::StaticVariable(level_address), Immediate(1));
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ PushSafepointRegisters();
+    __ PrepareCallCFunction(1, eax);
+    __ mov(Operand(esp, 0),
+           Immediate(ExternalReference::isolate_address(isolate)));
+    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+                     1);
+    __ PopSafepointRegisters();
+  }
+
+
+  Label profiler_disabled;
+  Label end_profiler_check;
+  __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
+  __ cmpb(Operand(eax, 0), 0);
+  __ j(zero, &profiler_disabled);
+
+  // The additional parameter is the address of the actual callback function.
+  __ mov(thunk_last_arg, function_address);
+  // Call the api function.
+  __ mov(eax, Immediate(thunk_ref));
+  __ call(eax);
+  __ jmp(&end_profiler_check);
+
+  __ bind(&profiler_disabled);
+  // Call the api function.
+  __ call(function_address);
+  __ bind(&end_profiler_check);
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ PushSafepointRegisters();
+    __ PrepareCallCFunction(1, eax);
+    __ mov(Operand(esp, 0),
+           Immediate(ExternalReference::isolate_address(isolate)));
+    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+                     1);
+    __ PopSafepointRegisters();
+  }
+
+  Label prologue;
+  // Load the value from ReturnValue
+  __ mov(eax, return_value_operand);
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+
+  __ bind(&prologue);
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  __ mov(Operand::StaticVariable(next_address), ebx);
+  __ sub(Operand::StaticVariable(level_address), Immediate(1));
+  __ Assert(above_equal, kInvalidHandleScopeLevel);
+  __ cmp(edi, Operand::StaticVariable(limit_address));
+  __ j(not_equal, &delete_allocated_handles);
+
+  // Leave the API exit frame.
+  __ bind(&leave_exit_frame);
+  bool restore_context = context_restore_operand != NULL;
+  if (restore_context) {
+    __ mov(esi, *context_restore_operand);
+  }
+  if (stack_space_operand != nullptr) {
+    __ mov(ebx, *stack_space_operand);
+  }
+  __ LeaveApiExitFrame(!restore_context);
+
+  // Check if the function scheduled an exception.
+  ExternalReference scheduled_exception_address =
+      ExternalReference::scheduled_exception_address(isolate);
+  __ cmp(Operand::StaticVariable(scheduled_exception_address),
+         Immediate(isolate->factory()->the_hole_value()));
+  __ j(not_equal, &promote_scheduled_exception);
+
+#if DEBUG
+  // Check if the function returned a valid JavaScript value.
+  Label ok;
+  Register return_value = eax;
+  Register map = ecx;
+
+  __ JumpIfSmi(return_value, &ok, Label::kNear);
+  __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+  __ CmpInstanceType(map, LAST_NAME_TYPE);
+  __ j(below_equal, &ok, Label::kNear);
+
+  __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
+  __ j(above_equal, &ok, Label::kNear);
+
+  __ cmp(map, isolate->factory()->heap_number_map());
+  __ j(equal, &ok, Label::kNear);
+
+  __ cmp(return_value, isolate->factory()->undefined_value());
+  __ j(equal, &ok, Label::kNear);
+
+  __ cmp(return_value, isolate->factory()->true_value());
+  __ j(equal, &ok, Label::kNear);
+
+  __ cmp(return_value, isolate->factory()->false_value());
+  __ j(equal, &ok, Label::kNear);
+
+  __ cmp(return_value, isolate->factory()->null_value());
+  __ j(equal, &ok, Label::kNear);
+
+  __ Abort(kAPICallReturnedInvalidObject);
+
+  __ bind(&ok);
+#endif
+
+  if (stack_space_operand != nullptr) {
+    DCHECK_EQ(0, stack_space);
+    __ pop(ecx);
+    __ add(esp, ebx);
+    __ jmp(ecx);
+  } else {
+    __ ret(stack_space * kPointerSize);
+  }
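+  // The dynamic branch above cannot use ret, which only accepts an
+  // immediate operand: the return address is popped, esp adjusted by ebx,
+  // and control transferred with an indirect jmp instead.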
+
+  // Re-throw by promoting a scheduled exception.
+  __ bind(&promote_scheduled_exception);
+  __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  ExternalReference delete_extensions =
+      ExternalReference::delete_handle_scope_extensions(isolate);
+  __ bind(&delete_allocated_handles);
+  __ mov(Operand::StaticVariable(limit_address), edi);
+  __ mov(edi, eax);
+  __ mov(Operand(esp, 0),
+         Immediate(ExternalReference::isolate_address(isolate)));
+  __ mov(eax, Immediate(delete_extensions));
+  __ call(eax);
+  __ mov(eax, edi);
+  __ jmp(&leave_exit_frame);
+}
+
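+// In rough C++ terms, the control flow above is (a sketch only; the helper
+// names here are illustrative, not the real internal API):
+//
+//   SaveHandleScope();                    // next/limit/level, in ebx/edi
+//   result = profiling ? thunk(callback) : callback();
+//   RestoreHandleScope();                 // may call DeleteExtensions()
+//   LeaveExitFrame();
+//   if (scheduled_exception != the_hole) PromoteScheduledException();
+//   return result;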
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+                                      const ParameterCount& argc,
+                                      bool return_first_arg,
+                                      bool call_data_undefined) {
   // ----------- S t a t e -------------
-  //  -- eax                 : callee
+  //  -- edi                 : callee
   //  -- ebx                 : call_data
   //  -- ecx                 : holder
   //  -- edx                 : api_function_address
   //  -- esi                 : context
+  //  -- eax                 : number of arguments if argc is a register
   //  --
   //  -- esp[0]              : return address
   //  -- esp[4]              : last argument
@@ -4696,16 +5490,12 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  Register callee = eax;
+  Register callee = edi;
   Register call_data = ebx;
   Register holder = ecx;
   Register api_function_address = edx;
-  Register return_address = edi;
   Register context = esi;
-
-  int argc = this->argc();
-  bool is_store = this->is_store();
-  bool call_data_undefined = this->call_data_undefined();
+  Register return_address = eax;
 
   typedef FunctionCallbackArguments FCA;
 
@@ -4718,12 +5508,17 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  __ pop(return_address);
+  DCHECK(argc.is_immediate() || eax.is(argc.reg()));
 
-  // context save
-  __ push(context);
-  // load context from callee
-  __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+  if (argc.is_immediate()) {
+    __ pop(return_address);
+    // context save.
+    __ push(context);
+  } else {
+    // pop return address and save context
+    __ xchg(context, Operand(esp, 0));
+    return_address = context;
+  }
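+  // In the register-argc branch, the xchg swaps esi with the top stack
+  // slot: the context is saved where the return address was, and the
+  // return address lands in esi, in a single instruction and without a
+  // scratch register.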
 
   // callee
   __ push(callee);
@@ -4734,9 +5529,9 @@
   Register scratch = call_data;
   if (!call_data_undefined) {
     // return value
-    __ push(Immediate(isolate()->factory()->undefined_value()));
+    __ push(Immediate(masm->isolate()->factory()->undefined_value()));
     // return value default
-    __ push(Immediate(isolate()->factory()->undefined_value()));
+    __ push(Immediate(masm->isolate()->factory()->undefined_value()));
   } else {
     // return value
     __ push(scratch);
@@ -4744,15 +5539,18 @@
     __ push(scratch);
   }
   // isolate
-  __ push(Immediate(reinterpret_cast<int>(isolate())));
+  __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
   // holder
   __ push(holder);
 
   __ mov(scratch, esp);
 
-  // return address
+  // push return address
   __ push(return_address);
 
+  // load context from callee
+  __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
   // API function gets reference to the v8::Arguments. If CPU profiler
   // is enabled wrapper function will be called and we need to pass
   // address of the callback as additional parameter, always allocate
@@ -4763,41 +5561,76 @@
   // it's not controlled by GC.
   const int kApiStackSpace = 4;
 
-  __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+  PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
 
   // FunctionCallbackInfo::implicit_args_.
   __ mov(ApiParameterOperand(2), scratch);
-  __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
-  // FunctionCallbackInfo::values_.
-  __ mov(ApiParameterOperand(3), scratch);
-  // FunctionCallbackInfo::length_.
-  __ Move(ApiParameterOperand(4), Immediate(argc));
-  // FunctionCallbackInfo::is_construct_call_.
-  __ Move(ApiParameterOperand(5), Immediate(0));
+  if (argc.is_immediate()) {
+    __ add(scratch,
+           Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
+    // FunctionCallbackInfo::values_.
+    __ mov(ApiParameterOperand(3), scratch);
+    // FunctionCallbackInfo::length_.
+    __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
+    // FunctionCallbackInfo::is_construct_call_.
+    __ Move(ApiParameterOperand(5), Immediate(0));
+  } else {
+    __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
+                            (FCA::kArgsLength - 1) * kPointerSize));
+    // FunctionCallbackInfo::values_.
+    __ mov(ApiParameterOperand(3), scratch);
+    // FunctionCallbackInfo::length_.
+    __ mov(ApiParameterOperand(4), argc.reg());
+    // FunctionCallbackInfo::is_construct_call_.
+    __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
+                               (FCA::kArgsLength + 1) * kPointerSize));
+    __ mov(ApiParameterOperand(5), argc.reg());
+  }
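+  // In the register case, the value stored into is_construct_call_ doubles
+  // as the byte count to drop on return, i.e.
+  // (argc + FCA::kArgsLength + 1) * kPointerSize; it is read back below
+  // through stack_space_operand.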
 
   // v8::InvocationCallback's argument.
   __ lea(scratch, ApiParameterOperand(2));
   __ mov(ApiParameterOperand(0), scratch);
 
   ExternalReference thunk_ref =
-      ExternalReference::invoke_function_callback(isolate());
+      ExternalReference::invoke_function_callback(masm->isolate());
 
   Operand context_restore_operand(ebp,
                                   (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument
   int return_value_offset = 0;
-  if (is_store) {
+  if (return_first_arg) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
   }
   Operand return_value_operand(ebp, return_value_offset * kPointerSize);
-  __ CallApiFunctionAndReturn(api_function_address,
-                              thunk_ref,
-                              ApiParameterOperand(1),
-                              argc + FCA::kArgsLength + 1,
-                              return_value_operand,
-                              &context_restore_operand);
+  int stack_space = 0;
+  Operand is_construct_call_operand = ApiParameterOperand(5);
+  Operand* stack_space_operand = &is_construct_call_operand;
+  if (argc.is_immediate()) {
+    stack_space = argc.immediate() + FCA::kArgsLength + 1;
+    stack_space_operand = nullptr;
+  }
+  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+                           ApiParameterOperand(1), stack_space,
+                           stack_space_operand, return_value_operand,
+                           &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+  bool call_data_undefined = this->call_data_undefined();
+  CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
+                            call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+  bool is_store = this->is_store();
+  int argc = this->argc();
+  bool call_data_undefined = this->call_data_undefined();
+  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+                            call_data_undefined);
 }
 
 
@@ -4824,7 +5657,7 @@
   // load address of name
   __ lea(scratch, Operand(esp, 1 * kPointerSize));
 
-  __ PrepareCallApiFunction(kApiArgc);
+  PrepareCallApiFunction(masm, kApiArgc);
   __ mov(ApiParameterOperand(0), scratch);  // name.
   __ add(scratch, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
@@ -4832,17 +5665,15 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
-  __ CallApiFunctionAndReturn(api_function_address,
-                              thunk_ref,
-                              ApiParameterOperand(2),
-                              kStackSpace,
-                              Operand(ebp, 7 * kPointerSize),
-                              NULL);
+  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+                           ApiParameterOperand(2), kStackSpace, nullptr,
+                           Operand(ebp, 7 * kPointerSize), NULL);
 }
 
 
 #undef __
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 0b12fd0..121d12f 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -76,7 +76,7 @@
                                      Register r0,
                                      Register r1);
 
-  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
+  bool SometimesSetsUpAFrame() override { return false; }
 
  private:
   static const int kInlinedProbes = 4;
@@ -142,7 +142,7 @@
     INCREMENTAL_COMPACTION
   };
 
-  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
+  bool SometimesSetsUpAFrame() override { return false; }
 
   static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
   static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
@@ -188,7 +188,7 @@
         break;
     }
     DCHECK(GetMode(stub) == mode);
-    CpuFeatures::FlushICache(stub->instruction_start(), 7);
+    Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
   }
 
   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -320,13 +320,15 @@
     Register GetRegThatIsNotEcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
-        Register candidate = Register::FromAllocationIndex(i);
-        if (candidate.is(ecx)) continue;
-        if (candidate.is(r1)) continue;
-        if (candidate.is(r2)) continue;
-        if (candidate.is(r3)) continue;
-        return candidate;
+      for (int i = 0; i < Register::kNumRegisters; i++) {
+        Register candidate = Register::from_code(i);
+        if (candidate.IsAllocatable()) {
+          if (candidate.is(ecx)) continue;
+          if (candidate.is(r1)) continue;
+          if (candidate.is(r2)) continue;
+          if (candidate.is(r3)) continue;
+          return candidate;
+        }
       }
       UNREACHABLE();
       return no_reg;
@@ -339,9 +341,9 @@
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  inline Major MajorKey() const FINAL { return RecordWrite; }
+  inline Major MajorKey() const final { return RecordWrite; }
 
-  void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) override;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -349,7 +351,7 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) OVERRIDE {
+  void Activate(Code* code) override {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
@@ -385,6 +387,7 @@
 };
 
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 083f5db..2f94f35 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
+#include "src/ia32/codegen-ia32.h"
 
 #if V8_TARGET_ARCH_IA32
 
@@ -34,15 +34,15 @@
 #define __ masm.
 
 
-UnaryMathFunction CreateExpFunction() {
-  if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
   size_t actual_size;
   byte* buffer =
       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == NULL) return &std::exp;
+  if (buffer == nullptr) return nullptr;
   ExternalReference::InitializeMathExpData();
 
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+                      CodeObjectRequired::kNo);
   // esp[1 * kPointerSize]: raw double input
   // esp[0 * kPointerSize]: return address
   {
@@ -65,19 +65,20 @@
   masm.GetCode(&desc);
   DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CpuFeatures::FlushICache(buffer, actual_size);
+  Assembler::FlushICache(isolate, buffer, actual_size);
   base::OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
 }
 
 
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
   size_t actual_size;
   // Allocate buffer in executable space.
   byte* buffer =
       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == NULL) return &std::sqrt;
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  if (buffer == nullptr) return nullptr;
+  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+                      CodeObjectRequired::kNo);
   // esp[1 * kPointerSize]: raw double input
   // esp[0 * kPointerSize]: return address
   // Move double input into registers.
@@ -94,9 +95,9 @@
   masm.GetCode(&desc);
   DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CpuFeatures::FlushICache(buffer, actual_size);
+  Assembler::FlushICache(isolate, buffer, actual_size);
   base::OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
 }
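+
+// A nullptr result is now handled at the call site rather than here;
+// roughly (a sketch, assuming the UnaryMathFunctionWithIsolate typedef is
+// double (*)(double, Isolate*)):
+//
+//   UnaryMathFunctionWithIsolate fast = CreateSqrtFunction(isolate);
+//   double y = fast != nullptr ? fast(x, isolate) : std::sqrt(x);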
 
 
@@ -186,13 +187,14 @@
 };
 
 
-MemMoveFunction CreateMemMoveFunction() {
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
   size_t actual_size;
   // Allocate buffer in executable space.
   byte* buffer =
       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == NULL) return NULL;
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  if (buffer == nullptr) return nullptr;
+  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+                      CodeObjectRequired::kNo);
   LabelConverter conv(buffer);
 
   // Generated code is put into a fixed, unmovable buffer, and not into
@@ -505,7 +507,7 @@
   CodeDesc desc;
   masm.GetCode(&desc);
   DCHECK(!RelocInfo::RequiresRelocation(desc));
-  CpuFeatures::FlushICache(buffer, actual_size);
+  Assembler::FlushICache(isolate, buffer, actual_size);
   base::OS::ProtectCode(buffer, actual_size);
   // TODO(jkummerow): It would be nice to register this code creation event
   // with the PROFILE / GDBJIT system.
@@ -986,9 +988,11 @@
 #undef __
 
 
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+  USE(isolate);
   DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
-  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+  CodePatcher patcher(isolate, young_sequence_.start(),
+                      young_sequence_.length());
   patcher.masm()->push(ebp);
   patcher.masm()->mov(ebp, esp);
   patcher.masm()->push(esi);
@@ -1032,15 +1036,16 @@
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
-    CpuFeatures::FlushICache(sequence, young_length);
+    Assembler::FlushICache(isolate, sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
-    CodePatcher patcher(sequence, young_length);
+    CodePatcher patcher(isolate, sequence, young_length);
     patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
   }
 }
 
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 2382388..133b1ad 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -5,7 +5,7 @@
 #ifndef V8_IA32_CODEGEN_IA32_H_
 #define V8_IA32_CODEGEN_IA32_H_
 
-#include "src/ast.h"
+#include "src/ast/ast.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
@@ -42,6 +42,7 @@
   DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
 };
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 00c2043..8de6d1e 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -8,8 +8,6 @@
 #include "src/third_party/valgrind/valgrind.h"
 #endif
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/assembler.h"
@@ -39,6 +37,7 @@
 #endif
 }
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
deleted file mode 100644
index 34b33b2..0000000
--- a/src/ia32/debug-ia32.cc
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
-  DCHECK(Assembler::kJSReturnSequenceLength >=
-         Assembler::kCallInstructionLength);
-  rinfo()->PatchCodeWithCall(
-      debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
-      Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  rinfo()->PatchCode(original_rinfo()->pc(),
-                     Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
-  return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  DCHECK(IsDebugBreakSlot());
-  // Check whether the debug break slot instructions have been patched.
-  return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
-  DCHECK(IsDebugBreakSlot());
-  Isolate* isolate = debug_info_->GetIsolate();
-  rinfo()->PatchCodeWithCall(
-      isolate->builtins()->Slot_DebugBreak()->entry(),
-      Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  DCHECK(IsDebugBreakSlot());
-  rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
-                                          RegList object_regs,
-                                          RegList non_object_regs,
-                                          bool convert_call_to_jmp) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Load padding words on stack.
-    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
-      __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
-    }
-    __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-
-    // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non object values
-    // are stored as a smi causing it to be untouched by GC.
-    DCHECK((object_regs & ~kJSCallerSaved) == 0);
-    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
-    DCHECK((object_regs & non_object_regs) == 0);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((object_regs & (1 << r)) != 0) {
-        __ push(reg);
-      }
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ test(reg, Immediate(0xc0000000));
-          __ Assert(zero, kUnableToEncodeValueAsSmi);
-        }
-        __ SmiTag(reg);
-        __ push(reg);
-      }
-    }
-
-#ifdef DEBUG
-    __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
-    __ Move(eax, Immediate(0));  // No arguments.
-    __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
-    CEntryStub ceb(masm->isolate(), 1);
-    __ CallStub(&ceb);
-
-    // Automatically find register that could be used after register restore.
-    // We need one register for padding skip instructions.
-    Register unused_reg = { -1 };
-
-    // Restore the register values containing object pointers from the
-    // expression stack.
-    for (int i = kNumJSCallerSaved; --i >= 0;) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if (FLAG_debug_code) {
-        __ Move(reg, Immediate(kDebugZapValue));
-      }
-      bool taken = reg.code() == esi.code();
-      if ((object_regs & (1 << r)) != 0) {
-        __ pop(reg);
-        taken = true;
-      }
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ pop(reg);
-        __ SmiUntag(reg);
-        taken = true;
-      }
-      if (!taken) {
-        unused_reg = reg;
-      }
-    }
-
-    DCHECK(unused_reg.code() != -1);
-
-    // Read current padding counter and skip corresponding number of words.
-    __ pop(unused_reg);
-    // We divide stored value by 2 (untagging) and multiply it by word's size.
-    STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
-    __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
-    // Get rid of the internal frame.
-  }
-
-  // If this call did not replace a call but patched other code then there will
-  // be an unwanted return address left on the stack. Here we get rid of that.
-  if (convert_call_to_jmp) {
-    __ add(esp, Immediate(kPointerSize));
-  }
-
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallICStub
-  // ----------- S t a t e -------------
-  //  -- edx    : type feedback slot (smi)
-  //  -- edi    : function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
-                                0, false);
-}
-
-
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
-  // Register state for IC load call (from ic-ia32.cc).
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  RegList regs = receiver.bit() | name.bit();
-  if (FLAG_vector_ics) {
-    regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
-  }
-  Generate_DebugBreakCallHelper(masm, regs, 0, false);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
-  // Register state for IC store call (from ic-ia32.cc).
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Generate_DebugBreakCallHelper(
-      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  // Register state for keyed IC load call (from ic-ia32.cc).
-  GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // Register state for keyed IC store call (from ic-ia32.cc).
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Generate_DebugBreakCallHelper(
-      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
-  // Register state for CompareNil IC
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  // Register state just before return from JS function (from codegen-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- eax: return value
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- edi: function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallConstructStub (from code-stubs-ia32.cc).
-  // eax is the actual number of arguments not encoded as a smi see comment
-  // above IC call.
-  // ----------- S t a t e -------------
-  //  -- eax: number of arguments (not smi)
-  //  -- edi: constructor function
-  // -----------------------------------
-  // The number of arguments in eax is not smi encoded.
-  Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
-    MacroAssembler* masm) {
-  // Register state for CallConstructStub (from code-stubs-ia32.cc).
-  // eax is the actual number of arguments not encoded as a smi see comment
-  // above IC call.
-  // ----------- S t a t e -------------
-  //  -- eax: number of arguments (not smi)
-  //  -- ebx: feedback array
-  //  -- edx: feedback slot (smi)
-  //  -- edi: constructor function
-  // -----------------------------------
-  // The number of arguments in eax is not smi encoded.
-  Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
-                                eax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
-  // Generate enough nop's to make space for a call instruction.
-  Label check_codesize;
-  __ bind(&check_codesize);
-  __ RecordDebugBreakSlot();
-  __ Nop(Assembler::kDebugBreakSlotLength);
-  DCHECK_EQ(Assembler::kDebugBreakSlotLength,
-            masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
-  // In the places where a debug break slot is inserted no registers can contain
-  // object pointers.
-  Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  masm->ret(0);
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  ExternalReference restarter_frame_function_slot =
-      ExternalReference::debug_restarter_frame_function_pointer_address(
-          masm->isolate());
-  __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
-  // We do not know our frame height, but set esp based on ebp.
-  __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
-  __ pop(edi);  // Function.
-  __ pop(ebp);
-
-  // Load context from the function.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-  // Get function code.
-  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
-  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-
-  // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(edx);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index e451fcc..efe6476 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ia32/frames-ia32.h"
+#include "src/register-configuration.h"
 #include "src/safepoint-table.h"
 
 namespace v8 {
@@ -75,7 +75,7 @@
         new_reloc->GetDataStartAddress() + padding, 0);
     intptr_t comment_string
         = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
-    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+    RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
     for (int i = 0; i < additional_comments; ++i) {
 #ifdef DEBUG
       byte* pos_before = reloc_info_writer.pos();
@@ -101,14 +101,15 @@
     } else {
       pointer = code->instruction_start();
     }
-    CodePatcher patcher(pointer, 1);
+    CodePatcher patcher(isolate, pointer, 1);
     patcher.masm()->int3();
 
     DeoptimizationInputData* data =
         DeoptimizationInputData::cast(code->deoptimization_data());
     int osr_offset = data->OsrPcOffset()->value();
     if (osr_offset > 0) {
-      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+                              1);
       osr_patcher.masm()->int3();
     }
   }
@@ -137,14 +138,13 @@
     if (deopt_data->Pc(i)->value() == -1) continue;
     // Patch lazy deoptimization entry.
     Address call_address = code_start_address + deopt_data->Pc(i)->value();
-    CodePatcher patcher(call_address, patch_size());
+    CodePatcher patcher(isolate, call_address, patch_size());
     Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
     patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
     // We use RUNTIME_ENTRY for deoptimization bailouts.
-    RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
+    RelocInfo rinfo(isolate, call_address + 1,  // 1 after the call opcode.
                     RelocInfo::RUNTIME_ENTRY,
-                    reinterpret_cast<intptr_t>(deopt_entry),
-                    NULL);
+                    reinterpret_cast<intptr_t>(deopt_entry), NULL);
     reloc_info_writer.Write(&rinfo);
     DCHECK_GE(reloc_info_writer.pos(),
               reloc_info->address() + ByteArray::kHeaderSize);
@@ -157,18 +157,15 @@
   }
 
   // Move the relocation info to the beginning of the byte array.
-  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
-  MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+  const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
+  MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
 
-  // The relocation info is in place, update the size.
-  reloc_info->set_length(new_reloc_size);
-
-  // Handle the junk part after the new relocation info. We will create
-  // a non-live object in the extra space at the end of the former reloc info.
-  Address junk_address = reloc_info->address() + reloc_info->Size();
-  DCHECK(junk_address <= reloc_end_address);
-  isolate->heap()->CreateFillerObjectAt(junk_address,
-                                        reloc_end_address - junk_address);
+  // Right trim the relocation info to free up remaining space.
+  const int delta = reloc_info->length() - new_reloc_length;
+  if (delta > 0) {
+    isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+        reloc_info, delta);
+  }
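+  // RightTrimFixedArray shrinks the array and creates the filler for the
+  // freed tail in one step, so no filler object needs to be placed
+  // manually here.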
 }
 
 
@@ -182,7 +179,7 @@
   }
   input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
+  for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -204,7 +201,7 @@
 
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
-  for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
     double double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
@@ -212,7 +209,8 @@
 
 
 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
-  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  int parameter_count =
+      function->shared()->internal_formal_parameter_count() + 1;
   unsigned input_frame_size = input_->GetFrameSize();
   unsigned alignment_state_offset =
       input_frame_size - parameter_count * kPointerSize -
@@ -227,23 +225,28 @@
 
 #define __ masm()->
 
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
   GeneratePrologue();
 
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
 
-  const int kDoubleRegsSize = kDoubleSize *
-                              XMMRegister::kMaxNumAllocatableRegisters;
+  const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
   __ sub(esp, Immediate(kDoubleRegsSize));
-  for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
-    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    XMMRegister xmm_reg = XMMRegister::from_code(code);
+    int offset = code * kDoubleSize;
     __ movsd(Operand(esp, offset), xmm_reg);
   }
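+  // The save offsets are keyed by each register's hardware code rather
+  // than its allocation index, so the restore loops below can recompute
+  // the same slot from the code alone.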
 
   __ pushad();
 
+  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+  __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+
   const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
                                       kDoubleRegsSize;
 
@@ -285,9 +288,10 @@
 
   int double_regs_offset = FrameDescription::double_registers_offset();
   // Fill in the double input registers.
-  for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize;
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    int dst_offset = code * kDoubleSize + double_regs_offset;
+    int src_offset = code * kDoubleSize;
     __ movsd(xmm0, Operand(esp, src_offset));
     __ movsd(Operand(ebx, dst_offset), xmm0);
   }
@@ -368,9 +372,10 @@
   __ j(below, &outer_push_loop);
 
   // In case of a failed STUB, we have to restore the XMM registers.
-  for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
-    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-    int src_offset = i * kDoubleSize + double_regs_offset;
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    XMMRegister xmm_reg = XMMRegister::from_code(code);
+    int src_offset = code * kDoubleSize + double_regs_offset;
     __ movsd(xmm_reg, Operand(ebx, src_offset));
   }
 
@@ -419,7 +424,7 @@
 
 
 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-  // No out-of-line constant pool support.
+  // No embedded constant pool support.
   UNREACHABLE();
 }
 
@@ -427,6 +432,7 @@
 #undef __
 
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index bf88f69..5a43280 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -6,8 +6,6 @@
 #include <stdarg.h>
 #include <stdio.h>
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/disasm.h"
@@ -299,6 +297,12 @@
     return (checked & 4) != 1;
   }
 
+  bool vex_none() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 0;
+  }
+
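+  // The low two bits of the last VEX byte form the SIMD-prefix ("pp")
+  // field: 0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2. vex_none() and the
+  // predicates below test that field to select the mnemonic.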
   bool vex_66() {
     DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
     byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
@@ -804,6 +808,11 @@
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
+      case 0xf7:
+        AppendToBuffer("shlx %s,", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfCPURegister(vvvv));
+        break;
       default:
         UnimplementedInstruction();
     }
@@ -826,11 +835,185 @@
                        NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
+      case 0x5d:
+        AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
       case 0x5e:
         AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
                        NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
+      case 0x5f:
+        AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f3() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x58:
+        AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x59:
+        AppendToBuffer("vmulss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5c:
+        AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5d:
+        AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5e:
+        AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5f:
+        AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_none() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    const char* mnem = "?";
+    switch (opcode) {
+      case 0xf2:
+        AppendToBuffer("andn %s,%s,", NameOfCPURegister(regop),
+                       NameOfCPURegister(vvvv));
+        current += PrintRightOperand(current);
+        break;
+      case 0xf5:
+        AppendToBuffer("bzhi %s,", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfCPURegister(vvvv));
+        break;
+      case 0xf7:
+        AppendToBuffer("bextr %s,", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfCPURegister(vvvv));
+        break;
+      case 0xf3:
+        switch (regop) {
+          case 1:
+            mnem = "blsr";
+            break;
+          case 2:
+            mnem = "blsmsk";
+            break;
+          case 3:
+            mnem = "blsi";
+            break;
+          default:
+            UnimplementedInstruction();
+        }
+        AppendToBuffer("%s %s,", mnem, NameOfCPURegister(vvvv));
+        current += PrintRightOperand(current);
+        mnem = "?";
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f2() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0xf5:
+        AppendToBuffer("pdep %s,%s,", NameOfCPURegister(regop),
+                       NameOfCPURegister(vvvv));
+        current += PrintRightOperand(current);
+        break;
+      case 0xf6:
+        AppendToBuffer("mulx %s,%s,", NameOfCPURegister(regop),
+                       NameOfCPURegister(vvvv));
+        current += PrintRightOperand(current);
+        break;
+      case 0xf7:
+        AppendToBuffer("shrx %s,", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfCPURegister(vvvv));
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f3() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0xf5:
+        AppendToBuffer("pext %s,%s,", NameOfCPURegister(regop),
+                       NameOfCPURegister(vvvv));
+        current += PrintRightOperand(current);
+        break;
+      case 0xf7:
+        AppendToBuffer("sarx %s,", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfCPURegister(vvvv));
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f2() && vex_0f3a()) {
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0xf0:
+        AppendToBuffer("rorx %s,", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%d", *current & 0x1f);
+        current += 1;
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_none() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x54:
+        AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x57:
+        AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_66() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x54:
+        AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x57:
+        AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
       default:
         UnimplementedInstruction();
     }
@@ -1035,6 +1218,8 @@
 // Returns NULL if the instruction is not handled here.
 static const char* F0Mnem(byte f0byte) {
   switch (f0byte) {
+    case 0x0B:
+      return "ud2";
     case 0x18: return "prefetch";
     case 0xA2: return "cpuid";
     case 0xBE: return "movsx_b";
@@ -1046,6 +1231,8 @@
     case 0xAD: return "shrd";
     case 0xAC: return "shrd";  // 3-operand version.
     case 0xAB: return "bts";
+    case 0xBC:
+      return "bsf";
     case 0xBD: return "bsr";
     default: return NULL;
   }
@@ -1215,7 +1402,7 @@
                      data[7] == 0) {
             AppendToBuffer("nop");  // 8 byte nop.
             data += 8;
-          } else if (f0byte == 0xA2 || f0byte == 0x31) {
+          } else if (f0byte == 0x0B || f0byte == 0xA2 || f0byte == 0x31) {
             AppendToBuffer("%s", f0mnem);
             data += 2;
           } else if (f0byte == 0x28) {
@@ -1297,6 +1484,12 @@
             } else {
               AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
             }
+          } else if (f0byte == 0xBC) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+            data += PrintRightOperand(data);
           } else if (f0byte == 0xBD) {
             data += 2;
             int mod, regop, rm;
@@ -1431,11 +1624,7 @@
               data++;
             } else if (*data == 0x2A) {
               // movntdqa
-              data++;
-              int mod, regop, rm;
-              get_modrm(*data, &mod, &regop, &rm);
-              AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
-              data += PrintRightOperand(data);
+              UnimplementedInstruction();
             } else {
               UnimplementedInstruction();
             }
@@ -1454,7 +1643,7 @@
             } else if (*data == 0x16) {
               data++;
               int mod, regop, rm;
-              get_modrm(*data, &mod, &regop, &rm);
+              get_modrm(*data, &mod, &rm, &regop);
               int8_t imm8 = static_cast<int8_t>(data[1]);
               AppendToBuffer("pextrd %s,%s,%d",
                              NameOfCPURegister(regop),
@@ -1552,6 +1741,20 @@
                            NameOfXMMRegister(rm),
                            static_cast<int>(imm8));
             data += 2;
+          } else if (*data == 0x62) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
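+            // Only the register form is decoded here; rm is printed as an
+            // XMM register regardless of mod.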
+            AppendToBuffer("punpckldq %s,%s", NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
+          } else if (*data == 0x6A) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("punpckhdq %s,%s", NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0x76) {
             data++;
             int mod, regop, rm;
@@ -1626,9 +1829,8 @@
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             if (mod == 3) {
-              AppendToBuffer("movntdq ");
-              data += PrintRightOperand(data);
-              AppendToBuffer(",%s", NameOfXMMRegister(regop));
+              // movntdq
+              UnimplementedInstruction();
             } else {
               UnimplementedInstruction();
             }
@@ -1735,14 +1937,36 @@
           } else {
             const char* mnem = "?";
             switch (b2) {
-              case 0x2A: mnem = "cvtsi2sd"; break;
-              case 0x2C: mnem = "cvttsd2si"; break;
-              case 0x2D: mnem = "cvtsd2si"; break;
-              case 0x51: mnem = "sqrtsd"; break;
-              case 0x58: mnem = "addsd"; break;
-              case 0x59: mnem = "mulsd"; break;
-              case 0x5C: mnem = "subsd"; break;
-              case 0x5E: mnem = "divsd"; break;
+              case 0x2A:
+                mnem = "cvtsi2sd";
+                break;
+              case 0x2C:
+                mnem = "cvttsd2si";
+                break;
+              case 0x2D:
+                mnem = "cvtsd2si";
+                break;
+              case 0x51:
+                mnem = "sqrtsd";
+                break;
+              case 0x58:
+                mnem = "addsd";
+                break;
+              case 0x59:
+                mnem = "mulsd";
+                break;
+              case 0x5C:
+                mnem = "subsd";
+                break;
+              case 0x5D:
+                mnem = "minsd";
+                break;
+              case 0x5E:
+                mnem = "divsd";
+                break;
+              case 0x5F:
+                mnem = "maxsd";
+                break;
             }
             data += 3;
             int mod, regop, rm;
@@ -1796,42 +2020,12 @@
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
             data += PrintRightXMMOperand(data);
-          } else if (b2 == 0x2C) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
-            data += PrintRightXMMOperand(data);
-          } else if (b2 == 0x58) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("addss %s,", NameOfXMMRegister(regop));
-            data += PrintRightXMMOperand(data);
-          } else if (b2 == 0x59) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("mulss %s,", NameOfXMMRegister(regop));
-            data += PrintRightXMMOperand(data);
           } else if (b2 == 0x5A) {
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
             data += PrintRightXMMOperand(data);
-          } else if (b2 == 0x5c) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("subss %s,", NameOfXMMRegister(regop));
-            data += PrintRightXMMOperand(data);
-          } else if (b2 == 0x5e) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("divss %s,", NameOfXMMRegister(regop));
-            data += PrintRightXMMOperand(data);
           } else if (b2 == 0x6F) {
             data += 3;
             int mod, regop, rm;
@@ -1845,8 +2039,79 @@
             get_modrm(*data, &mod, &regop, &rm);
             data += PrintRightXMMOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (b2 == 0xB8) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("popcnt %s,", NameOfCPURegister(regop));
+            data += PrintRightOperand(data);
+          } else if (b2 == 0xBC) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("tzcnt %s,", NameOfCPURegister(regop));
+            data += PrintRightOperand(data);
+          } else if (b2 == 0xBD) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("lzcnt %s,", NameOfCPURegister(regop));
+            data += PrintRightOperand(data);
           } else {
-            UnimplementedInstruction();
+            const char* mnem = "?";
+            switch (b2) {
+              case 0x2A:
+                mnem = "cvtsi2ss";
+                break;
+              case 0x2C:
+                mnem = "cvttss2si";
+                break;
+              case 0x2D:
+                mnem = "cvtss2si";
+                break;
+              case 0x51:
+                mnem = "sqrtss";
+                break;
+              case 0x58:
+                mnem = "addss";
+                break;
+              case 0x59:
+                mnem = "mulss";
+                break;
+              case 0x5C:
+                mnem = "subss";
+                break;
+              case 0x5D:
+                mnem = "minss";
+                break;
+              case 0x5E:
+                mnem = "divss";
+                break;
+              case 0x5F:
+                mnem = "maxss";
+                break;
+            }
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
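+            // Operand direction differs: cvtsi2ss reads a GP/memory source,
+            // while cvttss2si and cvtss2si write a GP destination.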
+            if (b2 == 0x2A) {
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightOperand(data);
+            } else if (b2 == 0x2C || b2 == 0x2D) {
+              AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+              data += PrintRightXMMOperand(data);
+            } else if (b2 == 0xC2) {
+              // Intel manual 2A, Table 3-18.
+              const char* const pseudo_op[] = {
+                  "cmpeqss",  "cmpltss",  "cmpless",  "cmpunordss",
+                  "cmpneqss", "cmpnltss", "cmpnless", "cmpordss"};
+              AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+                             NameOfXMMRegister(regop), NameOfXMMRegister(rm));
+              data += 2;
+            } else {
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightXMMOperand(data);
+            }
           }
         } else if (*(data+1) == 0xA5) {
           data += 2;
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 18f1960..255bdbb 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -2,14 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/assembler.h"
 #include "src/frames.h"
 #include "src/ia32/assembler-ia32-inl.h"
 #include "src/ia32/assembler-ia32.h"
+#include "src/ia32/frames-ia32.h"
 
 namespace v8 {
 namespace internal {
@@ -31,12 +30,7 @@
 }
 
 
-Object*& ExitFrame::constant_pool_slot() const {
-  UNREACHABLE();
-  return Memory::Object_at(NULL);
-}
-
-
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 1290ad6..609dfec 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -39,6 +39,7 @@
  public:
   static const int kCallerFPOffset      = -6 * kPointerSize;
 
+  static const int kNewTargetArgOffset  = +2 * kPointerSize;
   static const int kFunctionArgOffset   = +3 * kPointerSize;
   static const int kReceiverArgOffset   = +4 * kPointerSize;
   static const int kArgcOffset          = +5 * kPointerSize;
@@ -79,47 +80,7 @@
 };
 
 
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
-  // FP-relative.
-  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
-  // FP-relative.
-  static const int kImplicitReceiverOffset = -5 * kPointerSize;
-  static const int kConstructorOffset      = kMinInt;
-  static const int kLengthOffset           = -4 * kPointerSize;
-  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
-  // FP-relative.
-  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
-  const int offset = JavaScriptFrameConstants::kFunctionOffset;
-  return Memory::Object_at(fp() + offset);
-}
-
-
-inline void StackHandler::SetFp(Address slot, Address fp) {
-  Memory::Address_at(slot) = fp;
-}
-
-
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_FRAMES_IA32_H_
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
deleted file mode 100644
index 1ba4095..0000000
--- a/src/ia32/full-codegen-ia32.cc
+++ /dev/null
@@ -1,5185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-#include "src/parser.h"
-#include "src/scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
-  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
-    info_emitted_ = false;
-#endif
-  }
-
-  ~JumpPatchSite() {
-    DCHECK(patch_site_.is_bound() == info_emitted_);
-  }
-
-  void EmitJumpIfNotSmi(Register reg,
-                        Label* target,
-                        Label::Distance distance = Label::kFar) {
-    __ test(reg, Immediate(kSmiTagMask));
-    EmitJump(not_carry, target, distance);  // Always taken before patched.
-  }
-
-  void EmitJumpIfSmi(Register reg,
-                     Label* target,
-                     Label::Distance distance = Label::kFar) {
-    __ test(reg, Immediate(kSmiTagMask));
-    EmitJump(carry, target, distance);  // Never taken before patched.
-  }
-
-  void EmitPatchInfo() {
-    if (patch_site_.is_bound()) {
-      int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
-      DCHECK(is_uint8(delta_to_patch_site));
-      __ test(eax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
-      info_emitted_ = true;
-#endif
-    } else {
-      __ nop();  // Signals no inlined code.
-    }
-  }
-
- private:
-  // jc will be patched with jz, jnc will become jnz.
-  void EmitJump(Condition cc, Label* target, Label::Distance distance) {
-    DCHECK(!patch_site_.is_bound() && !info_emitted_);
-    DCHECK(cc == carry || cc == not_carry);
-    __ bind(&patch_site_);
-    __ j(cc, target, distance);
-  }
-
-  MacroAssembler* masm_;
-  Label patch_site_;
-#ifdef DEBUG
-  bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function.  On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them.  The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-//   o edi: the JS function object being called (i.e. ourselves)
-//   o esi: our context
-//   o ebp: our caller's frame pointer
-//   o esp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate() {
-  CompilationInfo* info = info_;
-  handler_table_ =
-      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-
-  profiling_counter_ = isolate()->factory()->NewCell(
-      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
-  SetFunctionPosition(function());
-  Comment cmnt(masm_, "[ function compiled by full code generator");
-
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
-#endif
-
-  // Sloppy mode functions and builtins need to replace the receiver with the
-  // global proxy when called as functions (without an explicit receiver
-  // object).
-  if (info->strict_mode() == SLOPPY && !info->is_native()) {
-    Label ok;
-    // +1 for return address.
-    int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
-    __ mov(ecx, Operand(esp, receiver_offset));
-
-    __ cmp(ecx, isolate()->factory()->undefined_value());
-    __ j(not_equal, &ok, Label::kNear);
-
-    __ mov(ecx, GlobalObjectOperand());
-    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
-    __ mov(Operand(esp, receiver_offset), ecx);
-
-    __ bind(&ok);
-  }
-
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done below).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
-  info->set_prologue_offset(masm_->pc_offset());
-  __ Prologue(info->IsCodePreAgingActive());
-  info->AddNoFrameRange(0, masm_->pc_offset());
-
-  { Comment cmnt(masm_, "[ Allocate locals");
-    int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!info->function()->is_generator() || locals_count == 0);
-    if (locals_count == 1) {
-      __ push(Immediate(isolate()->factory()->undefined_value()));
-    } else if (locals_count > 1) {
-      if (locals_count >= 128) {
-        Label ok;
-        __ mov(ecx, esp);
-        __ sub(ecx, Immediate(locals_count * kPointerSize));
-        ExternalReference stack_limit =
-            ExternalReference::address_of_real_stack_limit(isolate());
-        __ cmp(ecx, Operand::StaticVariable(stack_limit));
-        __ j(above_equal, &ok, Label::kNear);
-        __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
-        __ bind(&ok);
-      }
-      __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
-      const int kMaxPushes = 32;
-      if (locals_count >= kMaxPushes) {
-        int loop_iterations = locals_count / kMaxPushes;
-        __ mov(ecx, loop_iterations);
-        Label loop_header;
-        __ bind(&loop_header);
-        // Do pushes.
-        for (int i = 0; i < kMaxPushes; i++) {
-          __ push(eax);
-        }
-        __ dec(ecx);
-        __ j(not_zero, &loop_header, Label::kNear);
-      }
-      int remaining = locals_count % kMaxPushes;
-      // Emit the remaining pushes.
-      for (int i = 0; i < remaining; i++) {
-        __ push(eax);
-      }
-    }
-  }
-
-  bool function_in_register = true;
-
-  // Possibly allocate a local context.
-  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0) {
-    Comment cmnt(masm_, "[ Allocate context");
-    bool need_write_barrier = true;
-    // Argument to NewContext is the function, which is still in edi.
-    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
-      __ push(edi);
-      __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewScriptContext, 2);
-    } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub(isolate(), heap_slots);
-      __ CallStub(&stub);
-      // Result of FastNewContextStub is always in new space.
-      need_write_barrier = false;
-    } else {
-      __ push(edi);
-      __ CallRuntime(Runtime::kNewFunctionContext, 1);
-    }
-    function_in_register = false;
-    // Context is returned in eax.  It replaces the context passed to us.
-    // It's saved in the stack and kept live in esi.
-    __ mov(esi, eax);
-    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
-
-    // Copy parameters into context if necessary.
-    int num_parameters = info->scope()->num_parameters();
-    for (int i = 0; i < num_parameters; i++) {
-      Variable* var = scope()->parameter(i);
-      if (var->IsContextSlot()) {
-        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
-            (num_parameters - 1 - i) * kPointerSize;
-        // Load parameter from stack.
-        __ mov(eax, Operand(ebp, parameter_offset));
-        // Store it in the context.
-        int context_offset = Context::SlotOffset(var->index());
-        __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers eax and ebx.
-        if (need_write_barrier) {
-          __ RecordWriteContextSlot(esi,
-                                    context_offset,
-                                    eax,
-                                    ebx,
-                                    kDontSaveFPRegs);
-        } else if (FLAG_debug_code) {
-          Label done;
-          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
-          __ Abort(kExpectedNewSpaceObject);
-          __ bind(&done);
-        }
-      }
-    }
-  }
-
-  Variable* arguments = scope()->arguments();
-  if (arguments != NULL) {
-    // Function uses arguments object.
-    Comment cmnt(masm_, "[ Allocate arguments object");
-    if (function_in_register) {
-      __ push(edi);
-    } else {
-      __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ lea(edx,
-           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-    __ push(edx);
-    __ push(Immediate(Smi::FromInt(num_parameters)));
-    // Arguments to ArgumentsAccessStub:
-    //   function, receiver address, parameter count.
-    // The stub will rewrite receiver and parameter count if the previous
-    // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub::Type type;
-    if (strict_mode() == STRICT) {
-      type = ArgumentsAccessStub::NEW_STRICT;
-    } else if (function()->has_duplicate_parameters()) {
-      type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
-    } else {
-      type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
-    }
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
-
-    SetVar(arguments, eax, ebx, edx);
-  }
-
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
-    Comment cmnt(masm_, "[ Declarations");
-    scope()->VisitIllegalRedeclaration(this);
-
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      // For named function expressions, declare the function name as a
-      // constant.
-      if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableDeclaration* function = scope()->function();
-        DCHECK(function->proxy()->var()->mode() == CONST ||
-               function->proxy()->var()->mode() == CONST_LEGACY);
-        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
-        VisitVariableDeclaration(function);
-      }
-      VisitDeclarations(scope()->declarations());
-    }
-
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      ExternalReference stack_limit
-          = ExternalReference::address_of_stack_limit(isolate());
-      __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, &ok, Label::kNear);
-      __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
-
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(function()->body());
-      DCHECK(loop_depth() == 0);
-    }
-  }
-
-  // Always emit a 'return undefined' in case control fell off the end of
-  // the body.
-  { Comment cmnt(masm_, "[ return <undefined>;");
-    __ mov(eax, isolate()->factory()->undefined_value());
-    EmitReturnSequence();
-  }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
-  __ Move(eax, Immediate(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
-  __ mov(ebx, Immediate(profiling_counter_));
-  __ sub(FieldOperand(ebx, Cell::kValueOffset),
-         Immediate(Smi::FromInt(delta)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
-  int reset_value = FLAG_interrupt_budget;
-  __ mov(ebx, Immediate(profiling_counter_));
-  __ mov(FieldOperand(ebx, Cell::kValueOffset),
-         Immediate(Smi::FromInt(reset_value)));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
-                                                Label* back_edge_target) {
-  Comment cmnt(masm_, "[ Back edge bookkeeping");
-  Label ok;
-
-  DCHECK(back_edge_target->is_bound());
-  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-  int weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-  EmitProfilingCounterDecrement(weight);
-  __ j(positive, &ok, Label::kNear);
-  __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-
-  // Record a mapping of this PC offset to the OSR id.  This is used to find
-  // the AST id from the unoptimized code in order to use it as a key into
-  // the deoptimization input data found in the optimized code.
-  RecordBackEdge(stmt->OsrEntryId());
-
-  EmitProfilingCounterReset();
-
-  __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-  // Record a mapping of the OSR id to this PC.  This is used if the OSR
-  // entry becomes the target of a bailout.  We don't expect it to be, but
-  // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
-  Comment cmnt(masm_, "[ Return sequence");
-  if (return_label_.is_bound()) {
-    __ jmp(&return_label_);
-  } else {
-    // Common return label
-    __ bind(&return_label_);
-    if (FLAG_trace) {
-      __ push(eax);
-      __ CallRuntime(Runtime::kTraceExit, 1);
-    }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ j(positive, &ok, Label::kNear);
-    __ push(eax);
-    __ call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ pop(eax);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
-#ifdef DEBUG
-    // Add a label for checking the size of the code used for returning.
-    Label check_exit_codesize;
-    masm_->bind(&check_exit_codesize);
-#endif
-    SetSourcePosition(function()->end_position() - 1);
-    __ RecordJSReturn();
-    // Do not use the leave instruction here because it is too short to
-    // patch with the code required by the debugger.
-    __ mov(esp, ebp);
-    int no_frame_start = masm_->pc_offset();
-    __ pop(ebp);
-
-    int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
-    __ Ret(arguments_bytes, ecx);
-    // Check that the size of the code used for returning is large enough
-    // for the debugger's requirements.
-    DCHECK(Assembler::kJSReturnSequenceLength <=
-           masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
-  }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-  codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-  MemOperand operand = codegen()->VarOperand(var, result_register());
-  // Memory operands can be pushed directly.
-  __ push(operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
-  // For simplicity we always test the accumulator register.
-  codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-  UNREACHABLE();  // Not used on IA32.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
-    Heap::RootListIndex index) const {
-  UNREACHABLE();  // Not used on IA32.
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
-    Heap::RootListIndex index) const {
-  UNREACHABLE();  // Not used on IA32.
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  UNREACHABLE();  // Not used on IA32.
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
-    Handle<Object> lit) const {
-  if (lit->IsSmi()) {
-    __ SafeMove(result_register(), Immediate(lit));
-  } else {
-    __ Move(result_register(), Immediate(lit));
-  }
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
-  if (lit->IsSmi()) {
-    __ SafePush(Immediate(lit));
-  } else {
-    __ push(Immediate(lit));
-  }
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
-                                          true,
-                                          true_label_,
-                                          false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
-  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    if (false_label_ != fall_through_) __ jmp(false_label_);
-  } else if (lit->IsTrue() || lit->IsJSObject()) {
-    if (true_label_ != fall_through_) __ jmp(true_label_);
-  } else if (lit->IsString()) {
-    if (String::cast(*lit)->length() == 0) {
-      if (false_label_ != fall_through_) __ jmp(false_label_);
-    } else {
-      if (true_label_ != fall_through_) __ jmp(true_label_);
-    }
-  } else if (lit->IsSmi()) {
-    if (Smi::cast(*lit)->value() == 0) {
-      if (false_label_ != fall_through_) __ jmp(false_label_);
-    } else {
-      if (true_label_ != fall_through_) __ jmp(true_label_);
-    }
-  } else {
-    // For simplicity we always test the accumulator register.
-    __ mov(result_register(), lit);
-    codegen()->DoTest(this);
-  }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
-                                                       Register reg) const {
-  DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
-  __ mov(Operand(esp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
-                                            Label* materialize_false) const {
-  DCHECK(materialize_true == materialize_false);
-  __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
-    Label* materialize_true,
-    Label* materialize_false) const {
-  Label done;
-  __ bind(materialize_true);
-  __ mov(result_register(), isolate()->factory()->true_value());
-  __ jmp(&done, Label::kNear);
-  __ bind(materialize_false);
-  __ mov(result_register(), isolate()->factory()->false_value());
-  __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
-    Label* materialize_true,
-    Label* materialize_false) const {
-  Label done;
-  __ bind(materialize_true);
-  __ push(Immediate(isolate()->factory()->true_value()));
-  __ jmp(&done, Label::kNear);
-  __ bind(materialize_false);
-  __ push(Immediate(isolate()->factory()->false_value()));
-  __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
-                                          Label* materialize_false) const {
-  DCHECK(materialize_true == true_label_);
-  DCHECK(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
-  Handle<Object> value = flag
-      ? isolate()->factory()->true_value()
-      : isolate()->factory()->false_value();
-  __ mov(result_register(), value);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
-  Handle<Object> value = flag
-      ? isolate()->factory()->true_value()
-      : isolate()->factory()->false_value();
-  __ push(Immediate(value));
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
-                                          true,
-                                          true_label_,
-                                          false_label_);
-  if (flag) {
-    if (true_label_ != fall_through_) __ jmp(true_label_);
-  } else {
-    if (false_label_ != fall_through_) __ jmp(false_label_);
-  }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
-                               Label* if_true,
-                               Label* if_false,
-                               Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
-  CallIC(ic, condition->test_id());
-  __ test(result_register(), result_register());
-  // The stub returns nonzero for true.
-  Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
-                              Label* if_true,
-                              Label* if_false,
-                              Label* fall_through) {
-  if (if_false == fall_through) {
-    __ j(cc, if_true);
-  } else if (if_true == fall_through) {
-    __ j(NegateCondition(cc), if_false);
-  } else {
-    __ j(cc, if_true);
-    __ jmp(if_false);
-  }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
-  DCHECK(var->IsStackAllocated());
-  // Offset is negative because higher indexes are at lower addresses.
-  int offset = -var->index() * kPointerSize;
-  // Adjust by a (parameter or local) base offset.
-  if (var->IsParameter()) {
-    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
-  } else {
-    offset += JavaScriptFrameConstants::kLocal0Offset;
-  }
-  return Operand(ebp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
-  if (var->IsContextSlot()) {
-    int context_chain_length = scope()->ContextChainLength(var->scope());
-    __ LoadContext(scratch, context_chain_length);
-    return ContextOperand(scratch, var->index());
-  } else {
-    return StackOperand(var);
-  }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
-  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
-  MemOperand location = VarOperand(var, dest);
-  __ mov(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
-                               Register src,
-                               Register scratch0,
-                               Register scratch1) {
-  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
-  DCHECK(!scratch0.is(src));
-  DCHECK(!scratch0.is(scratch1));
-  DCHECK(!scratch1.is(src));
-  MemOperand location = VarOperand(var, scratch0);
-  __ mov(location, src);
-
-  // Emit the write barrier code if the location is in the heap.
-  if (var->IsContextSlot()) {
-    int offset = Context::SlotOffset(var->index());
-    DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
-  }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
-                                                     bool should_normalize,
-                                                     Label* if_true,
-                                                     Label* if_false) {
-  // Only prepare for bailouts before splits if we're in a test
-  // context. Otherwise, we let the Visit function deal with the
-  // preparation to avoid preparing with the same AST id twice.
-  if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
-  Label skip;
-  if (should_normalize) __ jmp(&skip, Label::kNear);
-  PrepareForBailout(expr, TOS_REG);
-  if (should_normalize) {
-    __ cmp(eax, isolate()->factory()->true_value());
-    Split(equal, if_true, if_false, NULL);
-    __ bind(&skip);
-  }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
-  // The variable in the declaration always resides in the current context.
-  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
-    // Check that we're not inside a with or catch context.
-    __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
-    __ cmp(ebx, isolate()->factory()->with_context_map());
-    __ Check(not_equal, kDeclarationInWithContext);
-    __ cmp(ebx, isolate()->factory()->catch_context_map());
-    __ Check(not_equal, kDeclarationInCatchContext);
-  }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
-    VariableDeclaration* declaration) {
-  // If it was not possible to allocate the variable at compile time, we
-  // need to "declare" it at runtime to make sure it actually exists in the
-  // local context.
-  VariableProxy* proxy = declaration->proxy();
-  VariableMode mode = declaration->mode();
-  Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
-  switch (variable->location()) {
-    case Variable::UNALLOCATED:
-      globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(), zone());
-      break;
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL:
-      if (hole_init) {
-        Comment cmnt(masm_, "[ VariableDeclaration");
-        __ mov(StackOperand(variable),
-               Immediate(isolate()->factory()->the_hole_value()));
-      }
-      break;
-
-    case Variable::CONTEXT:
-      if (hole_init) {
-        Comment cmnt(masm_, "[ VariableDeclaration");
-        EmitDebugCheckDeclarationContext(variable);
-        __ mov(ContextOperand(esi, variable->index()),
-               Immediate(isolate()->factory()->the_hole_value()));
-        // No write barrier since the hole value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      }
-      break;
-
-    case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      __ push(esi);
-      __ push(Immediate(variable->name()));
-      // VariableDeclaration nodes are always introduced in one of four modes.
-      DCHECK(IsDeclaredVariableMode(mode));
-      PropertyAttributes attr =
-          IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
-      __ push(Immediate(Smi::FromInt(attr)));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (hole_init) {
-        __ push(Immediate(isolate()->factory()->the_hole_value()));
-      } else {
-        __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
-      }
-      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
-      break;
-    }
-  }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
-    FunctionDeclaration* declaration) {
-  VariableProxy* proxy = declaration->proxy();
-  Variable* variable = proxy->var();
-  switch (variable->location()) {
-    case Variable::UNALLOCATED: {
-      globals_->Add(variable->name(), zone());
-      Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
-      // Check for stack-overflow exception.
-      if (function.is_null()) return SetStackOverflow();
-      globals_->Add(function, zone());
-      break;
-    }
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      VisitForAccumulatorValue(declaration->fun());
-      __ mov(StackOperand(variable), result_register());
-      break;
-    }
-
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      EmitDebugCheckDeclarationContext(variable);
-      VisitForAccumulatorValue(declaration->fun());
-      __ mov(ContextOperand(esi, variable->index()), result_register());
-      // We know that we have written a function, which is not a smi.
-      __ RecordWriteContextSlot(esi,
-                                Context::SlotOffset(variable->index()),
-                                result_register(),
-                                ecx,
-                                kDontSaveFPRegs,
-                                EMIT_REMEMBERED_SET,
-                                OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      break;
-    }
-
-    case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ push(esi);
-      __ push(Immediate(variable->name()));
-      __ push(Immediate(Smi::FromInt(NONE)));
-      VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
-      break;
-    }
-  }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
-  Variable* variable = declaration->proxy()->var();
-  DCHECK(variable->location() == Variable::CONTEXT);
-  DCHECK(variable->interface()->IsFrozen());
-
-  Comment cmnt(masm_, "[ ModuleDeclaration");
-  EmitDebugCheckDeclarationContext(variable);
-
-  // Load instance object.
-  __ LoadContext(eax, scope_->ContextChainLength(scope_->ScriptScope()));
-  __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
-  __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
-
-  // Assign it.
-  __ mov(ContextOperand(esi, variable->index()), eax);
-  // We know that we have written a module, which is not a smi.
-  __ RecordWriteContextSlot(esi,
-                            Context::SlotOffset(variable->index()),
-                            eax,
-                            ecx,
-                            kDontSaveFPRegs,
-                            EMIT_REMEMBERED_SET,
-                            OMIT_SMI_CHECK);
-  PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
-  // Traverse into body.
-  Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
-  VariableProxy* proxy = declaration->proxy();
-  Variable* variable = proxy->var();
-  switch (variable->location()) {
-    case Variable::UNALLOCATED:
-      // TODO(rossberg)
-      break;
-
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, "[ ImportDeclaration");
-      EmitDebugCheckDeclarationContext(variable);
-      // TODO(rossberg)
-      break;
-    }
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL:
-    case Variable::LOOKUP:
-      UNREACHABLE();
-  }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
-  // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  // Call the runtime to declare the globals.
-  __ push(esi);  // The context is the first argument.
-  __ Push(pairs);
-  __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ CallRuntime(Runtime::kDeclareGlobals, 3);
-  // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
-  // Call the runtime to declare the modules.
-  __ Push(descriptions);
-  __ CallRuntime(Runtime::kDeclareModules, 1);
-  // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  Comment cmnt(masm_, "[ SwitchStatement");
-  Breakable nested_statement(this, stmt);
-  SetStatementPosition(stmt);
-
-  // Keep the switch value on the stack until a case matches.
-  VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
-  ZoneList<CaseClause*>* clauses = stmt->cases();
-  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
-
-  Label next_test;  // Recycled for each test.
-  // Compile all the tests with branches to their bodies.
-  for (int i = 0; i < clauses->length(); i++) {
-    CaseClause* clause = clauses->at(i);
-    clause->body_target()->Unuse();
-
-    // The default is not a test, but remember it as final fall through.
-    if (clause->is_default()) {
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case comparison");
-    __ bind(&next_test);
-    next_test.Unuse();
-
-    // Compile the label expression.
-    VisitForAccumulatorValue(clause->label());
-
-    // Perform the comparison as if via '==='.
-    __ mov(edx, Operand(esp, 0));  // Switch value.
-    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
-    JumpPatchSite patch_site(masm_);
-    if (inline_smi_code) {
-      Label slow_case;
-      __ mov(ecx, edx);
-      __ or_(ecx, eax);
-      patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-
-      __ cmp(edx, eax);
-      __ j(not_equal, &next_test);
-      __ Drop(1);  // Switch value is no longer needed.
-      __ jmp(clause->body_target());
-      __ bind(&slow_case);
-    }
-
-    // Record position before stub call for type feedback.
-    SetSourcePosition(clause->position());
-    Handle<Code> ic =
-        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
-    CallIC(ic, clause->CompareId());
-    patch_site.EmitPatchInfo();
-
-    Label skip;
-    __ jmp(&skip, Label::kNear);
-    PrepareForBailout(clause, TOS_REG);
-    __ cmp(eax, isolate()->factory()->true_value());
-    __ j(not_equal, &next_test);
-    __ Drop(1);
-    __ jmp(clause->body_target());
-    __ bind(&skip);
-
-    __ test(eax, eax);
-    __ j(not_equal, &next_test);
-    __ Drop(1);  // Switch value is no longer needed.
-    __ jmp(clause->body_target());
-  }
-
-  // Discard the test value and jump to the default if present, otherwise to
-  // the end of the statement.
-  __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
-  if (default_clause == NULL) {
-    __ jmp(nested_statement.break_label());
-  } else {
-    __ jmp(default_clause->body_target());
-  }
-
-  // Compile all the case bodies.
-  for (int i = 0; i < clauses->length(); i++) {
-    Comment cmnt(masm_, "[ Case body");
-    CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
-    VisitStatements(clause->statements());
-  }
-
-  __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  Comment cmnt(masm_, "[ ForInStatement");
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
-
-  SetStatementPosition(stmt);
-
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
-  SetExpressionPosition(stmt->enumerable());
-  VisitForAccumulatorValue(stmt->enumerable());
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  __ j(equal, &exit);
-  __ cmp(eax, isolate()->factory()->null_value());
-  __ j(equal, &exit);
-
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
-  Label convert, done_convert;
-  __ JumpIfSmi(eax, &convert, Label::kNear);
-  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &done_convert, Label::kNear);
-  __ bind(&convert);
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
-  __ push(eax);
-
-  // Check for proxies.
-  Label call_runtime, use_cache, fixed_array;
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  __ j(below_equal, &call_runtime);
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  __ CheckEnumCache(&call_runtime);
-
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ jmp(&use_cache, Label::kNear);
-
-  // Get the set of properties to enumerate.
-  __ bind(&call_runtime);
-  __ push(eax);
-  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         isolate()->factory()->meta_map());
-  __ j(not_equal, &fixed_array);
-
-
-  // We got a map in register eax. Get the enumeration cache from it.
-  Label no_descriptors;
-  __ bind(&use_cache);
-
-  __ EnumLength(edx, eax);
-  __ cmp(edx, Immediate(Smi::FromInt(0)));
-  __ j(equal, &no_descriptors);
-
-  __ LoadInstanceDescriptors(eax, ecx);
-  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
-  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  // Set up the four remaining stack slots.
-  __ push(eax);  // Map.
-  __ push(ecx);  // Enumeration cache.
-  __ push(edx);  // Number of valid entries for the map in the enum cache.
-  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
-  __ jmp(&loop);
-
-  __ bind(&no_descriptors);
-  __ add(esp, Immediate(kPointerSize));
-  __ jmp(&exit);
-
-  // We got a fixed array in register eax. Iterate through that.
-  Label non_proxy;
-  __ bind(&fixed_array);
-
-  // No need for a write barrier, we are storing a Smi in the feedback vector.
-  __ LoadHeapObject(ebx, FeedbackVector());
-  int vector_index = FeedbackVector()->GetIndex(slot);
-  __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
-         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-
-  __ mov(ebx, Immediate(Smi::FromInt(1)));  // Smi indicates slow check
-  __ mov(ecx, Operand(esp, 0 * kPointerSize));  // Get enumerated object
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
-  __ j(above, &non_proxy);
-  __ Move(ebx, Immediate(Smi::FromInt(0)));  // Zero indicates proxy
-  __ bind(&non_proxy);
-  __ push(ebx);  // Smi
-  __ push(eax);  // Array
-  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
-  __ push(eax);  // Fixed array length (as smi).
-  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
-
-  // Generate code for doing the condition check.
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
-  __ bind(&loop);
-  SetExpressionPosition(stmt->each());
-
-  __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
-  __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
-  __ j(above_equal, loop_statement.break_label());
-
-  // Get the current entry of the array into register ebx.
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
-  __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
-
-  // Get the expected map from the stack or a smi in the
-  // permanent slow case into register edx.
-  __ mov(edx, Operand(esp, 3 * kPointerSize));
-
-  // Check if the expected map still matches that of the enumerable.
-  // If not, we may have to filter the key.
-  Label update_each;
-  __ mov(ecx, Operand(esp, 4 * kPointerSize));
-  __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ j(equal, &update_each, Label::kNear);
-
-  // For proxies, no filtering is done.
-  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  DCHECK(Smi::FromInt(0) == 0);
-  __ test(edx, edx);
-  __ j(zero, &update_each);
-
-  // Convert the entry to a string or null if it isn't a property
-  // anymore. If the property has been removed while iterating, we
-  // just skip it.
-  __ push(ecx);  // Enumerable.
-  __ push(ebx);  // Current entry.
-  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
-  __ test(eax, eax);
-  __ j(equal, loop_statement.continue_label());
-  __ mov(ebx, eax);
-
-  // Update the 'each' property or variable from the possibly filtered
-  // entry in register ebx.
-  __ bind(&update_each);
-  __ mov(result_register(), ebx);
-  // Perform the assignment as if via '='.
-  { EffectContext context(this);
-    EmitAssignment(stmt->each());
-  }
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Generate code for going to the next element by incrementing the
-  // index (smi) stored on top of the stack.
-  __ bind(loop_statement.continue_label());
-  __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-
-  EmitBackEdgeBookkeeping(stmt, &loop);
-  __ jmp(&loop);
-
-  // Remove the pointers stored on the stack.
-  __ bind(loop_statement.break_label());
-  __ add(esp, Immediate(5 * kPointerSize));
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(&exit);
-  decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
-    __ mov(ebx, Immediate(info));
-    __ CallStub(&stub);
-  } else {
-    __ push(esi);
-    __ push(Immediate(info));
-    __ push(Immediate(pretenure
-                      ? isolate()->factory()->true_value()
-                      : isolate()->factory()->false_value()));
-    __ CallRuntime(Runtime::kNewClosure, 3);
-  }
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
-  Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
-  Comment cmnt(masm_, "[ SuperReference ");
-
-  __ mov(LoadDescriptor::ReceiverRegister(),
-         Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
-  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
-  __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
-
-  if (FLAG_vector_ics) {
-    __ mov(VectorLoadICDescriptor::SlotRegister(),
-           Immediate(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
-    CallLoadIC(NOT_CONTEXTUAL);
-  } else {
-    CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
-  }
-
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  Label done;
-  __ j(not_equal, &done);
-  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
-  __ bind(&done);
-}
-
-
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
-                                                  int offset) {
-  if (NeedsHomeObject(initializer)) {
-    __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-    __ mov(StoreDescriptor::NameRegister(),
-           Immediate(isolate()->factory()->home_object_symbol()));
-    __ mov(StoreDescriptor::ValueRegister(),
-           Operand(esp, offset * kPointerSize));
-    CallStoreIC();
-  }
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofState typeof_state,
-                                                      Label* slow) {
-  Register context = esi;
-  Register temp = edx;
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        __ j(not_equal, slow);
-      }
-      // Load next context in chain.
-      __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering esi.
-      context = temp;
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.  If we have reached an eval scope, we check
-    // all extensions from this point.
-    if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s != NULL && s->is_eval_scope()) {
-    // Loop up the context chain.  There is no frame effect so it is
-    // safe to use raw labels here.
-    Label next, fast;
-    if (!context.is(temp)) {
-      __ mov(temp, context);
-    }
-    __ bind(&next);
-    // Terminate at native context.
-    __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
-           Immediate(isolate()->factory()->native_context_map()));
-    __ j(equal, &fast, Label::kNear);
-    // Check that extension is NULL.
-    __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
-    __ j(not_equal, slow);
-    // Load next context in chain.
-    __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
-    __ jmp(&next);
-    __ bind(&fast);
-  }
-
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
-  __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
-  if (FLAG_vector_ics) {
-    __ mov(VectorLoadICDescriptor::SlotRegister(),
-           Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-  }
-
-  ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
-      ? NOT_CONTEXTUAL
-      : CONTEXTUAL;
-
-  CallLoadIC(mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = esi;
-  Register temp = ebx;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        __ j(not_equal, slow);
-      }
-      __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering esi.
-      context = temp;
-    }
-  }
-  // Check that last extension is NULL.
-  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  __ j(not_equal, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an esi-based operand (the write barrier cannot be allowed to
-  // destroy the esi register).
-  return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofState typeof_state,
-                                                  Label* slow,
-                                                  Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
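-  // For example, in
-  //   function f() { eval('0'); return x; }
-  // the load of 'x' can take the fast global path unless eval introduces a
-  // shadowing binding at runtime.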
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
-    __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
-      __ cmp(eax, isolate()->factory()->the_hole_value());
-      __ j(not_equal, done);
-      if (local->mode() == CONST_LEGACY) {
-        __ mov(eax, isolate()->factory()->undefined_value());
-      } else {  // LET || CONST
-        __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kThrowReferenceError, 1);
-      }
-    }
-    __ jmp(done);
-  }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
-  // Record position before possible IC call.
-  SetSourcePosition(proxy->position());
-  Variable* var = proxy->var();
-
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
-  switch (var->location()) {
-    case Variable::UNALLOCATED: {
-      Comment cmnt(masm_, "[ Global variable");
-      __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
-      __ mov(LoadDescriptor::NameRegister(), var->name());
-      if (FLAG_vector_ics) {
-        __ mov(VectorLoadICDescriptor::SlotRegister(),
-               Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-      }
-      CallLoadIC(CONTEXTUAL);
-      context()->Plug(eax);
-      break;
-    }
-
-    case Variable::PARAMETER:
-    case Variable::LOCAL:
-    case Variable::CONTEXT: {
-      Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
-                                               : "[ Stack variable");
-      if (var->binding_needs_init()) {
-        // var->scope() may be NULL when the proxy is located in eval code and
-        // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        //     var->location() == LOOKUP
-        // always holds.
-        DCHECK(var->scope() != NULL);
-
-        // Check if the binding really needs an initialization check. The check
-        // can be skipped in the following situation: we have a LET or CONST
-        // binding in harmony mode, both the Variable and the VariableProxy have
-        // the same declaration scope (i.e. they are both in global code, in the
-        // same function or in the same eval code) and the VariableProxy is in
-        // the source physically located after the initializer of the variable.
-        //
-        // We cannot skip any initialization checks for CONST in non-harmony
-        // mode because const variables may be declared but never initialized:
-        //   if (false) { const x; }; var y = x;
-        //
-        // The condition on the declaration scopes is a conservative check for
-        // nested functions that access a binding and are called before the
-        // binding is initialized:
-        //   function() { f(); let x = 1; function f() { x = 2; } }
-        //
-        bool skip_init_check;
-        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
-          skip_init_check = false;
-        } else {
-          // Check that we always have valid source position.
-          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
-          DCHECK(proxy->position() != RelocInfo::kNoPosition);
-          skip_init_check = var->mode() != CONST_LEGACY &&
-              var->initializer_position() < proxy->position();
-        }
-
-        if (!skip_init_check) {
-          // Let and const need a read barrier.
-          Label done;
-          GetVar(eax, var);
-          __ cmp(eax, isolate()->factory()->the_hole_value());
-          __ j(not_equal, &done, Label::kNear);
-          if (var->mode() == LET || var->mode() == CONST) {
-            // Throw a reference error when using an uninitialized let/const
-            // binding in harmony mode.
-            __ push(Immediate(var->name()));
-            __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          } else {
-            // Uninitialized const bindings outside of harmony mode are unholed.
-            DCHECK(var->mode() == CONST_LEGACY);
-            __ mov(eax, isolate()->factory()->undefined_value());
-          }
-          __ bind(&done);
-          context()->Plug(eax);
-          break;
-        }
-      }
-      context()->Plug(var);
-      break;
-    }
-
-    case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
-      __ bind(&slow);
-      __ push(esi);  // Context.
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
-      __ bind(&done);
-      context()->Plug(eax);
-      break;
-    }
-  }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExpLiteral");
-  Label materialized;
-  // Registers will be used as follows:
-  // edi = JS function.
-  // ecx = literals array.
-  // ebx = regexp literal.
-  // eax = regexp literal clone.
-  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
-  int literal_offset =
-      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
-  __ mov(ebx, FieldOperand(ecx, literal_offset));
-  __ cmp(ebx, isolate()->factory()->undefined_value());
-  __ j(not_equal, &materialized, Label::kNear);
-
-  // Create regexp literal using runtime function
-  // Result will be in eax.
-  __ push(ecx);
-  __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(expr->pattern()));
-  __ push(Immediate(expr->flags()));
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  __ mov(ebx, eax);
-
-  __ bind(&materialized);
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-  Label allocated, runtime_allocate;
-  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&runtime_allocate);
-  __ push(ebx);
-  __ push(Immediate(Smi::FromInt(size)));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  __ pop(ebx);
-
-  __ bind(&allocated);
-  // Copy the content into the newly allocated memory.
-  // (Unroll copy loop once for better throughput).
-  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
-    __ mov(edx, FieldOperand(ebx, i));
-    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
-    __ mov(FieldOperand(eax, i), edx);
-    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
-  }
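-  // Copy the trailing word when the object size is an odd number of words.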
-  if ((size % (2 * kPointerSize)) != 0) {
-    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
-    __ mov(FieldOperand(eax, size - kPointerSize), edx);
-  }
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
-  if (expression == NULL) {
-    __ push(Immediate(isolate()->factory()->null_value()));
-  } else {
-    VisitForStackValue(expression);
-  }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  expr->BuildConstantProperties(isolate());
-  Handle<FixedArray> constant_properties = expr->constant_properties();
-  int flags = expr->fast_elements()
-      ? ObjectLiteral::kFastElements
-      : ObjectLiteral::kNoFlags;
-  flags |= expr->has_function()
-      ? ObjectLiteral::kHasFunction
-      : ObjectLiteral::kNoFlags;
-  int properties_count = constant_properties->length() / 2;
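-  // Literals that are nested, may hold doubles, or are otherwise too complex
-  // for the stub are created by the runtime; the rest take the fast path.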
-  if (expr->may_store_doubles() || expr->depth() > 1 ||
-      masm()->serializer_enabled() ||
-      flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
-    __ push(Immediate(Smi::FromInt(expr->literal_index())));
-    __ push(Immediate(constant_properties));
-    __ push(Immediate(Smi::FromInt(flags)));
-    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
-    __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
-    __ mov(ecx, Immediate(constant_properties));
-    __ mov(edx, Immediate(Smi::FromInt(flags)));
-    FastCloneShallowObjectStub stub(isolate(), properties_count);
-    __ CallStub(&stub);
-  }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
-
-  // If result_saved is true the result is on top of the stack.  If
-  // result_saved is false the result is in eax.
-  bool result_saved = false;
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  expr->CalculateEmitStore(zone());
-
-  AccessorTable accessor_table(zone());
-  for (int i = 0; i < expr->properties()->length(); i++) {
-    ObjectLiteral::Property* property = expr->properties()->at(i);
-    if (property->IsCompileTimeValue()) continue;
-
-    Literal* key = property->key();
-    Expression* value = property->value();
-    if (!result_saved) {
-      __ push(eax);  // Save result on the stack
-      result_saved = true;
-    }
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        UNREACHABLE();
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
-        // Fall through.
-      case ObjectLiteral::Property::COMPUTED:
-        // It is safe to use [[Put]] here because the boilerplate already
-        // contains computed properties with an uninitialized value.
-        if (key->value()->IsInternalizedString()) {
-          if (property->emit_store()) {
-            VisitForAccumulatorValue(value);
-            DCHECK(StoreDescriptor::ValueRegister().is(eax));
-            __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
-            __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-            CallStoreIC(key->LiteralFeedbackId());
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
-
-            if (NeedsHomeObject(value)) {
-              __ mov(StoreDescriptor::ReceiverRegister(), eax);
-              __ mov(StoreDescriptor::NameRegister(),
-                     Immediate(isolate()->factory()->home_object_symbol()));
-              __ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
-              CallStoreIC();
-            }
-          } else {
-            VisitForEffect(value);
-          }
-          break;
-        }
-        __ push(Operand(esp, 0));  // Duplicate receiver.
-        VisitForStackValue(key);
-        VisitForStackValue(value);
-        if (property->emit_store()) {
-          EmitSetHomeObjectIfNeeded(value, 2);
-          __ push(Immediate(Smi::FromInt(SLOPPY)));  // Language mode
-          __ CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          __ Drop(3);
-        }
-        break;
-      case ObjectLiteral::Property::PROTOTYPE:
-        __ push(Operand(esp, 0));  // Duplicate receiver.
-        VisitForStackValue(value);
-        if (property->emit_store()) {
-          __ CallRuntime(Runtime::kInternalSetPrototype, 2);
-        } else {
-          __ Drop(2);
-        }
-        break;
-      case ObjectLiteral::Property::GETTER:
-        accessor_table.lookup(key)->second->getter = value;
-        break;
-      case ObjectLiteral::Property::SETTER:
-        accessor_table.lookup(key)->second->setter = value;
-        break;
-    }
-  }
-
-  // Emit code to define accessors, using only a single call to the runtime for
-  // each pair of corresponding getters and setters.
-  for (AccessorTable::Iterator it = accessor_table.begin();
-       it != accessor_table.end();
-       ++it) {
-    __ push(Operand(esp, 0));  // Duplicate receiver.
-    VisitForStackValue(it->first);
-    EmitAccessor(it->second->getter);
-    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
-    EmitAccessor(it->second->setter);
-    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
-    __ push(Immediate(Smi::FromInt(NONE)));
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
-  }
-
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ push(Operand(esp, 0));
-    __ CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  if (result_saved) {
-    context()->PlugTOS();
-  } else {
-    context()->Plug(eax);
-  }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  expr->BuildConstantElements(isolate());
-  int flags = expr->depth() == 1
-      ? ArrayLiteral::kShallowElements
-      : ArrayLiteral::kNoFlags;
-
-  ZoneList<Expression*>* subexprs = expr->values();
-  int length = subexprs->length();
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  DCHECK_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_constant_fast_elements =
-      IsFastObjectElementsKind(constant_elements_kind);
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // The only customer of allocation sites here is elements-kind
-    // transitioning; with fast object elements there is nothing left to
-    // transition to, so tracking can be turned off.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
-
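-  // Deeply nested or overly long array literals are materialized by the
-  // runtime; everything else goes through the fast clone stub.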
-  if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
-    __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
-    __ push(Immediate(Smi::FromInt(expr->literal_index())));
-    __ push(Immediate(constant_elements));
-    __ push(Immediate(Smi::FromInt(flags)));
-    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
-  } else {
-    __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
-    __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
-    __ mov(ecx, Immediate(constant_elements));
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
-  }
-
-  bool result_saved = false;  // Is the result saved to the stack?
-
-  // Emit code to evaluate all the non-constant subexpressions and to store
-  // them into the newly cloned array.
-  for (int i = 0; i < length; i++) {
-    Expression* subexpr = subexprs->at(i);
-    // If the subexpression is a literal or a simple materialized literal it
-    // is already set in the cloned array.
-    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
-    if (!result_saved) {
-      __ push(eax);  // array literal.
-      __ push(Immediate(Smi::FromInt(expr->literal_index())));
-      result_saved = true;
-    }
-    VisitForAccumulatorValue(subexpr);
-
-    if (IsFastObjectElementsKind(constant_elements_kind)) {
-      // Fast-case array literals with an ElementsKind of FAST_*_ELEMENTS
-      // cannot transition, so there is no need to call the runtime stub.
-      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-      __ mov(ebx, Operand(esp, kPointerSize));  // Copy of array literal.
-      __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-      // Store the subexpression value in the array's elements.
-      __ mov(FieldOperand(ebx, offset), result_register());
-      // Update the write barrier for the array store.
-      __ RecordWriteField(ebx, offset, result_register(), ecx,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          INLINE_SMI_CHECK);
-    } else {
-      // Store the subexpression value in the array's elements.
-      __ mov(ecx, Immediate(Smi::FromInt(i)));
-      StoreArrayLiteralElementStub stub(isolate());
-      __ CallStub(&stub);
-    }
-
-    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
-  }
-
-  if (result_saved) {
-    __ add(esp, Immediate(kPointerSize));  // literal index
-    context()->PlugTOS();
-  } else {
-    context()->Plug(eax);
-  }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  DCHECK(expr->target()->IsValidReferenceExpression());
-
-  Comment cmnt(masm_, "[ Assignment");
-
-  Property* property = expr->target()->AsProperty();
-  LhsKind assign_type = GetAssignType(property);
-
-  // Evaluate LHS expression.
-  switch (assign_type) {
-    case VARIABLE:
-      // Nothing to do here.
-      break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(property->obj()->AsSuperReference()->this_var());
-      EmitLoadHomeObject(property->obj()->AsSuperReference());
-      __ push(result_register());
-      if (expr->is_compound()) {
-        __ push(MemOperand(esp, kPointerSize));
-        __ push(result_register());
-      }
-      break;
-    case NAMED_PROPERTY:
-      if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the register.
-        VisitForStackValue(property->obj());
-        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
-      } else {
-        VisitForStackValue(property->obj());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY:
-      VisitForStackValue(property->obj()->AsSuperReference()->this_var());
-      EmitLoadHomeObject(property->obj()->AsSuperReference());
-      __ Push(result_register());
-      VisitForAccumulatorValue(property->key());
-      __ Push(result_register());
-      if (expr->is_compound()) {
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(result_register());
-      }
-      break;
-    case KEYED_PROPERTY: {
-      if (expr->is_compound()) {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
-        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
-        __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));
-      } else {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
-      }
-      break;
-    }
-  }
-
-  // For compound assignments we need another deoptimization point after the
-  // variable/property load.
-  if (expr->is_compound()) {
-    AccumulatorValueContext result_context(this);
-    { AccumulatorValueContext left_operand_context(this);
-      switch (assign_type) {
-        case VARIABLE:
-          EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
-          break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
-          break;
-        case NAMED_PROPERTY:
-          EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
-          break;
-        case KEYED_PROPERTY:
-          EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
-          break;
-      }
-    }
-
-    Token::Value op = expr->binary_op();
-    __ push(eax);  // Left operand goes on the stack.
-    VisitForAccumulatorValue(expr->value());
-
-    OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
-        ? OVERWRITE_RIGHT
-        : NO_OVERWRITE;
-    SetSourcePosition(expr->position() + 1);
-    if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr->binary_operation(),
-                            op,
-                            mode,
-                            expr->target(),
-                            expr->value());
-    } else {
-      EmitBinaryOp(expr->binary_operation(), op, mode);
-    }
-
-    // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
-  } else {
-    VisitForAccumulatorValue(expr->value());
-  }
-
-  // Record source position before possible IC call.
-  SetSourcePosition(expr->position());
-
-  // Store the value.
-  switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-      context()->Plug(eax);
-      break;
-    case NAMED_PROPERTY:
-      EmitNamedPropertyAssignment(expr);
-      break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(result_register());
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(result_register());
-      break;
-    case KEYED_PROPERTY:
-      EmitKeyedPropertyAssignment(expr);
-      break;
-  }
-}
-
-
-void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
-
-      __ jmp(&suspend);
-
-      __ bind(&continuation);
-      __ jmp(&resume);
-
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(continuation.pos())));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-      __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                          kDontSaveFPRegs);
-      __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
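-      // If esp still points at the expression area the operand stack is
-      // empty and there is no state for the runtime to save.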
-      __ cmp(esp, ebx);
-      __ j(equal, &post_runtime);
-      __ push(eax);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ mov(context_register(),
-             Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      __ pop(result_register());
-      EmitReturnSequence();
-
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ mov(FieldOperand(result_register(),
-                          JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
-      // Pop value from top-of-stack slot, box result into result register.
-      EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
-      break;
-    }
-
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ mov(eax, isolate()->factory()->undefined_value());
-      __ jmp(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ mov(load_name, isolate()->factory()->throw_string());  // "throw"
-      __ push(load_name);                                       // "throw"
-      __ push(Operand(esp, 2 * kPointerSize));                  // iter
-      __ push(eax);                                             // exception
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(eax);                                       // result
-      __ PushTryHandler(StackHandler::CATCH, expr->index());
-      const int handler_size = StackHandlerConstants::kSize;
-      __ push(eax);                                      // result
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ jmp(&l_resume);
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + handler_size;
-      __ mov(eax, Operand(esp, generator_object_depth));
-      __ push(eax);                                      // g
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(l_continuation.pos())));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-      __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                          kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ mov(context_register(),
-             Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ pop(eax);                                       // result
-      EmitReturnSequence();
-      __ bind(&l_resume);                                // received in eax
-      __ PopTryHandler();
-
-      // receiver = iter; f = iter.next; arg = received;
-      __ bind(&l_next);
-
-      __ mov(load_name, isolate()->factory()->next_string());
-      __ push(load_name);                           // "next"
-      __ push(Operand(esp, 2 * kPointerSize));      // iter
-      __ push(eax);                                 // received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ mov(load_receiver, Operand(esp, kPointerSize));
-      if (FLAG_vector_ics) {
-        __ mov(VectorLoadICDescriptor::SlotRegister(),
-               Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      }
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mov(edi, eax);
-      __ mov(Operand(esp, 2 * kPointerSize), edi);
-      CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
-      __ CallStub(&stub);
-
-      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ bind(&l_loop);
-      __ push(eax);                                      // save result
-      __ Move(load_receiver, eax);                       // result
-      __ mov(load_name,
-             isolate()->factory()->done_string());       // "done"
-      if (FLAG_vector_ics) {
-        __ mov(VectorLoadICDescriptor::SlotRegister(),
-               Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
-      }
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.done in eax
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ test(eax, eax);
-      __ j(zero, &l_try);
-
-      // result.value
-      __ pop(load_receiver);                              // result
-      __ mov(load_name,
-             isolate()->factory()->value_string());       // "value"
-      if (FLAG_vector_ics) {
-        __ mov(VectorLoadICDescriptor::SlotRegister(),
-               Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
-      }
-      CallLoadIC(NOT_CONTEXTUAL);                         // result.value in eax
-      context()->DropAndPlug(2, eax);                     // drop iter and g
-      break;
-    }
-  }
-}
-
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in eax and is ultimately read by the resumed generator,
-  // as if CallRuntime(Runtime::kSuspendJSGeneratorObject) had returned it, or
-  // it is read in order to throw when the resumed generator is already closed.
-  // ebx will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  __ pop(ebx);
-
-  // Load suspended function and context.
-  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
-  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-
-  // Push receiver.
-  __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
-  // Push holes for arguments to generator function.
-  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(edx,
-         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ mov(ecx, isolate()->factory()->the_hole_value());
-  Label push_argument_holes, push_frame;
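-  // edx holds the formal parameter count as a smi; subtracting Smi::FromInt(1)
-  // counts down one pushed hole per parameter until the subtraction borrows.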
-  __ bind(&push_argument_holes);
-  __ sub(edx, Immediate(Smi::FromInt(1)));
-  __ j(carry, &push_frame);
-  __ push(ecx);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ call(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  __ push(ebp);  // Caller's frame pointer.
-  __ mov(ebp, esp);
-  __ push(esi);  // Callee's context.
-  __ push(edi);  // Callee's JS Function.
-
-  // Load the operand stack size.
-  __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
-  __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
-  __ SmiUntag(edx);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ cmp(edx, Immediate(0));
-    __ j(not_zero, &slow_resume);
-    __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(ecx);
-    __ add(edx, ecx);
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ jmp(edx);
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ sub(edx, Immediate(1));
-  __ j(carry, &call_resume);
-  __ push(ecx);
-  __ jmp(&push_operand_holes);
-  __ bind(&call_resume);
-  __ push(ebx);
-  __ push(result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
-  // Not reached: the runtime call returns elsewhere.
-  __ Abort(kGeneratorFailedToResume);
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
-
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
-  Label gc_required;
-  Label allocated;
-
-  const int instance_size = 5 * kPointerSize;
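-  // Map, properties and elements pointers plus the value and done fields.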
-  DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
-            instance_size);
-
-  __ Allocate(instance_size, eax, ecx, edx, &gc_required, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&gc_required);
-  __ Push(Smi::FromInt(instance_size));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  __ mov(context_register(),
-         Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  __ bind(&allocated);
-  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
-  __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
-  __ pop(ecx);
-  __ mov(edx, isolate()->factory()->ToBoolean(done));
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
-  __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
-
-  // Only the value field needs a write barrier, as the other values are in the
-  // root set.
-  __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
-                      ecx, edx, kDontSaveFPRegs);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetSourcePosition(prop->position());
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(!prop->IsSuperAccess());
-
-  __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
-  if (FLAG_vector_ics) {
-    __ mov(VectorLoadICDescriptor::SlotRegister(),
-           Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-    CallLoadIC(NOT_CONTEXTUAL);
-  } else {
-    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
-  }
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetSourcePosition(prop->position());
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ push(Immediate(key->value()));
-  __ CallRuntime(Runtime::kLoadFromSuper, 3);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetSourcePosition(prop->position());
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
-  if (FLAG_vector_ics) {
-    __ mov(VectorLoadICDescriptor::SlotRegister(),
-           Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-    CallIC(ic);
-  } else {
-    CallIC(ic, prop->PropertyFeedbackId());
-  }
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetSourcePosition(prop->position());
-
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
-                                              Token::Value op,
-                                              OverwriteMode mode,
-                                              Expression* left,
-                                              Expression* right) {
-  // Do combined smi check of the operands. Left operand is on the
-  // stack. Right operand is in eax.
-  Label smi_case, done, stub_call;
-  __ pop(edx);
-  __ mov(ecx, eax);
-  __ or_(eax, edx);
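-  // Or-ing the operands leaves the smi tag clear only if both are smis, so
-  // one check below covers both operands.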
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
-
-  __ bind(&stub_call);
-  __ mov(eax, ecx);
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
-  CallIC(code, expr->BinaryOperationFeedbackId());
-  patch_site.EmitPatchInfo();
-  __ jmp(&done, Label::kNear);
-
-  // Smi case.
-  __ bind(&smi_case);
-  __ mov(eax, edx);  // Copy left operand in case of a stub call.
-
-  switch (op) {
-    case Token::SAR:
-      __ SmiUntag(ecx);
-      __ sar_cl(eax);  // No checks of result necessary
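-      // The shift may move payload bits into the tag position; clear the tag
-      // bit so the result is a valid smi again.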
-      __ and_(eax, Immediate(~kSmiTagMask));
-      break;
-    case Token::SHL: {
-      Label result_ok;
-      __ SmiUntag(eax);
-      __ SmiUntag(ecx);
-      __ shl_cl(eax);
-      // Check that the *signed* result fits in a smi.
-      __ cmp(eax, 0xc0000000);
-      __ j(positive, &result_ok);
-      __ SmiTag(ecx);
-      __ jmp(&stub_call);
-      __ bind(&result_ok);
-      __ SmiTag(eax);
-      break;
-    }
-    case Token::SHR: {
-      Label result_ok;
-      __ SmiUntag(eax);
-      __ SmiUntag(ecx);
-      __ shr_cl(eax);
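-      // The unsigned result fits in a smi only if the top two bits are clear.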
-      __ test(eax, Immediate(0xc0000000));
-      __ j(zero, &result_ok);
-      __ SmiTag(ecx);
-      __ jmp(&stub_call);
-      __ bind(&result_ok);
-      __ SmiTag(eax);
-      break;
-    }
-    case Token::ADD:
-      __ add(eax, ecx);
-      __ j(overflow, &stub_call);
-      break;
-    case Token::SUB:
-      __ sub(eax, ecx);
-      __ j(overflow, &stub_call);
-      break;
-    case Token::MUL: {
-      __ SmiUntag(eax);
-      __ imul(eax, ecx);
-      __ j(overflow, &stub_call);
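-      // A zero product may stand for -0 when an operand was negative; that
-      // cannot be represented as a smi, so fall back to the stub.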
-      __ test(eax, eax);
-      __ j(not_zero, &done, Label::kNear);
-      __ mov(ebx, edx);
-      __ or_(ebx, ecx);
-      __ j(negative, &stub_call);
-      break;
-    }
-    case Token::BIT_OR:
-      __ or_(eax, ecx);
-      break;
-    case Token::BIT_AND:
-      __ and_(eax, ecx);
-      break;
-    case Token::BIT_XOR:
-      __ xor_(eax, ecx);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in eax.
-  DCHECK(lit != NULL);
-  __ push(eax);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = ebx;
-  __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
-  __ Push(scratch);
-
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ObjectLiteral::Property* property = lit->properties()->at(i);
-    Literal* key = property->key()->AsLiteral();
-    Expression* value = property->value();
-    DCHECK(key != NULL);
-
-    if (property->is_static()) {
-      __ push(Operand(esp, kPointerSize));  // constructor
-    } else {
-      __ push(Operand(esp, 0));  // prototype
-    }
-    VisitForStackValue(key);
-    VisitForStackValue(value);
-    EmitSetHomeObjectIfNeeded(value, 2);
-
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-      case ObjectLiteral::Property::COMPUTED:
-      case ObjectLiteral::Property::PROTOTYPE:
-        __ CallRuntime(Runtime::kDefineClassMethod, 3);
-        break;
-
-      case ObjectLiteral::Property::GETTER:
-        __ CallRuntime(Runtime::kDefineClassGetter, 3);
-        break;
-
-      case ObjectLiteral::Property::SETTER:
-        __ CallRuntime(Runtime::kDefineClassSetter, 3);
-        break;
-
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  // prototype
-  __ CallRuntime(Runtime::kToFastProperties, 1);
-
-  // constructor
-  __ CallRuntime(Runtime::kToFastProperties, 1);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
-                                     Token::Value op,
-                                     OverwriteMode mode) {
-  __ pop(edx);
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
-  JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(code, expr->BinaryOperationFeedbackId());
-  patch_site.EmitPatchInfo();
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  DCHECK(expr->IsValidReferenceExpression());
-
-  Property* prop = expr->AsProperty();
-  LhsKind assign_type = GetAssignType(prop);
-
-  switch (assign_type) {
-    case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
-      EffectContext context(this);
-      EmitVariableAssignment(var, Token::ASSIGN);
-      break;
-    }
-    case NAMED_PROPERTY: {
-      __ push(eax);  // Preserve value.
-      VisitForAccumulatorValue(prop->obj());
-      __ Move(StoreDescriptor::ReceiverRegister(), eax);
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
-      __ mov(StoreDescriptor::NameRegister(),
-             prop->key()->AsLiteral()->value());
-      CallStoreIC();
-      break;
-    }
-    case NAMED_SUPER_PROPERTY: {
-      __ push(eax);
-      VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
-      EmitLoadHomeObject(prop->obj()->AsSuperReference());
-      // stack: value, this; eax: home_object
-      Register scratch = ecx;
-      Register scratch2 = edx;
-      __ mov(scratch, result_register());               // home_object
-      __ mov(eax, MemOperand(esp, kPointerSize));       // value
-      __ mov(scratch2, MemOperand(esp, 0));             // this
-      __ mov(MemOperand(esp, kPointerSize), scratch2);  // this
-      __ mov(MemOperand(esp, 0), scratch);              // home_object
-      // stack: this, home_object. eax: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      __ push(eax);
-      VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
-      EmitLoadHomeObject(prop->obj()->AsSuperReference());
-      __ push(result_register());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = ecx;
-      Register scratch2 = edx;
-      __ mov(scratch2, MemOperand(esp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; eax: key, edx: value
-      __ mov(scratch, MemOperand(esp, kPointerSize));  // this
-      __ mov(MemOperand(esp, 2 * kPointerSize), scratch);
-      __ mov(scratch, MemOperand(esp, 0));  // home_object
-      __ mov(MemOperand(esp, kPointerSize), scratch);
-      __ mov(MemOperand(esp, 0), eax);
-      __ mov(eax, scratch2);
-      // stack: this, home_object, key; eax: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_PROPERTY: {
-      __ push(eax);  // Preserve value.
-      VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ Move(StoreDescriptor::NameRegister(), eax);
-      __ pop(StoreDescriptor::ReceiverRegister());  // Receiver.
-      __ pop(StoreDescriptor::ValueRegister());     // Restore value.
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
-      CallIC(ic);
-      break;
-    }
-  }
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
-    Variable* var, MemOperand location) {
-  __ mov(location, eax);
-  if (var->IsContextSlot()) {
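-    // Context slots live on the heap, so the store needs a write barrier.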
-    __ mov(edx, eax);
-    int offset = Context::SlotOffset(var->index());
-    __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
-  }
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
-                                               Token::Value op) {
-  if (var->IsUnallocated()) {
-    // Global var, const, or let.
-    __ mov(StoreDescriptor::NameRegister(), var->name());
-    __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
-    CallStoreIC();
-
-  } else if (op == Token::INIT_CONST_LEGACY) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ push(eax);
-      __ push(esi);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
-    } else {
-      DCHECK(var->IsStackLocal() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, ecx);
-      __ mov(edx, location);
-      __ cmp(edx, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &skip, Label::kNear);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
-  } else if (var->mode() == LET && op != Token::INIT_LET) {
-    // Non-initializing assignment to let variable needs a write barrier.
-    DCHECK(!var->IsLookupSlot());
-    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-    Label assign;
-    MemOperand location = VarOperand(var, ecx);
-    __ mov(edx, location);
-    __ cmp(edx, isolate()->factory()->the_hole_value());
-    __ j(not_equal, &assign, Label::kNear);
-    __ push(Immediate(var->name()));
-    __ CallRuntime(Runtime::kThrowReferenceError, 1);
-    __ bind(&assign);
-    EmitStoreToStackLocalOrContextSlot(var, location);
-  } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ push(eax);  // Value.
-      __ push(esi);  // Context.
-      __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(strict_mode())));
-      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      MemOperand location = VarOperand(var, ecx);
-      if (generate_debug_code_ && op == Token::INIT_LET) {
-        // Check for an uninitialized let binding.
-        __ mov(edx, location);
-        __ cmp(edx, isolate()->factory()->the_hole_value());
-        __ Check(equal, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
-  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
-    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
-  }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
-  // Assignment to a property, using a named store IC.
-  // eax    : value
-  // esp[0] : receiver
-
-  Property* prop = expr->target()->AsProperty();
-  DCHECK(prop != NULL);
-  DCHECK(prop->key()->IsLiteral());
-
-  // Record source code position before IC call.
-  SetSourcePosition(expr->position());
-  __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
-  __ pop(StoreDescriptor::ReceiverRegister());
-  CallStoreIC(expr->AssignmentFeedbackId());
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // eax : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  __ push(Immediate(key->value()));
-  __ push(eax);
-  __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
-                                          : Runtime::kStoreToSuper_Sloppy),
-                 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
-  // eax : value
-  // stack : receiver ('this'), home_object, key
-
-  __ push(eax);
-  __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
-                                          : Runtime::kStoreKeyedToSuper_Sloppy),
-                 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
-  // Assignment to a property, using a keyed store IC.
-  // eax               : value
-  // esp[0]            : key
-  // esp[kPointerSize] : receiver
-
-  __ pop(StoreDescriptor::NameRegister());  // Key.
-  __ pop(StoreDescriptor::ReceiverRegister());
-  DCHECK(StoreDescriptor::ValueRegister().is(eax));
-  // Record source code position before IC call.
-  SetSourcePosition(expr->position());
-  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
-  CallIC(ic, expr->AssignmentFeedbackId());
-
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
-      EmitLoadHomeObject(expr->obj()->AsSuperReference());
-      __ push(result_register());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-    PrepareForBailoutForId(expr->LoadId(), TOS_REG);
-    context()->Plug(eax);
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ pop(LoadDescriptor::ReceiverRegister());                  // Object.
-      __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
-      EmitLoadHomeObject(expr->obj()->AsSuperReference());
-      __ push(result_register());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-    context()->Plug(eax);
-  }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               TypeFeedbackId ast_id) {
-  ic_total_count_++;
-  __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-
-  CallICState::CallType call_type =
-      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-  // Get the target function.
-  if (call_type == CallICState::FUNCTION) {
-    { StackValueContext context(this);
-      EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
-    }
-    // Push undefined as receiver. This is patched in the method prologue if it
-    // is a sloppy mode method.
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-  } else {
-    // Load the function from the receiver.
-    DCHECK(callee->IsProperty());
-    DCHECK(!callee->AsProperty()->IsSuperAccess());
-    __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
-    EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
-    // Push the target function under the receiver.
-    __ push(Operand(esp, 0));
-    __ mov(Operand(esp, kPointerSize), eax);
-  }
-
-  EmitCall(expr, call_type);
-}
-
-
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetSourcePosition(prop->position());
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
-  EmitLoadHomeObject(super_ref);
-  __ push(eax);
-  VisitForAccumulatorValue(super_ref->this_var());
-  __ push(eax);
-  __ push(eax);
-  __ push(Operand(esp, kPointerSize * 2));
-  __ push(Immediate(key->value()));
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  __ CallRuntime(Runtime::kLoadFromSuper, 3);
-
-  // Replace home_object with target function.
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr, CallICState::METHOD);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
-                                                Expression* key) {
-  // Load the key.
-  VisitForAccumulatorValue(key);
-
-  Expression* callee = expr->expression();
-
-  // Load the function from the receiver.
-  DCHECK(callee->IsProperty());
-  __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
-  __ mov(LoadDescriptor::NameRegister(), eax);
-  EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
-
-  // Push the target function under the receiver.
-  __ push(Operand(esp, 0));
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  EmitCall(expr, CallICState::METHOD);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetSourcePosition(prop->position());
-  // Load the function from the receiver.
-  SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
-  EmitLoadHomeObject(super_ref);
-  __ push(eax);
-  VisitForAccumulatorValue(super_ref->this_var());
-  __ push(eax);
-  __ push(eax);
-  __ push(Operand(esp, kPointerSize * 2));
-  VisitForStackValue(prop->key());
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
-
-  // Replace home_object with target function.
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr, CallICState::METHOD);
-}
-
-
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
-  // Load the arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  { PreservePositionScope scope(masm()->positions_recorder());
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-  }
-
-  // Record source position of the IC call.
-  SetSourcePosition(expr->position());
-  Handle<Code> ic = CallIC::initialize_stub(
-      isolate(), arg_count, call_type);
-  __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
-  __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  // Don't assign a type feedback id to the IC, since type feedback is provided
-  // by the vector above.
-  CallIC(ic);
-
-  RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  context()->DropAndPlug(1, eax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
-  // Push copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ push(Operand(esp, arg_count * kPointerSize));
-  } else {
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-  }
-
-  // Push the enclosing function.
-  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  // Push the receiver of the enclosing function.
-  __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
-  // Push the language mode.
-  __ push(Immediate(Smi::FromInt(strict_mode())));
-
-  // Push the start position of the scope the call resides in.
-  __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
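-  // Together with the function pushed by the caller these are the six
-  // arguments expected by Runtime::kResolvePossiblyDirectEval.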
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
-  DCHECK(super_ref != NULL);
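-  // The super constructor is the [[Prototype]] of the active function.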
-  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ CallRuntime(Runtime::kGetPrototype, 1);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
-  // We want to verify that RecordJSReturnSite gets called on all paths
-  // through this function.  Avoid early returns.
-  expr->return_is_recorded_ = false;
-#endif
-
-  Comment cmnt(masm_, "[ Call");
-  Expression* callee = expr->expression();
-  Call::CallType call_type = expr->GetCallType(isolate());
-
-  if (call_type == Call::POSSIBLY_EVAL_CALL) {
-    // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
-    // to resolve the function we need to call and the receiver of the call.
-    // Then we call the resolved function using the given arguments.
-    ZoneList<Expression*>* args = expr->arguments();
-    int arg_count = args->length();
-    { PreservePositionScope pos_scope(masm()->positions_recorder());
-      VisitForStackValue(callee);
-      // Reserve the receiver slot.
-      __ push(Immediate(isolate()->factory()->undefined_value()));
-      // Push the arguments.
-      for (int i = 0; i < arg_count; i++) {
-        VisitForStackValue(args->at(i));
-      }
-
-      // Push a copy of the function (found below the arguments) and
-      // resolve eval.
-      __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(arg_count);
-
-      // The runtime call returns a pair of values in eax (function) and
-      // edx (receiver). Touch up the stack with the right values.
-      __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
-      __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
-
-      PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-    }
-    // Record source position for debugger.
-    SetSourcePosition(expr->position());
-    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
-    __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-    __ CallStub(&stub);
-    RecordJSReturnSite(expr);
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    context()->DropAndPlug(1, eax);
-
-  } else if (call_type == Call::GLOBAL_CALL) {
-    EmitCallWithLoadIC(expr);
-
-  } else if (call_type == Call::LOOKUP_SLOT_CALL) {
-    // Call to a lookup slot (dynamically introduced variable).
-    VariableProxy* proxy = callee->AsVariableProxy();
-    Label slow, done;
-    { PreservePositionScope scope(masm()->positions_recorder());
-      // Generate code for loading from variables potentially shadowed by
-      // eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
-    }
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in eax) and
-    // the object holding it (returned in edx).
-    __ push(context_register());
-    __ push(Immediate(proxy->name()));
-    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
-    __ push(eax);  // Function.
-    __ push(edx);  // Receiver.
-    PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the function
-    // and receiver and have the slow path jump around this code.
-    if (done.is_linked()) {
-      Label call;
-      __ jmp(&call, Label::kNear);
-      __ bind(&done);
-      // Push function.
-      __ push(eax);
-      // The receiver is implicitly the global receiver. Indicate this by
-      // passing undefined to the call function stub.
-      __ push(Immediate(isolate()->factory()->undefined_value()));
-      __ bind(&call);
-    }
-
-    // The receiver is either the global receiver or an object found by
-    // LoadContextSlot.
-    EmitCall(expr);
-
-  } else if (call_type == Call::PROPERTY_CALL) {
-    Property* property = callee->AsProperty();
-    bool is_named_call = property->key()->IsPropertyName();
-    if (property->IsSuperAccess()) {
-      if (is_named_call) {
-        EmitSuperCallWithLoadIC(expr);
-      } else {
-        EmitKeyedSuperCallWithLoadIC(expr);
-      }
-    } else {
-      {
-        PreservePositionScope scope(masm()->positions_recorder());
-        VisitForStackValue(property->obj());
-      }
-      if (is_named_call) {
-        EmitCallWithLoadIC(expr);
-      } else {
-        EmitKeyedCallWithLoadIC(expr, property->key());
-      }
-    }
-  } else if (call_type == Call::SUPER_CALL) {
-    SuperReference* super_ref = callee->AsSuperReference();
-    EmitLoadSuperConstructor(super_ref);
-    __ push(result_register());
-    VisitForStackValue(super_ref->this_var());
-    EmitCall(expr, CallICState::METHOD);
-  } else {
-    DCHECK(call_type == Call::OTHER_CALL);
-    // Call to an arbitrary expression not handled specially above.
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(callee);
-    }
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-    // Emit function call.
-    EmitCall(expr);
-  }
-
-#ifdef DEBUG
-  // RecordJSReturnSite should have been called.
-  DCHECK(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
-  Comment cmnt(masm_, "[ CallNew");
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments.
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  if (expr->expression()->IsSuperReference()) {
-    EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
-    __ push(result_register());
-  } else {
-    VisitForStackValue(expr->expression());
-  }
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetSourcePosition(expr->position());
-
-  // Load function and argument count into edi and eax.
-  __ Move(eax, Immediate(arg_count));
-  __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
-  // Record call targets in unoptimized code.
-  if (FLAG_pretenuring_call_new) {
-    EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
-           expr->CallNewFeedbackSlot().ToInt() + 1);
-  }
-
-  __ LoadHeapObject(ebx, FeedbackVector());
-  __ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
-
-  CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
-  __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
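-  // Smis are tagged with a zero low bit, so the test sets the zero flag
-  // exactly when eax holds a smi.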
-  __ test(eax, Immediate(kSmiTagMask));
-  Split(zero, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
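-  // A non-negative smi has both the tag bit and the sign bit clear, so a
-  // single test against both bits suffices.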
-  __ test(eax, Immediate(kSmiTagMask | 0x80000000));
-  Split(zero, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ cmp(eax, isolate()->factory()->null_value());
-  __ j(equal, if_true);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
-  __ test(ecx, Immediate(1 << Map::kIsUndetectable));
-  __ j(not_zero, if_false);
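-  // Finally, check that the instance type lies in the non-callable JS
-  // object range.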
-  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  __ j(below, if_false);
-  __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(below_equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(above_equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
-  __ test(ebx, Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(not_zero, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false, skip_lookup;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ AssertNotSmi(eax);
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
-            1 << Map::kStringWrapperSafeForDefaultValueOf);
-  __ j(not_zero, &skip_lookup);
-
-  // Check for fast case object. Return false for slow case objects.
-  __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
-  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ cmp(ecx, isolate()->factory()->hash_table_map());
-  __ j(equal, if_false);
-
-  // Look for the valueOf string in the descriptor array, and indicate false if
-  // found. Since we omit an enumeration index check, if it is added via a
-  // transition that shares its descriptor array, this is a false positive.
-  Label entry, loop, done;
-
-  // Skip loop if no descriptors are valid.
-  __ NumberOfOwnDescriptors(ecx, ebx);
-  __ cmp(ecx, 0);
-  __ j(equal, &done);
-
-  __ LoadInstanceDescriptors(ebx, ebx);
-  // ebx: descriptor array.
-  // ecx: valid entries in the descriptor array.
-  // Calculate the end of the descriptor array.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kPointerSize == 4);
-  __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
-  __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset));
-  // Calculate location of the first key name.
-  __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
-  // Loop through all the keys in the descriptor array. If one of these is the
-  // internalized string "valueOf" the result is false.
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, FieldOperand(ebx, 0));
-  __ cmp(edx, isolate()->factory()->value_of_string());
-  __ j(equal, if_false);
-  __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
-  __ bind(&entry);
-  __ cmp(ebx, ecx);
-  __ j(not_equal, &loop);
-
-  __ bind(&done);
-
-  // Reload map as register ebx was used as temporary above.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-
-  // Set the bit in the map to indicate that there is no local valueOf field.
-  __ or_(FieldOperand(ebx, Map::kBitField2Offset),
-         Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-
-  __ bind(&skip_lookup);
-
-  // If a valueOf property is not found on the object, check that its
-  // prototype is the unmodified String prototype. If not, the result is false.
-  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  __ JumpIfSmi(ecx, if_false);
-  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ mov(edx,
-         FieldOperand(edx, GlobalObject::kNativeContextOffset));
-  __ cmp(ecx,
-         ContextOperand(edx,
-                        Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-  __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
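-  // -0.0 is the heap number with only the IEEE sign bit set: the exponent
-  // half is 0x80000000 and the mantissa half is zero.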
-  // Check if the exponent half is 0x80000000. Comparing against 1 and
-  // checking for overflow is the shortest possible encoding.
-  __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
-  __ j(no_overflow, if_false);
-  __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  Register map = ebx;
-  __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
-  __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
-  __ j(less, if_false);
-  __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(less_equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  // Get the frame pointer for the calling frame.
-  __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &check_frame_marker);
-  __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
-         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ pop(ebx);
-  __ cmp(eax, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in edx and the formal
-  // parameter count in eax.
-  VisitForAccumulatorValue(args->at(0));
-  __ mov(edx, eax);
-  __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-
-  Label exit;
-  // Get the number of formal parameters.
-  __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  __ AssertSmi(eax);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  Label done, null, function, non_function_constructor;
-
-  VisitForAccumulatorValue(args->at(0));
-
-  // If the object is a smi, we return null.
-  __ JumpIfSmi(eax, &null);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-  // Assume that there are only two callable types, and one of them is at
-  // either end of the type range for JS object types. Saves extra comparisons.
-  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
-  // Map is now in eax.
-  __ j(below, &null);
-  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                FIRST_SPEC_OBJECT_TYPE + 1);
-  __ j(equal, &function);
-
-  __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_SPEC_OBJECT_TYPE - 1);
-  __ j(equal, &function);
-  // Assume that there is no larger type.
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
-  // Check if the constructor in the map is a JS function.
-  __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
-  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &non_function_constructor);
-
-  // eax now contains the constructor function. Grab the
-  // instance class name from there.
-  __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
-  __ jmp(&done);
-
-  // Functions have class 'Function'.
-  __ bind(&function);
-  __ mov(eax, isolate()->factory()->Function_string());
-  __ jmp(&done);
-
-  // Objects with a non-function constructor have class 'Object'.
-  __ bind(&non_function_constructor);
-  __ mov(eax, isolate()->factory()->Object_string());
-  __ jmp(&done);
-
-  // Non-JS objects have class null.
-  __ bind(&null);
-  __ mov(eax, isolate()->factory()->null_value());
-
-  // All done.
-  __ bind(&done);
-
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
-  // Load the arguments on the stack and call the stub.
-  SubStringStub stub(isolate());
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
-  // Load the arguments on the stack and call the stub.
-  RegExpExecStub stub(isolate());
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 4);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  VisitForStackValue(args->at(3));
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));  // Load the object.
-
-  Label done;
-  // If the object is a smi return the object.
-  __ JumpIfSmi(eax, &done, Label::kNear);
-  // If the object is not a value type, return the object.
-  __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
-  __ j(not_equal, &done, Label::kNear);
-  __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  DCHECK_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
-  VisitForAccumulatorValue(args->at(0));  // Load the object.
-
-  Label runtime, done, not_date_object;
-  Register object = eax;
-  Register result = eax;
-  Register scratch = ecx;
-
-  __ JumpIfSmi(object, &not_date_object);
-  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  __ j(not_equal, &not_date_object);
-
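-  // Field 0 is the date value itself. Other fields below kFirstUncachedField
-  // can be read from the cache as long as the date cache stamp is current;
-  // otherwise fall back to the C function that recomputes them.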
-  if (index->value() == 0) {
-    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
-    __ jmp(&done);
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ mov(scratch, Operand::StaticVariable(stamp));
-      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
-      __ j(not_equal, &runtime, Label::kNear);
-      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
-                                          kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch);
-    __ mov(Operand(esp, 0), object);
-    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ jmp(&done);
-  }
-
-  __ bind(&not_date_object);
-  __ CallRuntime(Runtime::kThrowNotDateError, 0);
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = eax;
-  Register index = ebx;
-  Register value = ecx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-
-  __ pop(value);
-  __ pop(index);
-
-  if (FLAG_debug_code) {
-    __ test(value, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ test(index, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-  }
-
-  __ SmiUntag(value);
-  __ SmiUntag(index);
-
-  if (FLAG_debug_code) {
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
-  }
-
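-  // Index and value are untagged above, so times_1 addresses one-byte
-  // characters directly.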
-  __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
-           value);
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(3, args->length());
-
-  Register string = eax;
-  Register index = ebx;
-  Register value = ecx;
-
-  VisitForStackValue(args->at(0));        // index
-  VisitForStackValue(args->at(1));        // value
-  VisitForAccumulatorValue(args->at(2));  // string
-  __ pop(value);
-  __ pop(index);
-
-  if (FLAG_debug_code) {
-    __ test(value, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ test(index, Immediate(kSmiTagMask));
-    __ Check(zero, kNonSmiValue);
-    __ SmiUntag(index);
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
-    __ SmiTag(index);
-  }
-
-  __ SmiUntag(value);
-  // No need to untag the smi index: the smi tag shift matches the two-byte
-  // character scaling, so times_1 addressing is already correct.
-  __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
-           value);
-  context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
-  // Load the arguments on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-
-  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(ebx);  // eax = value. ebx = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(ebx, &done, Label::kNear);
-
-  // If the object is not a value type, return the value.
-  __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
-  __ j(not_equal, &done, Label::kNear);
-
-  // Store the value.
-  __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mov(edx, eax);
-  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(args->length(), 1);
-
-  // Load the argument into eax and call the stub.
-  VisitForAccumulatorValue(args->at(0));
-
-  NumberToStringStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label done;
-  StringCharFromCodeGenerator generator(eax, ebx);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(ebx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = ebx;
-  Register index = eax;
-  Register result = edx;
-
-  __ pop(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharCodeAtGenerator generator(object,
-                                      index,
-                                      result,
-                                      &need_conversion,
-                                      &need_conversion,
-                                      &index_out_of_range,
-                                      STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // NaN.
-  __ Move(result, Immediate(isolate()->factory()->nan_value()));
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move the undefined value into the result register, which will
-  // trigger conversion.
-  __ Move(result, Immediate(isolate()->factory()->undefined_value()));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Register object = ebx;
-  Register index = eax;
-  Register scratch = edx;
-  Register result = eax;
-
-  __ pop(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm_);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ Move(result, Immediate(isolate()->factory()->empty_string()));
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ Move(result, Immediate(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  NopRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm_, call_helper);
-
-  __ bind(&done);
-  context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(2, args->length());
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  __ pop(edx);
-  StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(2, args->length());
-
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-
-  StringCompareStub stub(isolate());
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() >= 2);
-
-  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
-  for (int i = 0; i < arg_count + 1; ++i) {
-    VisitForStackValue(args->at(i));
-  }
-  VisitForAccumulatorValue(args->last());  // Function.
-
-  Label runtime, done;
-  // Check for non-function argument (including proxy).
-  __ JumpIfSmi(eax, &runtime);
-  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &runtime);
-
-  // InvokeFunction requires the function in edi. Move it in there.
-  __ mov(edi, result_register());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ jmp(&done);
-
-  __ bind(&runtime);
-  __ push(eax);
-  __ CallRuntime(Runtime::kCall, args->length());
-  __ bind(&done);
-
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
-  // Load the arguments on the stack and call the stub.
-  RegExpConstructResultStub stub(isolate());
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForAccumulatorValue(args->at(2));
-  __ pop(ebx);
-  __ pop(ecx);
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(2, args->length());
-
-  DCHECK_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      isolate()->native_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort(kAttemptToUseUndefinedCache);
-    __ mov(eax, isolate()->factory()->undefined_value());
-    context()->Plug(eax);
-    return;
-  }
-
-  VisitForAccumulatorValue(args->at(1));
-
-  Register key = eax;
-  Register cache = ebx;
-  Register tmp = ecx;
-  __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
-  __ mov(cache,
-         FieldOperand(cache, GlobalObject::kNativeContextOffset));
-  __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ mov(cache,
-         FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-  Label done, not_found;
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
-  // tmp now holds finger offset as a smi.
-  __ cmp(key, FixedArrayElementOperand(cache, tmp));
-  __ j(not_equal, &not_found);
-
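-  // Cache hit: the value is stored in the element right after the key the
-  // finger points at.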
-  __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
-  __ jmp(&done);
-
-  __ bind(&not_found);
-  // Call runtime to perform the lookup.
-  __ push(cache);
-  __ push(key);
-  __ CallRuntime(Runtime::kGetFromCache, 2);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(eax);
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
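-  // All mask bits are clear in the hash field exactly when the string
-  // caches an array index.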
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(zero, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-  VisitForAccumulatorValue(args->at(0));
-
-  __ AssertString(eax);
-
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  // We will leave the separator on the stack until the end of the function.
-  VisitForStackValue(args->at(1));
-  // Load the array into eax (the accumulator).
-  VisitForAccumulatorValue(args->at(0));
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = eax;
-  Register elements = no_reg;  // Will be eax.
-
-  Register index = edx;
-
-  Register string_length = ecx;
-
-  Register string = esi;
-
-  Register scratch = ebx;
-
-  Register array_length = edi;
-  Register result_pos = no_reg;  // Will be edi.
-
-  // Separator operand is already pushed.
-  Operand separator_operand = Operand(esp, 2 * kPointerSize);
-  Operand result_operand = Operand(esp, 1 * kPointerSize);
-  Operand array_length_operand = Operand(esp, 0);
-  __ sub(esp, Immediate(2 * kPointerSize));
-  __ cld();
-  // Check that the array is a JSArray.
-  __ JumpIfSmi(array, &bailout);
-  __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length);
-  __ j(not_zero, &non_trivial_array);
-  __ mov(result_operand, isolate()->factory()->empty_string());
-  __ jmp(&done);
-
-  // Save the array length.
-  __ bind(&non_trivial_array);
-  __ mov(array_length_operand, array_length);
-
-  // Save the FixedArray containing array's elements.
-  // End of array's live range.
-  elements = array;
-  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
-  array = no_reg;
-
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ Move(index, Immediate(0));
-  __ Move(string_length, Immediate(0));
-  // Loop condition: while (index < length).
-  // Live loop registers: index, array_length, string,
-  //                      scratch, string_length, elements.
-  if (generate_debug_code_) {
-    __ cmp(index, array_length);
-    __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ bind(&loop);
-  __ mov(string, FieldOperand(elements,
-                              index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ JumpIfSmi(string, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  __ add(string_length,
-         FieldOperand(string, SeqOneByteString::kLengthOffset));
-  __ j(overflow, &bailout);
-  __ add(index, Immediate(1));
-  __ cmp(index, array_length);
-  __ j(less, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmp(array_length, 1);
-  __ j(not_equal, &not_size_one_array);
-  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
-  __ mov(result_operand, scratch);
-  __ jmp(&done);
-
-  __ bind(&not_size_one_array);
-
-  // End of array_length live range.
-  result_pos = array_length;
-  array_length = no_reg;
-
-  // Live registers:
-  // string_length: Sum of string lengths, as a smi.
-  // elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ mov(string, separator_operand);
-  __ JumpIfSmi(string, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Add (separator length times array_length) - separator length
-  // to string_length.
-  __ mov(scratch, separator_operand);
-  __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
-  __ sub(string_length, scratch);  // May be negative, temporarily.
-  __ imul(scratch, array_length_operand);
-  __ j(overflow, &bailout);
-  __ add(string_length, scratch);
-  __ j(overflow, &bailout);
-
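-  // Untag the smi-encoded sum to get the length in characters.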
-  __ shr(string_length, 1);
-  // Live registers and stack values:
-  //   string_length
-  //   elements
-  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
-                           &bailout);
-  __ mov(result_operand, result_pos);
-  __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
-  __ mov(string, separator_operand);
-  __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ j(equal, &one_char_separator);
-  __ j(greater, &long_separator);
-
-
-  // Empty separator case
-  __ mov(index, Immediate(0));
-  __ jmp(&loop_1_condition);
-  // Loop condition: while (index < length).
-  __ bind(&loop_1);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   elements: the FixedArray of strings we are joining.
-
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-  __ bind(&loop_1_condition);
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_1);  // End while (index < length).
-  __ jmp(&done);
-
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ mov_b(separator_operand, scratch);
-
-  __ Move(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator.
-  __ jmp(&loop_2_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_2);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator character to the result.
-  __ mov_b(scratch, separator_operand);
-  __ mov_b(Operand(result_pos, 0), scratch);
-  __ inc(result_pos);
-
-  __ bind(&loop_2_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_2);  // End while (index < length).
-  __ jmp(&done);
-
-
-  // Long separator case (separator is more than one character).
-  __ bind(&long_separator);
-
-  __ Move(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator.
-  __ jmp(&loop_3_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_3);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator to the result.
-  __ mov(string, separator_operand);
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-
-  __ bind(&loop_3_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_3);  // End while (index < length).
-  __ jmp(&done);
-
-
-  __ bind(&bailout);
-  __ mov(result_operand, isolate()->factory()->undefined_value());
-  __ bind(&done);
-  __ mov(eax, result_operand);
-  // Drop temp values from the stack, and restore context register.
-  __ add(esp, Immediate(3 * kPointerSize));
-
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(isolate());
-  __ movzx_b(eax, Operand::StaticVariable(debug_is_active));
-  __ SmiTag(eax);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  if (expr->function() != NULL &&
-      expr->function()->intrinsic_type == Runtime::INLINE) {
-    Comment cmnt(masm_, "[ InlineRuntimeCall");
-    EmitInlineRuntimeCall(expr);
-    return;
-  }
-
-  Comment cmnt(masm_, "[ CallRuntime");
-  ZoneList<Expression*>* args = expr->arguments();
-
-  if (expr->is_jsruntime()) {
-    // Push the builtins object as receiver.
-    __ mov(eax, GlobalObjectOperand());
-    __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
-
-    // Load the function from the receiver.
-    __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
-    __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
-    if (FLAG_vector_ics) {
-      __ mov(VectorLoadICDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
-      CallLoadIC(NOT_CONTEXTUAL);
-    } else {
-      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
-    }
-
-    // Push the target function under the receiver.
-    __ push(Operand(esp, 0));
-    __ mov(Operand(esp, kPointerSize), eax);
-
-    // Code common for calls using the IC.
-    ZoneList<Expression*>* args = expr->arguments();
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    // Record the source position of the call.
-    SetSourcePosition(expr->position());
-    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
-    __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-    __ CallStub(&stub);
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    context()->DropAndPlug(1, eax);
-
-  } else {
-    // Push the arguments ("left-to-right").
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    // Call the C runtime function.
-    __ CallRuntime(expr->function(), arg_count);
-
-    context()->Plug(eax);
-  }
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
-  switch (expr->op()) {
-    case Token::DELETE: {
-      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
-      Property* property = expr->expression()->AsProperty();
-      VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
-      if (property != NULL) {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
-        __ push(Immediate(Smi::FromInt(strict_mode())));
-        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        context()->Plug(eax);
-      } else if (proxy != NULL) {
-        Variable* var = proxy->var();
-        // Delete of an unqualified identifier is disallowed in strict mode
-        // but "delete this" is allowed.
-        DCHECK(strict_mode() == SLOPPY || var->is_this());
-        if (var->IsUnallocated()) {
-          __ push(GlobalObjectOperand());
-          __ push(Immediate(var->name()));
-          __ push(Immediate(Smi::FromInt(SLOPPY)));
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          context()->Plug(eax);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
-          // Result of deleting non-global variables is false.  'this' is
-          // not really a variable, though we implement it as one.  The
-          // subexpression does not have side effects.
-          context()->Plug(var->is_this());
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ push(context_register());
-          __ push(Immediate(var->name()));
-          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
-          context()->Plug(eax);
-        }
-      } else {
-        // Result of deleting non-property, non-variable reference is true.
-        // The subexpression may have side effects.
-        VisitForEffect(expr->expression());
-        context()->Plug(true);
-      }
-      break;
-    }
-
-    case Token::VOID: {
-      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
-      VisitForEffect(expr->expression());
-      context()->Plug(isolate()->factory()->undefined_value());
-      break;
-    }
-
-    case Token::NOT: {
-      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-      if (context()->IsEffect()) {
-        // Unary NOT has no side effects so it's only necessary to visit the
-        // subexpression.  Match the optimizing compiler by not branching.
-        VisitForEffect(expr->expression());
-      } else if (context()->IsTest()) {
-        const TestContext* test = TestContext::cast(context());
-        // The labels are swapped for the recursive call.
-        VisitForControl(expr->expression(),
-                        test->false_label(),
-                        test->true_label(),
-                        test->fall_through());
-        context()->Plug(test->true_label(), test->false_label());
-      } else {
-        // We handle value contexts explicitly rather than simply visiting
-        // for control and plugging the control flow into the context,
-        // because we need to prepare a pair of extra administrative AST ids
-        // for the optimizing compiler.
-        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
-        Label materialize_true, materialize_false, done;
-        VisitForControl(expr->expression(),
-                        &materialize_false,
-                        &materialize_true,
-                        &materialize_true);
-        __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
-        if (context()->IsAccumulatorValue()) {
-          __ mov(eax, isolate()->factory()->true_value());
-        } else {
-          __ Push(isolate()->factory()->true_value());
-        }
-        __ jmp(&done, Label::kNear);
-        __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
-        if (context()->IsAccumulatorValue()) {
-          __ mov(eax, isolate()->factory()->false_value());
-        } else {
-          __ Push(isolate()->factory()->false_value());
-        }
-        __ bind(&done);
-      }
-      break;
-    }
-
-    case Token::TYPEOF: {
-      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
-      { StackValueContext context(this);
-        VisitForTypeofValue(expr->expression());
-      }
-      __ CallRuntime(Runtime::kTypeof, 1);
-      context()->Plug(eax);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  DCHECK(expr->expression()->IsValidReferenceExpression());
-
-  Comment cmnt(masm_, "[ CountOperation");
-  SetSourcePosition(expr->position());
-
-  Property* prop = expr->expression()->AsProperty();
-  LhsKind assign_type = GetAssignType(prop);
-
-  // Evaluate expression and get value.
-  if (assign_type == VARIABLE) {
-    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
-    AccumulatorValueContext context(this);
-    EmitVariableLoad(expr->expression()->AsVariableProxy());
-  } else {
-    // Reserve space for result of postfix operation.
-    if (expr->is_postfix() && !context()->IsEffect()) {
-      __ push(Immediate(Smi::FromInt(0)));
-    }
-    switch (assign_type) {
-      case NAMED_PROPERTY: {
-        // Put the object both on the stack and in the register.
-        VisitForStackValue(prop->obj());
-        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
-        EmitNamedPropertyLoad(prop);
-        break;
-      }
-
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
-        EmitLoadHomeObject(prop->obj()->AsSuperReference());
-        __ push(result_register());
-        __ push(MemOperand(esp, kPointerSize));
-        __ push(result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
-        EmitLoadHomeObject(prop->obj()->AsSuperReference());
-        __ push(result_register());
-        VisitForAccumulatorValue(prop->key());
-        __ push(result_register());
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_PROPERTY: {
-        VisitForStackValue(prop->obj());
-        VisitForStackValue(prop->key());
-        __ mov(LoadDescriptor::ReceiverRegister(),
-               Operand(esp, kPointerSize));                       // Object.
-        __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));  // Key.
-        EmitKeyedPropertyLoad(prop);
-        break;
-      }
-
-      case VARIABLE:
-        UNREACHABLE();
-    }
-  }
-
-  // We need a second deoptimization point after loading the value
-  // in case evaluating the property load may have a side effect.
-  if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
-  } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
-  }
-
-  // Inline smi case if we are in a loop.
-  Label done, stub_call;
-  JumpPatchSite patch_site(masm_);
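-  // The patch site records the location of the smi-check jump so the IC
-  // machinery can later patch between the inlined smi path and the stub
-  // call as type feedback changes.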
-  if (ShouldInlineSmiCase(expr->op())) {
-    Label slow;
-    patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
-
-    // Save result for postfix expressions.
-    if (expr->is_postfix()) {
-      if (!context()->IsEffect()) {
-        // Save the result on the stack. If we have a named or keyed property
-        // we store the result under the receiver that is currently on top
-        // of the stack.
-        switch (assign_type) {
-          case VARIABLE:
-            __ push(eax);
-            break;
-          case NAMED_PROPERTY:
-            __ mov(Operand(esp, kPointerSize), eax);
-            break;
-          case NAMED_SUPER_PROPERTY:
-            __ mov(Operand(esp, 2 * kPointerSize), eax);
-            break;
-          case KEYED_PROPERTY:
-            __ mov(Operand(esp, 2 * kPointerSize), eax);
-            break;
-          case KEYED_SUPER_PROPERTY:
-            __ mov(Operand(esp, 3 * kPointerSize), eax);
-            break;
-        }
-      }
-    }
-
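-    // Smis on ia32 are 31-bit integers stored shifted left by one with a
-    // zero tag, so adding the tagged constant Smi::FromInt(1) increments
-    // the value directly; the overflow flag catches results that leave
-    // the smi range.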
-    if (expr->op() == Token::INC) {
-      __ add(eax, Immediate(Smi::FromInt(1)));
-    } else {
-      __ sub(eax, Immediate(Smi::FromInt(1)));
-    }
-    __ j(no_overflow, &done, Label::kNear);
-    // Call stub. Undo operation first.
-    if (expr->op() == Token::INC) {
-      __ sub(eax, Immediate(Smi::FromInt(1)));
-    } else {
-      __ add(eax, Immediate(Smi::FromInt(1)));
-    }
-    __ jmp(&stub_call, Label::kNear);
-    __ bind(&slow);
-  }
-  ToNumberStub convert_stub(isolate());
-  __ CallStub(&convert_stub);
-
-  // Save result for postfix expressions.
-  if (expr->is_postfix()) {
-    if (!context()->IsEffect()) {
-      // Save the result on the stack. If we have a named or keyed property
-      // we store the result under the receiver that is currently on top
-      // of the stack.
-      switch (assign_type) {
-        case VARIABLE:
-          __ push(eax);
-          break;
-        case NAMED_PROPERTY:
-          __ mov(Operand(esp, kPointerSize), eax);
-          break;
-        case NAMED_SUPER_PROPERTY:
-          __ mov(Operand(esp, 2 * kPointerSize), eax);
-          break;
-        case KEYED_PROPERTY:
-          __ mov(Operand(esp, 2 * kPointerSize), eax);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          __ mov(Operand(esp, 3 * kPointerSize), eax);
-          break;
-      }
-    }
-  }
-
-  // Record position before stub call.
-  SetSourcePosition(expr->position());
-
-  // Call stub for +1/-1.
-  __ bind(&stub_call);
-  __ mov(edx, eax);
-  __ mov(eax, Immediate(Smi::FromInt(1)));
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
-                                              NO_OVERWRITE).code();
-  CallIC(code, expr->CountBinOpFeedbackId());
-  patch_site.EmitPatchInfo();
-  __ bind(&done);
-
-  // Store the value returned in eax.
-  switch (assign_type) {
-    case VARIABLE:
-      if (expr->is_postfix()) {
-        // Perform the assignment as if via '='.
-        { EffectContext context(this);
-          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                                 Token::ASSIGN);
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-          context.Plug(eax);
-        }
-        // For all contexts except EffectContext, we have the result on
-        // top of the stack.
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        // Perform the assignment as if via '='.
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN);
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-        context()->Plug(eax);
-      }
-      break;
-    case NAMED_PROPERTY: {
-      __ mov(StoreDescriptor::NameRegister(),
-             prop->key()->AsLiteral()->value());
-      __ pop(StoreDescriptor::ReceiverRegister());
-      CallStoreIC(expr->CountStoreFeedbackId());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
-    case KEYED_PROPERTY: {
-      __ pop(StoreDescriptor::NameRegister());
-      __ pop(StoreDescriptor::ReceiverRegister());
-      Handle<Code> ic =
-          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
-      CallIC(ic, expr->CountStoreFeedbackId());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-      if (expr->is_postfix()) {
-        // Result is on the stack
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
-  }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  VariableProxy* proxy = expr->AsVariableProxy();
-  DCHECK(!context()->IsEffect());
-  DCHECK(!context()->IsTest());
-
-  if (proxy != NULL && proxy->var()->IsUnallocated()) {
-    Comment cmnt(masm_, "[ Global variable");
-    __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
-    __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
-    if (FLAG_vector_ics) {
-      __ mov(VectorLoadICDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
-    }
-    // Use a regular load, not a contextual load, to avoid a reference
-    // error.
-    CallLoadIC(NOT_CONTEXTUAL);
-    PrepareForBailout(expr, TOS_REG);
-    context()->Plug(eax);
-  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
-    Comment cmnt(masm_, "[ Lookup slot");
-    Label done, slow;
-
-    // Generate code for loading from variables potentially shadowed
-    // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    __ push(esi);
-    __ push(Immediate(proxy->name()));
-    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
-    PrepareForBailout(expr, TOS_REG);
-    __ bind(&done);
-
-    context()->Plug(eax);
-  } else {
-    // This expression cannot throw a reference error at the top level.
-    VisitInDuplicateContext(expr);
-  }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Expression* sub_expr,
-                                                 Handle<String> check) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  { AccumulatorValueContext context(this);
-    VisitForTypeofValue(sub_expr);
-  }
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
-  Factory* factory = isolate()->factory();
-  if (String::Equals(check, factory->number_string())) {
-    __ JumpIfSmi(eax, if_true);
-    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-           isolate()->factory()->heap_number_map());
-    Split(equal, if_true, if_false, fall_through);
-  } else if (String::Equals(check, factory->string_string())) {
-    __ JumpIfSmi(eax, if_false);
-    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
-    __ j(above_equal, if_false);
-    // Check for undetectable objects => false.
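-    // (Undetectable objects such as document.all must report "undefined"
-    // from typeof, so they cannot count as strings.)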
-    __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    Split(zero, if_true, if_false, fall_through);
-  } else if (String::Equals(check, factory->symbol_string())) {
-    __ JumpIfSmi(eax, if_false);
-    __ CmpObjectType(eax, SYMBOL_TYPE, edx);
-    Split(equal, if_true, if_false, fall_through);
-  } else if (String::Equals(check, factory->boolean_string())) {
-    __ cmp(eax, isolate()->factory()->true_value());
-    __ j(equal, if_true);
-    __ cmp(eax, isolate()->factory()->false_value());
-    Split(equal, if_true, if_false, fall_through);
-  } else if (String::Equals(check, factory->undefined_string())) {
-    __ cmp(eax, isolate()->factory()->undefined_value());
-    __ j(equal, if_true);
-    __ JumpIfSmi(eax, if_false);
-    // Check for undetectable objects => true.
-    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-    __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
-    __ test(ecx, Immediate(1 << Map::kIsUndetectable));
-    Split(not_zero, if_true, if_false, fall_through);
-  } else if (String::Equals(check, factory->function_string())) {
-    __ JumpIfSmi(eax, if_false);
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
-    __ j(equal, if_true);
-    __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
-    Split(equal, if_true, if_false, fall_through);
-  } else if (String::Equals(check, factory->object_string())) {
-    __ JumpIfSmi(eax, if_false);
-    __ cmp(eax, isolate()->factory()->null_value());
-    __ j(equal, if_true);
-    __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
-    __ j(below, if_false);
-    __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    __ j(above, if_false);
-    // Check for undetectable objects => false.
-    __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    Split(zero, if_true, if_false, fall_through);
-  } else {
-    if (if_false != fall_through) __ jmp(if_false);
-  }
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
-  Comment cmnt(masm_, "[ CompareOperation");
-  SetSourcePosition(expr->position());
-
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr)) return;
-
-  // Always perform the comparison for its control flow.  Pack the result
-  // into the expression's context after the comparison is performed.
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  Token::Value op = expr->op();
-  VisitForStackValue(expr->left());
-  switch (op) {
-    case Token::IN:
-      VisitForStackValue(expr->right());
-      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
-      __ cmp(eax, isolate()->factory()->true_value());
-      Split(equal, if_true, if_false, fall_through);
-      break;
-
-    case Token::INSTANCEOF: {
-      VisitForStackValue(expr->right());
-      InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
-      __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-      __ test(eax, eax);
-      // The stub returns 0 for true.
-      Split(zero, if_true, if_false, fall_through);
-      break;
-    }
-
-    default: {
-      VisitForAccumulatorValue(expr->right());
-      Condition cc = CompareIC::ComputeCondition(op);
-      __ pop(edx);
-
-      bool inline_smi_code = ShouldInlineSmiCase(op);
-      JumpPatchSite patch_site(masm_);
-      if (inline_smi_code) {
-        Label slow_case;
-        __ mov(ecx, edx);
-        __ or_(ecx, eax);
-        patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-        __ cmp(edx, eax);
-        Split(cc, if_true, if_false, NULL);
-        __ bind(&slow_case);
-      }
-
-      // Record position and call the compare IC.
-      SetSourcePosition(expr->position());
-      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
-      CallIC(ic, expr->CompareOperationFeedbackId());
-      patch_site.EmitPatchInfo();
-
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-      __ test(eax, eax);
-      Split(cc, if_true, if_false, fall_through);
-    }
-  }
-
-  // Convert the result of the comparison into one expected for this
-  // expression's context.
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
-                                              Expression* sub_expr,
-                                              NilValue nil) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
-  Handle<Object> nil_value = nil == kNullValue
-      ? isolate()->factory()->null_value()
-      : isolate()->factory()->undefined_value();
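-  // For strict equality a pointer compare against the nil value suffices;
-  // for sloppy equality, null, undefined and undetectable objects all
-  // compare equal to nil, so the CompareNilIC is used instead.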
-  if (expr->op() == Token::EQ_STRICT) {
-    __ cmp(eax, nil_value);
-    Split(equal, if_true, if_false, fall_through);
-  } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ test(eax, eax);
-    Split(not_zero, if_true, if_false, fall_through);
-  }
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(eax);
-}
-
-
-Register FullCodeGenerator::result_register() {
-  return eax;
-}
-
-
-Register FullCodeGenerator::context_register() {
-  return esi;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
-  __ mov(Operand(ebp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
-  __ mov(dst, ContextOperand(esi, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
-  Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_script_scope() ||
-      declaration_scope->is_module_scope()) {
-    // Contexts nested in the native context have a canonical empty function
-    // as their closure, not the anonymous closure containing the global
-    // code.  Pass a smi sentinel and let the runtime look up the empty
-    // function.
-    __ push(Immediate(Smi::FromInt(0)));
-  } else if (declaration_scope->is_eval_scope()) {
-    // Contexts nested inside eval code have the same closure as the context
-    // calling eval, not the anonymous closure containing the eval code.
-    // Fetch it from the context.
-    __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
-  } else {
-    DCHECK(declaration_scope->is_function_scope());
-    __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  }
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  // Cook return address on top of stack (smi encoded Code* delta)
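-  // Storing an offset instead of a raw address keeps the value valid if
-  // the GC moves the code object while the finally block runs;
-  // ExitFinallyBlock reverses the transformation.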
-  DCHECK(!result_register().is(edx));
-  __ pop(edx);
-  __ sub(edx, Immediate(masm_->CodeObject()));
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ SmiTag(edx);
-  __ push(edx);
-
-  // Store result register while executing finally block.
-  __ push(result_register());
-
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(edx, Operand::StaticVariable(pending_message_obj));
-  __ push(edx);
-
-  ExternalReference has_pending_message =
-      ExternalReference::address_of_has_pending_message(isolate());
-  __ mov(edx, Operand::StaticVariable(has_pending_message));
-  __ SmiTag(edx);
-  __ push(edx);
-
-  ExternalReference pending_message_script =
-      ExternalReference::address_of_pending_message_script(isolate());
-  __ mov(edx, Operand::StaticVariable(pending_message_script));
-  __ push(edx);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(edx));
-  // Restore pending message from stack.
-  __ pop(edx);
-  ExternalReference pending_message_script =
-      ExternalReference::address_of_pending_message_script(isolate());
-  __ mov(Operand::StaticVariable(pending_message_script), edx);
-
-  __ pop(edx);
-  __ SmiUntag(edx);
-  ExternalReference has_pending_message =
-      ExternalReference::address_of_has_pending_message(isolate());
-  __ mov(Operand::StaticVariable(has_pending_message), edx);
-
-  __ pop(edx);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(Operand::StaticVariable(pending_message_obj), edx);
-
-  // Restore result register from stack.
-  __ pop(result_register());
-
-  // Uncook return address.
-  __ pop(edx);
-  __ SmiUntag(edx);
-  __ add(edx, Immediate(masm_->CodeObject()));
-  __ jmp(edx);
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
-    int* stack_depth,
-    int* context_length) {
-  // The macros used here must preserve the result register.
-
-  // Because the handler block contains the context of the finally
-  // code, we can restore it directly from there rather than
-  // iteratively unwinding contexts via their previous links.
-  __ Drop(*stack_depth);  // Down to the handler block.
-  if (*context_length > 0) {
-    // Restore the context to its dedicated register and the stack.
-    __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
-    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-  }
-  __ PopTryHandler();
-  __ call(finally_entry_);
-
-  *stack_depth = 0;
-  *context_length = 0;
-  return previous_;
-}
-
-#undef __
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
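-// 0x66 0x90 is the two-byte operand-size-prefixed nop; it is the same
-// length as the jns+offset pair it replaces, so the call that follows is
-// left untouched.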
-#ifdef DEBUG
-static const byte kCallInstruction = 0xe8;
-#endif
-
-
-void BackEdgeTable::PatchAt(Code* unoptimized_code,
-                            Address pc,
-                            BackEdgeState target_state,
-                            Code* replacement_code) {
-  Address call_target_address = pc - kIntSize;
-  Address jns_instr_address = call_target_address - 3;
-  Address jns_offset_address = call_target_address - 2;
-
-  switch (target_state) {
-    case INTERRUPT:
-      //     sub <profiling_counter>, <delta>  ;; Not changed
-      //     jns ok
-      //     call <interrupt stub>
-      //   ok:
-      *jns_instr_address = kJnsInstruction;
-      *jns_offset_address = kJnsOffset;
-      break;
-    case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
-      //     sub <profiling_counter>, <delta>  ;; Not changed
-      //     nop
-      //     nop
-      //     call <on-stack replacement>
-      //   ok:
-      *jns_instr_address = kNopByteOne;
-      *jns_offset_address = kNopByteTwo;
-      break;
-  }
-
-  Assembler::set_target_address_at(call_target_address,
-                                   unoptimized_code,
-                                   replacement_code->entry());
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, replacement_code);
-}
-
-
-BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
-    Isolate* isolate,
-    Code* unoptimized_code,
-    Address pc) {
-  Address call_target_address = pc - kIntSize;
-  Address jns_instr_address = call_target_address - 3;
-  DCHECK_EQ(kCallInstruction, *(call_target_address - 1));
-
-  if (*jns_instr_address == kJnsInstruction) {
-    DCHECK_EQ(kJnsOffset, *(call_target_address - 2));
-    DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(),
-              Assembler::target_address_at(call_target_address,
-                                           unoptimized_code));
-    return INTERRUPT;
-  }
-
-  DCHECK_EQ(kNopByteOne, *jns_instr_address);
-  DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
-
-  if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
-      isolate->builtins()->OnStackReplacement()->entry()) {
-    return ON_STACK_REPLACEMENT;
-  }
-
-  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
-            Assembler::target_address_at(call_target_address,
-                                         unoptimized_code));
-  return OSR_AFTER_STACK_CHECK;
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index 6c77ef8..ad381c7 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/interface-descriptors.h"
@@ -16,12 +14,9 @@
 
 const Register LoadDescriptor::ReceiverRegister() { return edx; }
 const Register LoadDescriptor::NameRegister() { return ecx; }
+const Register LoadDescriptor::SlotRegister() { return eax; }
 
-
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
 
 
 const Register StoreDescriptor::ReceiverRegister() { return edx; }
@@ -29,22 +24,55 @@
 const Register StoreDescriptor::ValueRegister() { return eax; }
 
 
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
 
 
-const Register ElementTransitionAndStoreDescriptor::MapRegister() {
-  return ebx;
+const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
+
+
+const Register VectorStoreTransitionDescriptor::SlotRegister() {
+  return no_reg;
 }
 
 
-const Register InstanceofDescriptor::left() { return eax; }
-const Register InstanceofDescriptor::right() { return edx; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
+
+
+const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
+
+
+const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+
+
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
+
+
+const Register InstanceOfDescriptor::LeftRegister() { return edx; }
+const Register InstanceOfDescriptor::RightRegister() { return eax; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return edx; }
+const Register StringCompareDescriptor::RightRegister() { return eax; }
 
 
 const Register ArgumentsAccessReadDescriptor::index() { return edx; }
 const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
 
 
+const Register ArgumentsAccessNewDescriptor::function() { return edi; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+
+
+const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
+
+
 const Register ApiGetterDescriptor::function_address() { return edx; }
 
 
@@ -56,261 +84,370 @@
 }
 
 
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, ebx};
-  data->Initialize(arraysize(registers), registers, NULL);
+const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
+const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
+
+
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, edi};
-  data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ToNumberDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // ToNumberStub invokes a function, and therefore needs a context.
-  Register registers[] = {esi, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
+// static
+const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
 
 
-void FastCloneShallowArrayDescriptor::Initialize(
+// static
+const Register ToStringDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
+
+
+void NumberToStringDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, eax, ebx, ecx};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void FastCloneShallowObjectDescriptor::Initialize(
+void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, eax, ebx, ecx, edx};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void CreateAllocationSiteDescriptor::Initialize(
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, ebx, edx};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {edi, eax, ecx, edx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void StoreArrayLiteralElementDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, ecx, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {eax, ebx, ecx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, edi};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, edi, edx};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Smi()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {eax, ebx, ecx, edx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ebx, edx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ebx, edx, edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ecx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi, edx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi, edx, ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CallConstructDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // eax : number of arguments
   // ebx : feedback vector
-  // edx : (only if ebx is not the megamorphic symbol) slot in feedback
-  //       vector (Smi)
+  // ecx : new target (for IsSuperConstructorCall)
+  // edx : slot in feedback vector (Smi, for RecordCallTarget)
   // edi : constructor function
   // TODO(turbofan): So far we don't gather type feedback and hence skip the
   // slot parameter, but ArrayConstructStub needs the vector to be undefined.
-  Register registers[] = {esi, eax, edi, ebx};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {eax, edi, ecx, ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void RegExpConstructResultDescriptor::Initialize(
+void CallTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, ecx, ebx, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
+  // eax : number of arguments
+  // edi : the target to call
+  Register registers[] = {edi, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void TransitionElementsKindDescriptor::Initialize(
+void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, eax, ebx};
-  data->Initialize(arraysize(registers), registers, NULL);
+  // eax : number of arguments
+  // edx : the new target
+  // edi : the target to call
+  // ebx : allocation site or undefined
+  Register registers[] = {edi, edx, eax, ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void AllocateHeapNumberDescriptor::Initialize(
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // eax : number of arguments
+  // edx : the new target
+  // edi : the target to call
+  Register registers[] = {edi, edx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ecx, ebx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {eax, ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
-  // esi -- context
-  Register registers[] = {esi};
-  data->Initialize(arraysize(registers), registers, nullptr);
+  data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
 
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // eax -- number of arguments
   // edi -- function
   // ebx -- allocation site with elements kind
-  Register registers[] = {esi, edi, ebx};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {edi, ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer and single argument)
-  Register registers[] = {esi, edi, ebx, eax};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(),
-      Representation::Tagged(), Representation::Integer32()};
-  data->Initialize(arraysize(registers), registers, representations);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
+  // stack param count needs (constructor pointer and single argument)
+  Register registers[] = {edi, ebx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::
+    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
   // register state
   // eax -- number of arguments
   // edi -- function
-  Register registers[] = {esi, edi};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer and single argument)
-  Register registers[] = {esi, edi, eax};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Integer32()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {edi, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, edx, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, ecx, edx, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {edx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {esi, edx, eax};
-  data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
 
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ToBooleanDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ecx, edx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      esi,  // context
       ecx,  // key
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // key
-  };
-  data->Initialize(arraysize(registers), registers, representations);
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      esi,  // context
       ecx,  // name
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // name
-  };
-  data->Initialize(arraysize(registers), registers, representations);
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      esi,  // context
       edx,  // name
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // receiver
-  };
-  data->Initialize(arraysize(registers), registers, representations);
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      esi,  // context
       edi,  // JSFunction
+      edx,  // the new target
       eax,  // actual number of arguments
       ebx,  // expected number of arguments
   };
-  Representation representations[] = {
-      Representation::Tagged(),     // context
-      Representation::Tagged(),     // JSFunction
-      Representation::Integer32(),  // actual number of arguments
-      Representation::Integer32(),  // expected number of arguments
-  };
-  data->Initialize(arraysize(registers), registers, representations);
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      esi,  // context
-      eax,  // callee
+      edi,  // callee
+      ebx,  // call_data
+      ecx,  // holder
+      edx,  // api_function_address
+      eax,  // actual number of arguments
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      edi,  // callee
       ebx,  // call_data
       ecx,  // holder
       edx,  // api_function_address
   };
-  Representation representations[] = {
-      Representation::Tagged(),    // context
-      Representation::Tagged(),    // callee
-      Representation::Tagged(),    // call_data
-      Representation::Tagged(),    // holder
-      Representation::External(),  // api_function_address
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // argument count (not including receiver)
+      ebx,  // address of first argument
+      edi   // the target callable to be called
   };
-  data->Initialize(arraysize(registers), registers, representations);
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
+
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // argument count (not including receiver)
+      edx,  // new target
+      edi,  // constructor
+      ebx,  // address of first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }
-}  // namespace v8::internal
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // argument count (argc)
+      ecx,  // address of first argument (argv)
+      ebx   // the runtime function to call
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
deleted file mode 100644
index 03a0d8a..0000000
--- a/src/ia32/lithium-codegen-ia32.cc
+++ /dev/null
@@ -1,5751 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/base/bits.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/deoptimizer.h"
-#include "src/hydrogen-osr.h"
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator FINAL : public CallWrapper {
- public:
-  SafepointGenerator(LCodeGen* codegen,
-                     LPointerMap* pointers,
-                     Safepoint::DeoptMode mode)
-      : codegen_(codegen),
-        pointers_(pointers),
-        deopt_mode_(mode) {}
-  virtual ~SafepointGenerator() {}
-
-  void BeforeCall(int call_size) const OVERRIDE {}
-
-  void AfterCall() const OVERRIDE {
-    codegen_->RecordSafepoint(pointers_, deopt_mode_);
-  }
-
- private:
-  LCodeGen* codegen_;
-  LPointerMap* pointers_;
-  Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
-  LPhase phase("Z_Code generation", chunk());
-  DCHECK(is_unused());
-  status_ = GENERATING;
-
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
-  support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
-  dynamic_frame_alignment_ = info()->IsOptimizing() &&
-      ((chunk()->num_double_slots() > 2 &&
-        !chunk()->graph()->is_recursive()) ||
-       !info()->osr_ast_id().IsNone());
-
-  return GeneratePrologue() &&
-      GenerateBody() &&
-      GenerateDeferredCode() &&
-      GenerateJumpTable() &&
-      GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
-  DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
-  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
-  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
-  PopulateDeoptimizationData(code);
-  if (!info()->IsStub()) {
-    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
-  }
-}
-
-
-#ifdef _MSC_VER
-void LCodeGen::MakeSureStackPagesMapped(int offset) {
-  const int kPageSize = 4 * KB;
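-  // Touch one word per page, from the top of the reserved area downwards,
-  // so that Windows commits the stack pages one guard page at a time.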
-  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
-    __ mov(Operand(esp, offset), eax);
-  }
-}
-#endif
-
-
-void LCodeGen::SaveCallerDoubles() {
-  DCHECK(info()->saves_caller_doubles());
-  DCHECK(NeedsEagerFrame());
-  Comment(";;; Save clobbered callee double registers");
-  int count = 0;
-  BitVector* doubles = chunk()->allocated_double_registers();
-  BitVector::Iterator save_iterator(doubles);
-  while (!save_iterator.Done()) {
-    __ movsd(MemOperand(esp, count * kDoubleSize),
-             XMMRegister::FromAllocationIndex(save_iterator.Current()));
-    save_iterator.Advance();
-    count++;
-  }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
-  DCHECK(info()->saves_caller_doubles());
-  DCHECK(NeedsEagerFrame());
-  Comment(";;; Restore clobbered callee double registers");
-  BitVector* doubles = chunk()->allocated_double_registers();
-  BitVector::Iterator save_iterator(doubles);
-  int count = 0;
-  while (!save_iterator.Done()) {
-    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-             MemOperand(esp, count * kDoubleSize));
-    save_iterator.Advance();
-    count++;
-  }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
-  DCHECK(is_generating());
-
-  if (info()->IsOptimizing()) {
-    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ int3();
-    }
-#endif
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    if (info_->this_has_uses() &&
-        info_->strict_mode() == SLOPPY &&
-        !info_->is_native()) {
-      Label ok;
-      // +1 for return address.
-      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-      __ mov(ecx, Operand(esp, receiver_offset));
-
-      __ cmp(ecx, isolate()->factory()->undefined_value());
-      __ j(not_equal, &ok, Label::kNear);
-
-      __ mov(ecx, GlobalObjectOperand());
-      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
-      __ mov(Operand(esp, receiver_offset), ecx);
-
-      __ bind(&ok);
-    }
-
-    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
-      // Move state of dynamic frame alignment into edx.
-      __ Move(edx, Immediate(kNoAlignmentPadding));
-
-      Label do_not_pad, align_loop;
-      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
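-      // Keeping the frame a multiple of 2 * kPointerSize means every
-      // double spill slot ends up 8-byte aligned, avoiding misaligned
-      // movsd accesses.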
-      // Align esp + 4 to a multiple of 2 * kPointerSize.
-      __ test(esp, Immediate(kPointerSize));
-      __ j(not_zero, &do_not_pad, Label::kNear);
-      __ push(Immediate(0));
-      __ mov(ebx, esp);
-      __ mov(edx, Immediate(kAlignmentPaddingPushed));
-      // Copy arguments, receiver, and return address.
-      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
-      __ bind(&align_loop);
-      __ mov(eax, Operand(ebx, 1 * kPointerSize));
-      __ mov(Operand(ebx, 0), eax);
-      __ add(Operand(ebx), Immediate(kPointerSize));
-      __ dec(ecx);
-      __ j(not_zero, &align_loop, Label::kNear);
-      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-      __ bind(&do_not_pad);
-    }
-  }
-
-  info()->set_prologue_offset(masm_->pc_offset());
-  if (NeedsEagerFrame()) {
-    DCHECK(!frame_is_built_);
-    frame_is_built_ = true;
-    if (info()->IsStub()) {
-      __ StubPrologue();
-    } else {
-      __ Prologue(info()->IsCodePreAgingActive());
-    }
-    info()->AddNoFrameRange(0, masm_->pc_offset());
-  }
-
-  if (info()->IsOptimizing() &&
-      dynamic_frame_alignment_ &&
-      FLAG_debug_code) {
-    __ test(esp, Immediate(kPointerSize));
-    __ Assert(zero, kFrameIsExpectedToBeAligned);
-  }
-
-  // Reserve space for the stack slots needed by the code.
-  int slots = GetStackSlotCount();
-  DCHECK(slots != 0 || !info()->IsOptimizing());
-  if (slots > 0) {
-    if (slots == 1) {
-      if (dynamic_frame_alignment_) {
-        __ push(edx);
-      } else {
-        __ push(Immediate(kNoAlignmentPadding));
-      }
-    } else {
-      if (FLAG_debug_code) {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
-        MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
-        __ push(eax);
-        __ mov(Operand(eax), Immediate(slots));
-        Label loop;
-        __ bind(&loop);
-        __ mov(MemOperand(esp, eax, times_4, 0),
-               Immediate(kSlotsZapValue));
-        __ dec(eax);
-        __ j(not_zero, &loop);
-        __ pop(eax);
-      } else {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
-        MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
-      }
-
-      if (support_aligned_spilled_doubles_) {
-        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
-        // Store dynamic frame alignment state in the first local.
-        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
-        if (dynamic_frame_alignment_) {
-          __ mov(Operand(ebp, offset), edx);
-        } else {
-          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
-        }
-      }
-    }
-
-    if (info()->saves_caller_doubles()) SaveCallerDoubles();
-  }
-
-  // Possibly allocate a local context.
-  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0) {
-    Comment(";;; Allocate local context");
-    bool need_write_barrier = true;
-    // Argument to NewContext is the function, which is still in edi.
-    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub(isolate(), heap_slots);
-      __ CallStub(&stub);
-      // Result of FastNewContextStub is always in new space.
-      need_write_barrier = false;
-    } else {
-      __ push(edi);
-      __ CallRuntime(Runtime::kNewFunctionContext, 1);
-    }
-    RecordSafepoint(Safepoint::kNoLazyDeopt);
-    // Context is returned in eax.  It replaces the context passed to us.
-    // It's saved in the stack and kept live in esi.
-    __ mov(esi, eax);
-    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
-
-    // Copy parameters into context if necessary.
-    int num_parameters = scope()->num_parameters();
-    for (int i = 0; i < num_parameters; i++) {
-      Variable* var = scope()->parameter(i);
-      if (var->IsContextSlot()) {
-        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
-            (num_parameters - 1 - i) * kPointerSize;
-        // Load parameter from stack.
-        __ mov(eax, Operand(ebp, parameter_offset));
-        // Store it in the context.
-        int context_offset = Context::SlotOffset(var->index());
-        __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers eax and ebx.
-        if (need_write_barrier) {
-          __ RecordWriteContextSlot(esi,
-                                    context_offset,
-                                    eax,
-                                    ebx,
-                                    kDontSaveFPRegs);
-        } else if (FLAG_debug_code) {
-          Label done;
-          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
-          __ Abort(kExpectedNewSpaceObject);
-          __ bind(&done);
-        }
-      }
-    }
-    Comment(";;; End allocate local context");
-  }
-
-  // Trace the call.
-  if (FLAG_trace && info()->IsOptimizing()) {
-    // We have not executed any compiled code yet, so esi still holds the
-    // incoming context.
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-  return !is_aborted();
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
-  // Generate the OSR entry prologue at the first unknown OSR value, or if there
-  // are none, at the OSR entrypoint instruction.
-  if (osr_pc_offset_ >= 0) return;
-
-  osr_pc_offset_ = masm()->pc_offset();
-
-  // Move state of dynamic frame alignment into edx.
-  __ Move(edx, Immediate(kNoAlignmentPadding));
-
-  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
-    Label do_not_pad, align_loop;
-    // Align ebp + 4 to a multiple of 2 * kPointerSize.
-    __ test(ebp, Immediate(kPointerSize));
-    __ j(zero, &do_not_pad, Label::kNear);
-    __ push(Immediate(0));
-    __ mov(ebx, esp);
-    __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
-    // Move all parts of the frame over one word. The frame consists of:
-    // unoptimized frame slots, alignment state, context, frame pointer, return
-    // address, receiver, and the arguments.
-    __ mov(ecx, Immediate(scope()->num_parameters() +
-           5 + graph()->osr()->UnoptimizedFrameSlots()));
-
-    __ bind(&align_loop);
-    __ mov(eax, Operand(ebx, 1 * kPointerSize));
-    __ mov(Operand(ebx, 0), eax);
-    __ add(Operand(ebx), Immediate(kPointerSize));
-    __ dec(ecx);
-    __ j(not_zero, &align_loop, Label::kNear);
-    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-    __ sub(Operand(ebp), Immediate(kPointerSize));
-    __ bind(&do_not_pad);
-  }
-
-  // Save the first local, which is overwritten by the alignment state.
-  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
-  __ push(alignment_loc);
-
-  // Set the dynamic frame alignment state.
-  __ mov(alignment_loc, edx);
-
-  // Adjust the frame size, subsuming the unoptimized frame into the
-  // optimized frame.
-  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  DCHECK(slots >= 1);
-  __ sub(esp, Immediate((slots - 1) * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
-  if (instr->IsCall()) {
-    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
-  }
-  if (!instr->IsLazyBailout() && !instr->IsGap()) {
-    safepoints_.BumpLastLazySafepointIndex();
-  }
-}
-
-
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
-
-
-bool LCodeGen::GenerateJumpTable() {
-  Label needs_frame;
-  if (jump_table_.length() > 0) {
-    Comment(";;; -------------------- Jump table --------------------");
-  }
-  for (int i = 0; i < jump_table_.length(); i++) {
-    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
-    __ bind(&table_entry->label);
-    Address entry = table_entry->address;
-    DeoptComment(table_entry->reason);
-    if (table_entry->needs_frame) {
-      DCHECK(!info()->saves_caller_doubles());
-      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
-      if (needs_frame.is_bound()) {
-        __ jmp(&needs_frame);
-      } else {
-        __ bind(&needs_frame);
-        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
-        // This variant of deopt can only be used with stubs. Since we don't
-        // have a function pointer to install in the stack frame that we're
-        // building, install a special marker there instead.
-        DCHECK(info()->IsStub());
-        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-        // Push a PC inside the function so that the deopt code can find where
-        // the deopt comes from. It doesn't have to be the precise return
-        // address of a "calling" LAZY deopt; it only has to be somewhere
-        // inside the code body.
-        Label push_approx_pc;
-        __ call(&push_approx_pc);
-        __ bind(&push_approx_pc);
-        // Push the continuation which was stashed where the ebp should
-        // be. Replace it with the saved ebp.
-        __ push(MemOperand(esp, 3 * kPointerSize));
-        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
-        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
-        __ ret(0);  // Call the continuation without clobbering registers.
-      }
-    } else {
-      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
-      __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    }
-  }
-  return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
-  DCHECK(is_generating());
-  if (deferred_.length() > 0) {
-    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
-      LDeferredCode* code = deferred_[i];
-
-      HValue* value =
-          instructions_->at(code->instruction_index())->hydrogen_value();
-      RecordAndWritePosition(
-          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
-
-      Comment(";;; <@%d,#%d> "
-              "-------------------- Deferred %s --------------------",
-              code->instruction_index(),
-              code->instr()->hydrogen_value()->id(),
-              code->instr()->Mnemonic());
-      __ bind(code->entry());
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Build frame");
-        DCHECK(!frame_is_built_);
-        DCHECK(info()->IsStub());
-        frame_is_built_ = true;
-        // Build the frame in such a way that esi isn't trashed.
-        __ push(ebp);  // Caller's frame pointer.
-        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
-        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-        __ lea(ebp, Operand(esp, 2 * kPointerSize));
-        Comment(";;; Deferred code");
-      }
-      code->Generate();
-      if (NeedsDeferredFrame()) {
-        __ bind(code->done());
-        Comment(";;; Destroy frame");
-        DCHECK(frame_is_built_);
-        frame_is_built_ = false;
-        __ mov(esp, ebp);
-        __ pop(ebp);
-      }
-      __ jmp(code->exit());
-    }
-  }
-
-  // Deferred code is the last part of the instruction sequence. Mark
-  // the generated code as done unless we bailed out.
-  if (!is_aborted()) status_ = DONE;
-  return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
-  DCHECK(is_done());
-  if (!info()->IsStub()) {
-    // For lazy deoptimization we need space to patch a call after every call.
-    // Ensure there is always space for such patching, even if the code ends
-    // in a call.
-    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
-    while (masm()->pc_offset() < target_offset) {
-      masm()->nop();
-    }
-  }
-  safepoints_.Emit(masm(), GetStackSlotCount());
-  return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
-  return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
-  return XMMRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
-  DCHECK(op->IsRegister());
-  return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  DCHECK(op->IsDoubleRegister());
-  return ToDoubleRegister(op->index());
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
-  return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
-                                   const Representation& r) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  int32_t value = constant->Integer32Value();
-  if (r.IsInteger32()) return value;
-  DCHECK(r.IsSmiOrTagged());
-  return reinterpret_cast<int32_t>(Smi::FromInt(value));
-}
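-
-// Illustration only (not part of this file): on ia32 a Smi is the 31-bit
-// payload shifted left by one with tag bit 0 clear, so Smi::FromInt plus
-// the reinterpret_cast above amount to:
-static int32_t SmiBitsFromInt(int32_t value) {  // hypothetical helper
-  return value << 1;  // kSmiTag == 0, kSmiTagSize == 1 on 32-bit targets
-}
-// e.g. SmiBitsFromInt(3) == 6; the tagged word is reused as raw bits.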
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
-  return constant->handle(isolate());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  DCHECK(constant->HasDoubleValue());
-  return constant->DoubleValue();
-}
-
-
-ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
-  HConstant* constant = chunk_->LookupConstant(op);
-  DCHECK(constant->HasExternalReferenceValue());
-  return constant->ExternalReferenceValue();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
-  DCHECK(index < 0);
-  return -(index + 1) * kPointerSize + kPCOnStackSize;
-}
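-
-// Worked example (illustration only): with the ia32 values kPointerSize == 4
-// and kPCOnStackSize == 4, a parameter with spill index -1 lives at
-// [esp + 4], just above the saved return address, index -2 at [esp + 8]:
-//   ArgumentsOffsetWithoutFrame(-1) == -(-1 + 1) * 4 + 4 == 4
-//   ArgumentsOffsetWithoutFrame(-2) == -(-2 + 1) * 4 + 4 == 8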
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
-  if (op->IsRegister()) return Operand(ToRegister(op));
-  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
-  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-  if (NeedsEagerFrame()) {
-    return Operand(ebp, StackSlotOffset(op->index()));
-  } else {
-    // No eager frame has been built, so retrieve the parameter relative
-    // to the stack pointer.
-    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
-  }
-}
-
-
-Operand LCodeGen::HighOperand(LOperand* op) {
-  DCHECK(op->IsDoubleStackSlot());
-  if (NeedsEagerFrame()) {
-    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
-  } else {
-    // No eager frame has been built, so retrieve the parameter relative
-    // to the stack pointer.
-    return Operand(
-        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
-  }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
-                                Translation* translation) {
-  if (environment == NULL) return;
-
-  // The translation includes one command per value in the environment.
-  int translation_size = environment->translation_size();
-  // The output frame height does not include the parameters.
-  int height = translation_size - environment->parameter_count();
-
-  WriteTranslation(environment->outer(), translation);
-  bool has_closure_id = !info()->closure().is_null() &&
-      !info()->closure().is_identical_to(environment->closure());
-  int closure_id = has_closure_id
-      ? DefineDeoptimizationLiteral(environment->closure())
-      : Translation::kSelfLiteralId;
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case JS_GETTER:
-      DCHECK(translation_size == 1);
-      DCHECK(height == 0);
-      translation->BeginGetterStubFrame(closure_id);
-      break;
-    case JS_SETTER:
-      DCHECK(translation_size == 2);
-      DCHECK(height == 0);
-      translation->BeginSetterStubFrame(closure_id);
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-    case STUB:
-      translation->BeginCompiledStubFrame();
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  int object_index = 0;
-  int dematerialized_index = 0;
-  for (int i = 0; i < translation_size; ++i) {
-    LOperand* value = environment->values()->at(i);
-    AddToTranslation(environment,
-                     translation,
-                     value,
-                     environment->HasTaggedValueAt(i),
-                     environment->HasUint32ValueAt(i),
-                     &object_index,
-                     &dematerialized_index);
-  }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
-                                Translation* translation,
-                                LOperand* op,
-                                bool is_tagged,
-                                bool is_uint32,
-                                int* object_index_pointer,
-                                int* dematerialized_index_pointer) {
-  if (op == LEnvironment::materialization_marker()) {
-    int object_index = (*object_index_pointer)++;
-    if (environment->ObjectIsDuplicateAt(object_index)) {
-      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
-      translation->DuplicateObject(dupe_of);
-      return;
-    }
-    int object_length = environment->ObjectLengthAt(object_index);
-    if (environment->ObjectIsArgumentsAt(object_index)) {
-      translation->BeginArgumentsObject(object_length);
-    } else {
-      translation->BeginCapturedObject(object_length);
-    }
-    int dematerialized_index = *dematerialized_index_pointer;
-    int env_offset = environment->translation_size() + dematerialized_index;
-    *dematerialized_index_pointer += object_length;
-    for (int i = 0; i < object_length; ++i) {
-      LOperand* value = environment->values()->at(env_offset + i);
-      AddToTranslation(environment,
-                       translation,
-                       value,
-                       environment->HasTaggedValueAt(env_offset + i),
-                       environment->HasUint32ValueAt(env_offset + i),
-                       object_index_pointer,
-                       dematerialized_index_pointer);
-    }
-    return;
-  }
-
-  if (op->IsStackSlot()) {
-    if (is_tagged) {
-      translation->StoreStackSlot(op->index());
-    } else if (is_uint32) {
-      translation->StoreUint32StackSlot(op->index());
-    } else {
-      translation->StoreInt32StackSlot(op->index());
-    }
-  } else if (op->IsDoubleStackSlot()) {
-    translation->StoreDoubleStackSlot(op->index());
-  } else if (op->IsRegister()) {
-    Register reg = ToRegister(op);
-    if (is_tagged) {
-      translation->StoreRegister(reg);
-    } else if (is_uint32) {
-      translation->StoreUint32Register(reg);
-    } else {
-      translation->StoreInt32Register(reg);
-    }
-  } else if (op->IsDoubleRegister()) {
-    XMMRegister reg = ToDoubleRegister(op);
-    translation->StoreDoubleRegister(reg);
-  } else if (op->IsConstantOperand()) {
-    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
-    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
-    translation->StoreLiteral(src_index);
-  } else {
-    UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
-                               RelocInfo::Mode mode,
-                               LInstruction* instr,
-                               SafepointMode safepoint_mode) {
-  DCHECK(instr != NULL);
-  __ call(code, mode);
-  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
-  // Signal that we don't inline smi code before these stubs in the
-  // optimizing code generator.
-  if (code->kind() == Code::BINARY_OP_IC ||
-      code->kind() == Code::COMPARE_IC) {
-    __ nop();
-  }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr) {
-  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
-                           int argc,
-                           LInstruction* instr,
-                           SaveFPRegsMode save_doubles) {
-  DCHECK(instr != NULL);
-  DCHECK(instr->HasPointerMap());
-
-  __ CallRuntime(fun, argc, save_doubles);
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-
-  DCHECK(info()->is_calling());
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
-  if (context->IsRegister()) {
-    if (!ToRegister(context).is(esi)) {
-      __ mov(esi, ToRegister(context));
-    }
-  } else if (context->IsStackSlot()) {
-    __ mov(esi, ToOperand(context));
-  } else if (context->IsConstantOperand()) {
-    HConstant* constant =
-        chunk_->LookupConstant(LConstantOperand::cast(context));
-    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
-  } else {
-    UNREACHABLE();
-  }
-}
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
-                                       int argc,
-                                       LInstruction* instr,
-                                       LOperand* context) {
-  LoadContextFromDeferred(context);
-
-  __ CallRuntimeSaveDoubles(id);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-
-  DCHECK(info()->is_calling());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(
-    LEnvironment* environment, Safepoint::DeoptMode mode) {
-  environment->set_has_been_used();
-  if (!environment->HasBeenRegistered()) {
-    // Physical stack frame layout:
-    // -x ............. -4  0 ..................................... y
-    // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
-    // Layout of the environment:
-    // 0 ..................................................... size-1
-    // [parameters] [locals] [expression stack including arguments]
-
-    // Layout of the translation:
-    // 0 ........................................................ size - 1 + 4
-    // [expression stack including arguments] [locals] [4 words] [parameters]
-    // |>------------  translation_size ------------<|
-
-    int frame_count = 0;
-    int jsframe_count = 0;
-    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
-      ++frame_count;
-      if (e->frame_type() == JS_FUNCTION) {
-        ++jsframe_count;
-      }
-    }
-    Translation translation(&translations_, frame_count, jsframe_count, zone());
-    WriteTranslation(environment, &translation);
-    int deoptimization_index = deoptimizations_.length();
-    int pc_offset = masm()->pc_offset();
-    environment->Register(deoptimization_index,
-                          translation.index(),
-                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
-    deoptimizations_.Add(environment, zone());
-  }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* detail,
-                            Deoptimizer::BailoutType bailout_type) {
-  LEnvironment* environment = instr->environment();
-  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  DCHECK(environment->HasBeenRegistered());
-  int id = environment->deoptimization_index();
-  DCHECK(info()->IsOptimizing() || info()->IsStub());
-  Address entry =
-      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
-  if (entry == NULL) {
-    Abort(kBailoutWasNotPrepared);
-    return;
-  }
-
-  if (DeoptEveryNTimes()) {
-    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
-    Label no_deopt;
-    __ pushfd();
-    __ push(eax);
-    __ mov(eax, Operand::StaticVariable(count));
-    __ sub(eax, Immediate(1));
-    __ j(not_zero, &no_deopt, Label::kNear);
-    if (FLAG_trap_on_deopt) __ int3();
-    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
-    __ mov(Operand::StaticVariable(count), eax);
-    __ pop(eax);
-    __ popfd();
-    DCHECK(frame_is_built_);
-    __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    __ bind(&no_deopt);
-    __ mov(Operand::StaticVariable(count), eax);
-    __ pop(eax);
-    __ popfd();
-  }
-
-  if (info()->ShouldTrapOnDeopt()) {
-    Label done;
-    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
-    __ int3();
-    __ bind(&done);
-  }
-
-  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
-                             instr->Mnemonic(), detail);
-  DCHECK(info()->IsStub() || frame_is_built_);
-  if (cc == no_condition && frame_is_built_) {
-    DeoptComment(reason);
-    __ call(entry, RelocInfo::RUNTIME_ENTRY);
-  } else {
-    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
-                                            !frame_is_built_);
-    // We often have several deopts to the same entry; reuse the last
-    // jump entry if this is the case.
-    if (jump_table_.is_empty() ||
-        !table_entry.IsEquivalentTo(jump_table_.last())) {
-      jump_table_.Add(table_entry, zone());
-    }
-    if (cc == no_condition) {
-      __ jmp(&jump_table_.last().label);
-    } else {
-      __ j(cc, &jump_table_.last().label);
-    }
-  }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* detail) {
-  Deoptimizer::BailoutType bailout_type = info()->IsStub()
-      ? Deoptimizer::LAZY
-      : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, instr, detail, bailout_type);
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
-  int length = deoptimizations_.length();
-  if (length == 0) return;
-  Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
-
-  Handle<ByteArray> translations =
-      translations_.CreateByteArray(isolate()->factory());
-  data->SetTranslationByteArray(*translations);
-  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
-  if (info_->IsOptimizing()) {
-    // Reference to shared function info does not change between phases.
-    AllowDeferredHandleDereference allow_handle_dereference;
-    data->SetSharedFunctionInfo(*info_->shared_info());
-  } else {
-    data->SetSharedFunctionInfo(Smi::FromInt(0));
-  }
-
-  Handle<FixedArray> literals =
-      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
-  { AllowDeferredHandleDereference copy_handles;
-    for (int i = 0; i < deoptimization_literals_.length(); i++) {
-      literals->set(i, *deoptimization_literals_[i]);
-    }
-    data->SetLiteralArray(*literals);
-  }
-
-  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
-  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
-  // Populate the deoptimization entries.
-  for (int i = 0; i < length; i++) {
-    LEnvironment* env = deoptimizations_[i];
-    data->SetAstId(i, env->ast_id());
-    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
-    data->SetArgumentsStackHeight(i,
-                                  Smi::FromInt(env->arguments_stack_height()));
-    data->SetPc(i, Smi::FromInt(env->pc_offset()));
-  }
-  code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
-  int result = deoptimization_literals_.length();
-  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
-    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
-  }
-  deoptimization_literals_.Add(literal, zone());
-  return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  DCHECK(deoptimization_literals_.length() == 0);
-
-  const ZoneList<Handle<JSFunction> >* inlined_closures =
-      chunk()->inlined_closures();
-
-  for (int i = 0, length = inlined_closures->length();
-       i < length;
-       i++) {
-    DefineDeoptimizationLiteral(inlined_closures->at(i));
-  }
-
-  inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
-    LInstruction* instr, SafepointMode safepoint_mode) {
-  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
-    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
-  } else {
-    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
-  }
-}
-
-
-void LCodeGen::RecordSafepoint(
-    LPointerMap* pointers,
-    Safepoint::Kind kind,
-    int arguments,
-    Safepoint::DeoptMode deopt_mode) {
-  DCHECK(kind == expected_safepoint_kind_);
-  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index(), zone());
-    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
-      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
-    }
-  }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               Safepoint::DeoptMode mode) {
-  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
-  LPointerMap empty_pointers(zone());
-  RecordSafepoint(&empty_pointers, mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
-                                            int arguments,
-                                            Safepoint::DeoptMode mode) {
-  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
-}
-
-
-void LCodeGen::RecordAndWritePosition(int position) {
-  if (position == RelocInfo::kNoPosition) return;
-  masm()->positions_recorder()->RecordPosition(position);
-  masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
-static const char* LabelType(LLabel* label) {
-  if (label->is_loop_header()) return " (loop header)";
-  if (label->is_osr_entry()) return " (OSR entry)";
-  return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
-  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
-          current_instruction_,
-          label->hydrogen_value()->id(),
-          label->block_id(),
-          LabelType(label));
-  __ bind(label->label());
-  current_block_ = label->block_id();
-  DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
-  resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
-  for (int i = LGap::FIRST_INNER_POSITION;
-       i <= LGap::LAST_INNER_POSITION;
-       i++) {
-    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
-    LParallelMove* move = gap->GetParallelMove(inner_pos);
-    if (move != NULL) DoParallelMove(move);
-  }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
-  DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
-  // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::StringCompare: {
-      StringCompareStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(dividend.is(ToRegister(instr->result())));
-
-  // Theoretically, a variation of the branch-free code for integer division by
-  // a power of 2 (calculating the remainder via an additional multiplication
-  // (which gets simplified to an 'and') and subtraction) should be faster, and
-  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
-  // indicate that positive dividends are heavily favored, so the branching
-  // version performs better.
-  HMod* hmod = instr->hydrogen();
-  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
-  Label dividend_is_not_negative, done;
-  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
-    __ test(dividend, dividend);
-    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
-    // Note that this is correct even for kMinInt operands.
-    __ neg(dividend);
-    __ and_(dividend, mask);
-    __ neg(dividend);
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr, "minus zero");
-    }
-    __ jmp(&done, Label::kNear);
-  }
-
-  __ bind(&dividend_is_not_negative);
-  __ and_(dividend, mask);
-  __ bind(&done);
-}
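-
-// A minimal C++ sketch of the branching modulus above (illustration only,
-// assuming two's-complement int32, as on ia32):
-static int32_t ModByPowerOf2(int32_t n, int32_t divisor) {
-  uint32_t mask = static_cast<uint32_t>(
-      divisor < 0 ? -(divisor + 1) : (divisor - 1));
-  if (n >= 0) return static_cast<int32_t>(n & mask);
-  uint32_t neg_n = 0u - static_cast<uint32_t>(n);    // neg(dividend)
-  return static_cast<int32_t>(0u - (neg_n & mask));  // and_, then neg again
-}
-// e.g. ModByPowerOf2(-7, 4) == -3, and ModByPowerOf2(kMinInt, 4) == 0,
-// which is why the kBailoutOnMinusZero deopt fires on a zero result.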
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr, "division by zero");
-    return;
-  }
-
-  __ TruncatingDiv(dividend, Abs(divisor));
-  __ imul(edx, edx, Abs(divisor));
-  __ mov(eax, dividend);
-  __ sub(eax, edx);
-
-  // Check for negative zero.
-  HMod* hmod = instr->hydrogen();
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label remainder_not_zero;
-    __ j(not_zero, &remainder_not_zero, Label::kNear);
-    __ cmp(dividend, Immediate(0));
-    DeoptimizeIf(less, instr, "minus zero");
-    __ bind(&remainder_not_zero);
-  }
-}
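-
-// Illustration only: the sequence above computes n % d as n - (n / d) * d,
-// with TruncatingDiv standing in for a multiply-high ("magic number")
-// division by Abs(divisor) instead of idiv. A C++ equivalent, assuming the
-// absolute divisor is already available as a positive int32:
-static int32_t ModByConst(int32_t n, int32_t abs_d) {  // abs_d > 0
-  int32_t q = n / abs_d;  // __ TruncatingDiv(dividend, Abs(divisor))
-  return n - q * abs_d;   // the imul/sub pair above
-}
-// The result has the sign of n, matching JS % for either sign of d.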
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  HMod* hmod = instr->hydrogen();
-
-  Register left_reg = ToRegister(instr->left());
-  DCHECK(left_reg.is(eax));
-  Register right_reg = ToRegister(instr->right());
-  DCHECK(!right_reg.is(eax));
-  DCHECK(!right_reg.is(edx));
-  Register result_reg = ToRegister(instr->result());
-  DCHECK(result_reg.is(edx));
-
-  Label done;
-  // Check for x % 0; idiv would signal a divide error. We have to
-  // deopt in this case because we can't return a NaN.
-  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ test(right_reg, Operand(right_reg));
-    DeoptimizeIf(zero, instr, "division by zero");
-  }
-
-  // Check for kMinInt % -1; idiv would signal a divide error. We
-  // have to deopt if we care about -0, because we can't return that.
-  if (hmod->CheckFlag(HValue::kCanOverflow)) {
-    Label no_overflow_possible;
-    __ cmp(left_reg, kMinInt);
-    __ j(not_equal, &no_overflow_possible, Label::kNear);
-    __ cmp(right_reg, -1);
-    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr, "minus zero");
-    } else {
-      __ j(not_equal, &no_overflow_possible, Label::kNear);
-      __ Move(result_reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    }
-    __ bind(&no_overflow_possible);
-  }
-
-  // Sign extend dividend in eax into edx:eax.
-  __ cdq();
-
-  // If we care about -0, test if the dividend is <0 and the result is 0.
-  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label positive_left;
-    __ test(left_reg, Operand(left_reg));
-    __ j(not_sign, &positive_left, Label::kNear);
-    __ idiv(right_reg);
-    __ test(result_reg, Operand(result_reg));
-    DeoptimizeIf(zero, instr, "minus zero");
-    __ jmp(&done, Label::kNear);
-    __ bind(&positive_left);
-  }
-  __ idiv(right_reg);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  Register result = ToRegister(instr->result());
-  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
-  DCHECK(!result.is(dividend));
-
-  // Check for (0 / -x) that will produce negative zero.
-  HDiv* hdiv = instr->hydrogen();
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr, "minus zero");
-  }
-  // Check for (kMinInt / -1).
-  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
-    __ cmp(dividend, kMinInt);
-    DeoptimizeIf(zero, instr, "overflow");
-  }
-  // Deoptimize if remainder will not be 0.
-  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
-      divisor != 1 && divisor != -1) {
-    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
-    __ test(dividend, Immediate(mask));
-    DeoptimizeIf(not_zero, instr, "lost precision");
-  }
-  __ Move(result, dividend);
-  int32_t shift = WhichPowerOf2Abs(divisor);
-  if (shift > 0) {
-    // The arithmetic shift is always OK; the 'if' is an optimization only.
-    if (shift > 1) __ sar(result, 31);
-    __ shr(result, 32 - shift);
-    __ add(result, dividend);
-    __ sar(result, shift);
-  }
-  if (divisor < 0) __ neg(result);
-}
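-
-// A C++ sketch of the sar/shr/add/sar sequence above (illustration only,
-// assuming arithmetic right shift for signed ints, as on all V8 targets):
-// truncating division by 2^shift biases negative dividends by 2^shift - 1
-// before shifting.
-static int32_t TruncatingDivByPowerOf2(int32_t n, int shift) {  // shift >= 1
-  uint32_t sign = static_cast<uint32_t>(n >> 31);  // 0 or 0xFFFFFFFF (sar 31)
-  uint32_t bias = sign >> (32 - shift);            // 0 or 2^shift - 1 (shr)
-  return (n + static_cast<int32_t>(bias)) >> shift;  // add; sar
-}
-// e.g. TruncatingDivByPowerOf2(-7, 1) == -3. When shift == 1 the shr of the
-// dividend itself already yields the one-bit bias, hence the "shift > 1"
-// fast path above.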
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(ToRegister(instr->result()).is(edx));
-
-  if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr, "division by zero");
-    return;
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  HDiv* hdiv = instr->hydrogen();
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr, "minus zero");
-  }
-
-  __ TruncatingDiv(dividend, Abs(divisor));
-  if (divisor < 0) __ neg(edx);
-
-  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
-    __ mov(eax, edx);
-    __ imul(eax, eax, divisor);
-    __ sub(eax, dividend);
-    DeoptimizeIf(not_equal, instr, "lost precision");
-  }
-}
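-
-// Illustration only: the trailing imul/sub verifies exactness when some use
-// does not truncate, deopting unless q * divisor == dividend, i.e.
-//   (n / d) * d == n
-// e.g. 7 / 2 fails the check (3 * 2 != 7) while 8 / 2 passes (4 * 2 == 8).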
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
-  HBinaryOperation* hdiv = instr->hydrogen();
-  Register dividend = ToRegister(instr->dividend());
-  Register divisor = ToRegister(instr->divisor());
-  Register remainder = ToRegister(instr->temp());
-  DCHECK(dividend.is(eax));
-  DCHECK(remainder.is(edx));
-  DCHECK(ToRegister(instr->result()).is(eax));
-  DCHECK(!divisor.is(eax));
-  DCHECK(!divisor.is(edx));
-
-  // Check for x / 0.
-  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr, "division by zero");
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label dividend_not_zero;
-    __ test(dividend, dividend);
-    __ j(not_zero, &dividend_not_zero, Label::kNear);
-    __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr, "minus zero");
-    __ bind(&dividend_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
-    Label dividend_not_min_int;
-    __ cmp(dividend, kMinInt);
-    __ j(not_zero, &dividend_not_min_int, Label::kNear);
-    __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr, "overflow");
-    __ bind(&dividend_not_min_int);
-  }
-
-  // Sign extend to edx (= remainder).
-  __ cdq();
-  __ idiv(divisor);
-
-  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-    // Deoptimize if remainder is not 0.
-    __ test(remainder, remainder);
-    DeoptimizeIf(not_zero, instr, "lost precision");
-  }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(dividend.is(ToRegister(instr->result())));
-
-  // If the divisor is positive, things are easy: There can be no deopts and we
-  // can simply do an arithmetic right shift.
-  if (divisor == 1) return;
-  int32_t shift = WhichPowerOf2Abs(divisor);
-  if (divisor > 1) {
-    __ sar(dividend, shift);
-    return;
-  }
-
-  // If the divisor is negative, we have to negate and handle edge cases.
-  __ neg(dividend);
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr, "minus zero");
-  }
-
-  // Dividing by -1 is basically negation, unless we overflow.
-  if (divisor == -1) {
-    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr, "overflow");
-    }
-    return;
-  }
-
-  // If the negation could not overflow, simply shifting is OK.
-  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-    __ sar(dividend, shift);
-    return;
-  }
-
-  Label not_kmin_int, done;
-  __ j(no_overflow, &not_kmin_int, Label::kNear);
-  __ mov(dividend, Immediate(kMinInt / divisor));
-  __ jmp(&done, Label::kNear);
-  __ bind(&not_kmin_int);
-  __ sar(dividend, shift);
-  __ bind(&done);
-}
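-
-// Illustration only: for a positive 2^shift divisor an arithmetic right
-// shift already rounds toward minus infinity, so no fixup is needed:
-//   -7 >> 1 == -4 == floor(-7 / 2), whereas trunc(-7 / 2) == -3.
-// Negative divisors are reduced to this case by negating the dividend
-// first, with the -0 and kMinInt edge cases handled by the deopts above.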
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
-  Register dividend = ToRegister(instr->dividend());
-  int32_t divisor = instr->divisor();
-  DCHECK(ToRegister(instr->result()).is(edx));
-
-  if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr, "division by zero");
-    return;
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  HMathFloorOfDiv* hdiv = instr->hydrogen();
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr, "minus zero");
-  }
-
-  // Easy case: We need no dynamic check for the dividend, and the flooring
-  // division is the same as the truncating division.
-  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
-      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
-    __ TruncatingDiv(dividend, Abs(divisor));
-    if (divisor < 0) __ neg(edx);
-    return;
-  }
-
-  // In the general case we may need to adjust before and after the truncating
-  // division to get a flooring division.
-  Register temp = ToRegister(instr->temp3());
-  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
-  Label needs_adjustment, done;
-  __ cmp(dividend, Immediate(0));
-  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
-  __ TruncatingDiv(dividend, Abs(divisor));
-  if (divisor < 0) __ neg(edx);
-  __ jmp(&done, Label::kNear);
-  __ bind(&needs_adjustment);
-  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
-  __ TruncatingDiv(temp, Abs(divisor));
-  if (divisor < 0) __ neg(edx);
-  __ dec(edx);
-  __ bind(&done);
-}
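-
-// A C++ sketch of the adjust-then-divide path above (illustration only,
-// assuming a nonzero constant d and that the kMinInt / -1 case has been
-// ruled out elsewhere). It uses the identity
-//   floor(n / d) == trunc((n + s) / d) - 1,  s = (d > 0 ? 1 : -1),
-// which holds whenever n and d have opposite signs, even for exact
-// divisions:
-static int32_t FlooringDivByConst(int32_t n, int32_t d) {
-  bool needs_adjustment = (d > 0) ? (n < 0) : (n > 0);
-  if (!needs_adjustment) return n / d;    // trunc == floor here
-  return (n + (d > 0 ? 1 : -1)) / d - 1;  // lea temp; TruncatingDiv; dec edx
-}
-// e.g. FlooringDivByConst(-7, 2) == -4 and FlooringDivByConst(7, -2) == -4.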
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
-  HBinaryOperation* hdiv = instr->hydrogen();
-  Register dividend = ToRegister(instr->dividend());
-  Register divisor = ToRegister(instr->divisor());
-  Register remainder = ToRegister(instr->temp());
-  Register result = ToRegister(instr->result());
-  DCHECK(dividend.is(eax));
-  DCHECK(remainder.is(edx));
-  DCHECK(result.is(eax));
-  DCHECK(!divisor.is(eax));
-  DCHECK(!divisor.is(edx));
-
-  // Check for x / 0.
-  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr, "division by zero");
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label dividend_not_zero;
-    __ test(dividend, dividend);
-    __ j(not_zero, &dividend_not_zero, Label::kNear);
-    __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr, "minus zero");
-    __ bind(&dividend_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
-    Label dividend_not_min_int;
-    __ cmp(dividend, kMinInt);
-    __ j(not_zero, &dividend_not_min_int, Label::kNear);
-    __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr, "overflow");
-    __ bind(&dividend_not_min_int);
-  }
-
-  // Sign extend to edx (= remainder).
-  __ cdq();
-  __ idiv(divisor);
-
-  Label done;
-  __ test(remainder, remainder);
-  __ j(zero, &done, Label::kNear);
-  __ xor_(remainder, divisor);
-  __ sar(remainder, 31);
-  __ add(result, remainder);
-  __ bind(&done);
-}
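-
-// A C++ sketch of the xor/sar/add tail above (illustration only, assuming
-// arithmetic right shift): it converts the truncated idiv quotient into a
-// flooring one without a branch on the adjustment itself.
-static int32_t FlooringDiv(int32_t n, int32_t d) {
-  int32_t q = n / d;               // idiv quotient (eax)
-  int32_t r = n % d;               // idiv remainder (edx)
-  if (r != 0) q += (r ^ d) >> 31;  // adds -1 iff r and d have opposite signs
-  return q;                        // (kMinInt / -1 is deopted away above)
-}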
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
-  Register left = ToRegister(instr->left());
-  LOperand* right = instr->right();
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ mov(ToRegister(instr->temp()), left);
-  }
-
-  if (right->IsConstantOperand()) {
-    // Try strength reductions on the multiplication.
-    // All replacement instructions are at most as long as the imul
-    // and have better latency.
-    int constant = ToInteger32(LConstantOperand::cast(right));
-    if (constant == -1) {
-      __ neg(left);
-    } else if (constant == 0) {
-      __ xor_(left, Operand(left));
-    } else if (constant == 2) {
-      __ add(left, Operand(left));
-    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      // If we know that the multiplication can't overflow, it's safe to
-      // use instructions that don't set the overflow flag for the
-      // multiplication.
-      switch (constant) {
-        case 1:
-          // Do nothing.
-          break;
-        case 3:
-          __ lea(left, Operand(left, left, times_2, 0));
-          break;
-        case 4:
-          __ shl(left, 2);
-          break;
-        case 5:
-          __ lea(left, Operand(left, left, times_4, 0));
-          break;
-        case 8:
-          __ shl(left, 3);
-          break;
-        case 9:
-          __ lea(left, Operand(left, left, times_8, 0));
-          break;
-        case 16:
-          __ shl(left, 4);
-          break;
-        default:
-          __ imul(left, left, constant);
-          break;
-      }
-    } else {
-      __ imul(left, left, constant);
-    }
-  } else {
-    if (instr->hydrogen()->representation().IsSmi()) {
-      __ SmiUntag(left);
-    }
-    __ imul(left, ToOperand(right));
-  }
-
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr, "overflow");
-  }
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // Bail out if the result is supposed to be negative zero.
-    Label done;
-    __ test(left, Operand(left));
-    __ j(not_zero, &done, Label::kNear);
-    if (right->IsConstantOperand()) {
-      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
-        DeoptimizeIf(no_condition, instr, "minus zero");
-      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
-        __ cmp(ToRegister(instr->temp()), Immediate(0));
-        DeoptimizeIf(less, instr, "minus zero");
-      }
-    } else {
-      // Test the non-zero operand for negative sign.
-      __ or_(ToRegister(instr->temp()), ToOperand(right));
-      DeoptimizeIf(sign, instr, "minus zero");
-    }
-    __ bind(&done);
-  }
-}
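-
-// Illustration only: the strength reductions above rely on these
-// identities, each matching imul's low 32 bits exactly:
-//   n * 3 == n + n*2   -> lea r, [r + r*2]
-//   n * 5 == n + n*4   -> lea r, [r + r*4]
-//   n * 9 == n + n*8   -> lea r, [r + r*8]
-//   n * 2^k == n << k  -> shl r, k
-// They are reserved for the no-overflow case because, unlike imul, lea and
-// shl do not set the overflow flag.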
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  DCHECK(left->Equals(instr->result()));
-  DCHECK(left->IsRegister());
-
-  if (right->IsConstantOperand()) {
-    int32_t right_operand =
-        ToRepresentation(LConstantOperand::cast(right),
-                         instr->hydrogen()->representation());
-    switch (instr->op()) {
-      case Token::BIT_AND:
-        __ and_(ToRegister(left), right_operand);
-        break;
-      case Token::BIT_OR:
-        __ or_(ToRegister(left), right_operand);
-        break;
-      case Token::BIT_XOR:
-        if (right_operand == int32_t(~0)) {
-          __ not_(ToRegister(left));
-        } else {
-          __ xor_(ToRegister(left), right_operand);
-        }
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else {
-    switch (instr->op()) {
-      case Token::BIT_AND:
-        __ and_(ToRegister(left), ToOperand(right));
-        break;
-      case Token::BIT_OR:
-        __ or_(ToRegister(left), ToOperand(right));
-        break;
-      case Token::BIT_XOR:
-        __ xor_(ToRegister(left), ToOperand(right));
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  DCHECK(left->Equals(instr->result()));
-  DCHECK(left->IsRegister());
-  if (right->IsRegister()) {
-    DCHECK(ToRegister(right).is(ecx));
-
-    switch (instr->op()) {
-      case Token::ROR:
-        __ ror_cl(ToRegister(left));
-        break;
-      case Token::SAR:
-        __ sar_cl(ToRegister(left));
-        break;
-      case Token::SHR:
-        __ shr_cl(ToRegister(left));
-        if (instr->can_deopt()) {
-          __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr, "negative value");
-        }
-        break;
-      case Token::SHL:
-        __ shl_cl(ToRegister(left));
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else {
-    int value = ToInteger32(LConstantOperand::cast(right));
-    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
-    switch (instr->op()) {
-      case Token::ROR:
-        if (shift_count == 0 && instr->can_deopt()) {
-          __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr, "negative value");
-        } else {
-          __ ror(ToRegister(left), shift_count);
-        }
-        break;
-      case Token::SAR:
-        if (shift_count != 0) {
-          __ sar(ToRegister(left), shift_count);
-        }
-        break;
-      case Token::SHR:
-        if (shift_count != 0) {
-          __ shr(ToRegister(left), shift_count);
-        } else if (instr->can_deopt()) {
-          __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr, "negative value");
-        }
-        break;
-      case Token::SHL:
-        if (shift_count != 0) {
-          if (instr->hydrogen_value()->representation().IsSmi() &&
-              instr->can_deopt()) {
-            if (shift_count != 1) {
-              __ shl(ToRegister(left), shift_count - 1);
-            }
-            __ SmiTag(ToRegister(left));
-            DeoptimizeIf(overflow, instr, "overflow");
-          } else {
-            __ shl(ToRegister(left), shift_count);
-          }
-        }
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  DCHECK(left->Equals(instr->result()));
-
-  if (right->IsConstantOperand()) {
-    __ sub(ToOperand(left),
-           ToImmediate(right, instr->hydrogen()->representation()));
-  } else {
-    __ sub(ToRegister(left), ToOperand(right));
-  }
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr, "overflow");
-  }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
-  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
-  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
-  double v = instr->value();
-  uint64_t int_val = bit_cast<uint64_t, double>(v);
-  int32_t lower = static_cast<int32_t>(int_val);
-  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-  DCHECK(instr->result()->IsDoubleRegister());
-
-  XMMRegister res = ToDoubleRegister(instr->result());
-  if (int_val == 0) {
-    __ xorps(res, res);
-  } else {
-    Register temp = ToRegister(instr->temp());
-    if (CpuFeatures::IsSupported(SSE4_1)) {
-      CpuFeatureScope scope2(masm(), SSE4_1);
-      if (lower != 0) {
-        __ Move(temp, Immediate(lower));
-        __ movd(res, Operand(temp));
-        __ Move(temp, Immediate(upper));
-        __ pinsrd(res, Operand(temp), 1);
-      } else {
-        __ xorps(res, res);
-        __ Move(temp, Immediate(upper));
-        __ pinsrd(res, Operand(temp), 1);
-      }
-    } else {
-      __ Move(temp, Immediate(upper));
-      __ movd(res, Operand(temp));
-      __ psllq(res, 32);
-      if (lower != 0) {
-        XMMRegister xmm_scratch = double_scratch0();
-        __ Move(temp, Immediate(lower));
-        __ movd(xmm_scratch, Operand(temp));
-        __ orps(res, xmm_scratch);
-      }
-    }
-  }
-}
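-
-// A C++ sketch of what the movd/pinsrd (or movd/psllq/orps) sequences build
-// (illustration only; assumes <cstring> and <cstdint>):
-static double DoubleFromHalves(uint32_t lower, uint32_t upper) {
-  uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
-  double result;
-  std::memcpy(&result, &bits, sizeof(result));  // bit_cast<double>(bits)
-  return result;
-}
-// e.g. DoubleFromHalves(0, 0x3FF00000) == 1.0.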
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
-  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
-  Register reg = ToRegister(instr->result());
-  Handle<Object> object = instr->value(isolate());
-  AllowDeferredHandleDereference smi_check;
-  __ LoadObject(reg, object);
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
-  Register object = ToRegister(instr->date());
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->temp());
-  Smi* index = instr->index();
-  Label runtime, done;
-  DCHECK(object.is(result));
-  DCHECK(object.is(eax));
-
-  __ test(object, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr, "Smi");
-  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  DeoptimizeIf(not_equal, instr, "not a date object");
-
-  if (index->value() == 0) {
-    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ mov(scratch, Operand::StaticVariable(stamp));
-      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
-      __ j(not_equal, &runtime, Label::kNear);
-      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
-                                          kPointerSize * index->value()));
-      __ jmp(&done, Label::kNear);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch);
-    __ mov(Operand(esp, 0), object);
-    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-}
-
-
-Operand LCodeGen::BuildSeqStringOperand(Register string,
-                                        LOperand* index,
-                                        String::Encoding encoding) {
-  if (index->IsConstantOperand()) {
-    int offset = ToRepresentation(LConstantOperand::cast(index),
-                                  Representation::Integer32());
-    if (encoding == String::TWO_BYTE_ENCODING) {
-      offset *= kUC16Size;
-    }
-    STATIC_ASSERT(kCharSize == 1);
-    return FieldOperand(string, SeqString::kHeaderSize + offset);
-  }
-  return FieldOperand(
-      string, ToRegister(index),
-      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
-      SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
-  String::Encoding encoding = instr->hydrogen()->encoding();
-  Register result = ToRegister(instr->result());
-  Register string = ToRegister(instr->string());
-
-  if (FLAG_debug_code) {
-    __ push(string);
-    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
-    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
-
-    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
-                             ? one_byte_seq_type : two_byte_seq_type));
-    __ Check(equal, kUnexpectedStringType);
-    __ pop(string);
-  }
-
-  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
-  if (encoding == String::ONE_BYTE_ENCODING) {
-    __ movzx_b(result, operand);
-  } else {
-    __ movzx_w(result, operand);
-  }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
-  String::Encoding encoding = instr->hydrogen()->encoding();
-  Register string = ToRegister(instr->string());
-
-  if (FLAG_debug_code) {
-    Register value = ToRegister(instr->value());
-    Register index = ToRegister(instr->index());
-    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
-    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    int encoding_mask =
-        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
-        ? one_byte_seq_type : two_byte_seq_type;
-    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
-  }
-
-  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
-  if (instr->value()->IsConstantOperand()) {
-    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
-                                 Representation::Integer32());
-    DCHECK_LE(0, value);
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      DCHECK_LE(value, String::kMaxOneByteCharCode);
-      __ mov_b(operand, static_cast<int8_t>(value));
-    } else {
-      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
-      __ mov_w(operand, static_cast<int16_t>(value));
-    }
-  } else {
-    Register value = ToRegister(instr->value());
-    if (encoding == String::ONE_BYTE_ENCODING) {
-      __ mov_b(operand, value);
-    } else {
-      __ mov_w(operand, value);
-    }
-  }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-
-  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
-    if (right->IsConstantOperand()) {
-      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
-                                        instr->hydrogen()->representation());
-      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
-    } else {
-      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
-      __ lea(ToRegister(instr->result()), address);
-    }
-  } else {
-    if (right->IsConstantOperand()) {
-      __ add(ToOperand(left),
-             ToImmediate(right, instr->hydrogen()->representation()));
-    } else {
-      __ add(ToRegister(left), ToOperand(right));
-    }
-    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      DeoptimizeIf(overflow, instr, "overflow");
-    }
-  }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  DCHECK(left->Equals(instr->result()));
-  HMathMinMax::Operation operation = instr->hydrogen()->operation();
-  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
-    Label return_left;
-    Condition condition = (operation == HMathMinMax::kMathMin)
-        ? less_equal
-        : greater_equal;
-    if (right->IsConstantOperand()) {
-      Operand left_op = ToOperand(left);
-      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
-                                        instr->hydrogen()->representation());
-      __ cmp(left_op, immediate);
-      __ j(condition, &return_left, Label::kNear);
-      __ mov(left_op, immediate);
-    } else {
-      Register left_reg = ToRegister(left);
-      Operand right_op = ToOperand(right);
-      __ cmp(left_reg, right_op);
-      __ j(condition, &return_left, Label::kNear);
-      __ mov(left_reg, right_op);
-    }
-    __ bind(&return_left);
-  } else {
-    DCHECK(instr->hydrogen()->representation().IsDouble());
-    Label check_nan_left, check_zero, return_left, return_right;
-    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
-    XMMRegister left_reg = ToDoubleRegister(left);
-    XMMRegister right_reg = ToDoubleRegister(right);
-    __ ucomisd(left_reg, right_reg);
-    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
-    __ j(equal, &check_zero, Label::kNear);  // left == right.
-    __ j(condition, &return_left, Label::kNear);
-    __ jmp(&return_right, Label::kNear);
-
-    __ bind(&check_zero);
-    XMMRegister xmm_scratch = double_scratch0();
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(left_reg, xmm_scratch);
-    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
-    // At this point, both left and right are either 0 or -0.
-    if (operation == HMathMinMax::kMathMin) {
-      __ orpd(left_reg, right_reg);
-    } else {
-      // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
-      __ addsd(left_reg, right_reg);
-    }
-    __ jmp(&return_left, Label::kNear);
-
-    __ bind(&check_nan_left);
-    __ ucomisd(left_reg, left_reg);  // NaN check.
-    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
-    __ bind(&return_right);
-    __ movaps(left_reg, right_reg);
-
-    __ bind(&return_left);
-  }
-}
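-
-// Illustration only: when both inputs compare equal and are zero, only the
-// sign bits distinguish +0 from -0, so bit tricks pick the right result:
-//   min(+0, -0) == -0: orpd ORs the sign bits, set if either input is -0.
-//   max(+0, -0) == +0: addsd works because (+0) + (-0) == +0, while
-//   (-0) + (-0) == -0, under the default round-to-nearest mode.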
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  XMMRegister left = ToDoubleRegister(instr->left());
-  XMMRegister right = ToDoubleRegister(instr->right());
-  XMMRegister result = ToDoubleRegister(instr->result());
-  switch (instr->op()) {
-    case Token::ADD:
-      __ addsd(left, right);
-      break;
-    case Token::SUB:
-      __ subsd(left, right);
-      break;
-    case Token::MUL:
-      __ mulsd(left, right);
-      break;
-    case Token::DIV:
-      __ divsd(left, right);
-      // Don't delete this mov. It may improve performance on some CPUs
-      // when there is a mulsd depending on the result.
-      __ movaps(left, left);
-      break;
-    case Token::MOD: {
-      // Pass two doubles as arguments on the stack.
-      __ PrepareCallCFunction(4, eax);
-      __ movsd(Operand(esp, 0 * kDoubleSize), left);
-      __ movsd(Operand(esp, 1 * kDoubleSize), right);
-      __ CallCFunction(
-          ExternalReference::mod_two_doubles_operation(isolate()),
-          4);
-
-      // Return value is in st(0) on ia32.
-      // Store it into the result register.
-      __ sub(Operand(esp), Immediate(kDoubleSize));
-      __ fstp_d(Operand(esp, 0));
-      __ movsd(result, Operand(esp, 0));
-      __ add(Operand(esp), Immediate(kDoubleSize));
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->left()).is(edx));
-  DCHECK(ToRegister(instr->right()).is(eax));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
-  CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
-  int left_block = instr->TrueDestination(chunk_);
-  int right_block = instr->FalseDestination(chunk_);
-
-  int next_block = GetNextEmittedBlock();
-
-  if (right_block == left_block || cc == no_condition) {
-    EmitGoto(left_block);
-  } else if (left_block == next_block) {
-    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
-  } else if (right_block == next_block) {
-    __ j(cc, chunk_->GetAssemblyLabel(left_block));
-  } else {
-    __ j(cc, chunk_->GetAssemblyLabel(left_block));
-    __ jmp(chunk_->GetAssemblyLabel(right_block));
-  }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
-  int false_block = instr->FalseDestination(chunk_);
-  if (cc == no_condition) {
-    __ jmp(chunk_->GetAssemblyLabel(false_block));
-  } else {
-    __ j(cc, chunk_->GetAssemblyLabel(false_block));
-  }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsSmiOrInteger32()) {
-    Register reg = ToRegister(instr->value());
-    __ test(reg, Operand(reg));
-    EmitBranch(instr, not_zero);
-  } else if (r.IsDouble()) {
-    DCHECK(!info()->IsStub());
-    XMMRegister reg = ToDoubleRegister(instr->value());
-    XMMRegister xmm_scratch = double_scratch0();
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(reg, xmm_scratch);
-    EmitBranch(instr, not_equal);
-  } else {
-    DCHECK(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsBoolean()) {
-      DCHECK(!info()->IsStub());
-      __ cmp(reg, factory()->true_value());
-      EmitBranch(instr, equal);
-    } else if (type.IsSmi()) {
-      DCHECK(!info()->IsStub());
-      __ test(reg, Operand(reg));
-      EmitBranch(instr, not_equal);
-    } else if (type.IsJSArray()) {
-      DCHECK(!info()->IsStub());
-      EmitBranch(instr, no_condition);
-    } else if (type.IsHeapNumber()) {
-      DCHECK(!info()->IsStub());
-      XMMRegister xmm_scratch = double_scratch0();
-      __ xorps(xmm_scratch, xmm_scratch);
-      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
-      EmitBranch(instr, not_equal);
-    } else if (type.IsString()) {
-      DCHECK(!info()->IsStub());
-      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
-      EmitBranch(instr, not_equal);
-    } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
-
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
-        // undefined -> false.
-        __ cmp(reg, factory()->undefined_value());
-        __ j(equal, instr->FalseLabel(chunk_));
-      }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
-        // true -> true.
-        __ cmp(reg, factory()->true_value());
-        __ j(equal, instr->TrueLabel(chunk_));
-        // false -> false.
-        __ cmp(reg, factory()->false_value());
-        __ j(equal, instr->FalseLabel(chunk_));
-      }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
-        // 'null' -> false.
-        __ cmp(reg, factory()->null_value());
-        __ j(equal, instr->FalseLabel(chunk_));
-      }
-
-      if (expected.Contains(ToBooleanStub::SMI)) {
-        // Smis: 0 -> false, all other -> true.
-        __ test(reg, Operand(reg));
-        __ j(equal, instr->FalseLabel(chunk_));
-        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-      } else if (expected.NeedsMap()) {
-        // If we need a map later and have a Smi -> deopt.
-        __ test(reg, Immediate(kSmiTagMask));
-        DeoptimizeIf(zero, instr, "Smi");
-      }
-
-      Register map = no_reg;  // Keep the compiler happy.
-      if (expected.NeedsMap()) {
-        map = ToRegister(instr->temp());
-        DCHECK(!map.is(reg));
-        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
-
-        if (expected.CanBeUndetectable()) {
-          // Undetectable -> false.
-          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-                    1 << Map::kIsUndetectable);
-          __ j(not_zero, instr->FalseLabel(chunk_));
-        }
-      }
-
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
-        // spec object -> true.
-        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
-        __ j(above_equal, instr->TrueLabel(chunk_));
-      }
-
-      if (expected.Contains(ToBooleanStub::STRING)) {
-        // String value -> false iff empty.
-        Label not_string;
-        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
-        __ j(above_equal, &not_string, Label::kNear);
-        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
-        __ j(not_zero, instr->TrueLabel(chunk_));
-        __ jmp(instr->FalseLabel(chunk_));
-        __ bind(&not_string);
-      }
-
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
-        // Symbol value -> true.
-        __ CmpInstanceType(map, SYMBOL_TYPE);
-        __ j(equal, instr->TrueLabel(chunk_));
-      }
-
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
-        // heap number -> false iff +0, -0, or NaN.
-        Label not_heap_number;
-        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-               factory()->heap_number_map());
-        __ j(not_equal, &not_heap_number, Label::kNear);
-        XMMRegister xmm_scratch = double_scratch0();
-        __ xorps(xmm_scratch, xmm_scratch);
-        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
-        __ j(zero, instr->FalseLabel(chunk_));
-        __ jmp(instr->TrueLabel(chunk_));
-        __ bind(&not_heap_number);
-      }
-
-      if (!expected.IsGeneric()) {
-        // We've seen something for the first time -> deopt.
-        // This can only happen if we are not generic already.
-        DeoptimizeIf(no_condition, instr, "unexpected object");
-      }
-    }
-  }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
-  if (!IsNextEmittedBlock(block)) {
-    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
-  }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
-  EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
-  Condition cond = no_condition;
-  switch (op) {
-    case Token::EQ:
-    case Token::EQ_STRICT:
-      cond = equal;
-      break;
-    case Token::NE:
-    case Token::NE_STRICT:
-      cond = not_equal;
-      break;
-    case Token::LT:
-      cond = is_unsigned ? below : less;
-      break;
-    case Token::GT:
-      cond = is_unsigned ? above : greater;
-      break;
-    case Token::LTE:
-      cond = is_unsigned ? below_equal : less_equal;
-      break;
-    case Token::GTE:
-      cond = is_unsigned ? above_equal : greater_equal;
-      break;
-    case Token::IN:
-    case Token::INSTANCEOF:
-    default:
-      UNREACHABLE();
-  }
-  return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  bool is_unsigned =
-      instr->is_double() ||
-      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
-      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
-  Condition cc = TokenToCondition(instr->op(), is_unsigned);
-
-  if (left->IsConstantOperand() && right->IsConstantOperand()) {
-    // We can statically evaluate the comparison.
-    double left_val = ToDouble(LConstantOperand::cast(left));
-    double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
-    EmitGoto(next_block);
-  } else {
-    if (instr->is_double()) {
-      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-      // Don't base result on EFLAGS when a NaN is involved. Instead
-      // jump to the false block.
-      __ j(parity_even, instr->FalseLabel(chunk_));
-    } else {
-      if (right->IsConstantOperand()) {
-        __ cmp(ToOperand(left),
-               ToImmediate(right, instr->hydrogen()->representation()));
-      } else if (left->IsConstantOperand()) {
-        __ cmp(ToOperand(right),
-               ToImmediate(left, instr->hydrogen()->representation()));
-        // We commuted the operands, so commute the condition.
-        cc = CommuteCondition(cc);
-      } else {
-        __ cmp(ToRegister(left), ToOperand(right));
-      }
-    }
-    EmitBranch(instr, cc);
-  }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
-  Register left = ToRegister(instr->left());
-
-  if (instr->right()->IsConstantOperand()) {
-    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
-    __ CmpObject(left, right);
-  } else {
-    Operand right = ToOperand(instr->right());
-    __ cmp(left, right);
-  }
-  EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
-  if (instr->hydrogen()->representation().IsTagged()) {
-    Register input_reg = ToRegister(instr->object());
-    __ cmp(input_reg, factory()->the_hole_value());
-    EmitBranch(instr, equal);
-    return;
-  }
-
-  XMMRegister input_reg = ToDoubleRegister(instr->object());
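-  // A self-compare raises the parity flag only for NaN, and the hole is
-  // encoded as a NaN, so any ordered value branches false immediately.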
-  __ ucomisd(input_reg, input_reg);
-  EmitFalseBranch(instr, parity_odd);
-
-  __ sub(esp, Immediate(kDoubleSize));
-  __ movsd(MemOperand(esp, 0), input_reg);
-
-  __ add(esp, Immediate(kDoubleSize));
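-  // The double still sits just below esp; its upper 32 bits at esp - 4
-  // distinguish the hole NaN pattern from any other value.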
-  int offset = sizeof(kHoleNanUpper32);
-  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
-  EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    XMMRegister value = ToDoubleRegister(instr->value());
-    XMMRegister xmm_scratch = double_scratch0();
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(xmm_scratch, value);
-    EmitFalseBranch(instr, not_equal);
-    __ movmskpd(scratch, value);
-    __ test(scratch, Immediate(1));
-    EmitBranch(instr, not_zero);
-  } else {
-    Register value = ToRegister(instr->value());
-    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
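-    // The upper word of -0.0 is 0x80000000, and x - 1 overflows only for
-    // that value, so no_overflow rules out -0.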
-    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
-           Immediate(0x1));
-    EmitFalseBranch(instr, no_overflow);
-    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
-           Immediate(0x00000000));
-    EmitBranch(instr, equal);
-  }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
-                                 Register temp1,
-                                 Label* is_not_object,
-                                 Label* is_object) {
-  __ JumpIfSmi(input, is_not_object);
-
-  __ cmp(input, isolate()->factory()->null_value());
-  __ j(equal, is_object);
-
-  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined.
-  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  __ j(not_zero, is_not_object);
-
-  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
-  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  __ j(below, is_not_object);
-  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
-  Register reg = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-
-  Condition true_cond = EmitIsObject(
-      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
-  EmitBranch(instr, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
-                                 Register temp1,
-                                 Label* is_not_string,
-                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
-  if (check_needed == INLINE_SMI_CHECK) {
-    __ JumpIfSmi(input, is_not_string);
-  }
-
-  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
-  return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
-  Register reg = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-
-  SmiCheck check_needed =
-      instr->hydrogen()->value()->type().IsHeapObject()
-          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
-  Condition true_cond = EmitIsString(
-      reg, temp, instr->FalseLabel(chunk_), check_needed);
-
-  EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  Operand input = ToOperand(instr->value());
-
-  __ test(input, Immediate(kSmiTagMask));
-  EmitBranch(instr, zero);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-
-  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
-  }
-  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  EmitBranch(instr, not_zero);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  Token::Value op = instr->op();
-
-  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  Condition condition = ComputeCompareCondition(op);
-  __ test(eax, Operand(eax));
-
-  EmitBranch(instr, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
-  InstanceType from = instr->from();
-  InstanceType to = instr->to();
-  if (from == FIRST_TYPE) return to;
-  DCHECK(from == to || to == LAST_TYPE);
-  return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
-  InstanceType from = instr->from();
-  InstanceType to = instr->to();
-  if (from == to) return equal;
-  if (to == LAST_TYPE) return above_equal;
-  if (from == FIRST_TYPE) return below_equal;
-  UNREACHABLE();
-  return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-
-  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
-  }
-
-  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
-  EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-
-  __ AssertString(input);
-
-  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-
-  __ test(FieldOperand(input, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  EmitBranch(instr, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag.  Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
-                               Label* is_false,
-                               Handle<String> class_name,
-                               Register input,
-                               Register temp,
-                               Register temp2) {
-  DCHECK(!input.is(temp));
-  DCHECK(!input.is(temp2));
-  DCHECK(!temp.is(temp2));
-  __ JumpIfSmi(input, is_false);
-
-  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    // Assuming the following assertions, we can use the same compares to test
-    // for both being a function type and being in the object type range.
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  FIRST_SPEC_OBJECT_TYPE + 1);
-    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  LAST_SPEC_OBJECT_TYPE - 1);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-    __ j(below, is_false);
-    __ j(equal, is_true);
-    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
-    __ j(equal, is_true);
-  } else {
-    // Faster code path to avoid two compares: subtract lower bound from the
-    // actual type and do a signed compare with the width of the type range.
-    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
-    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ j(above, is_false);
-  }
-
-  // Now we are in the FIRST_NONCALLABLE_SPEC_OBJECT_TYPE to
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
-  // Check if the constructor in the map is a function.
-  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
-  // Objects with a non-function constructor have class 'Object'.
-  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
-  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
-    __ j(not_equal, is_true);
-  } else {
-    __ j(not_equal, is_false);
-  }
-
-  // temp now contains the constructor function. Grab the
-  // instance class name from there.
-  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(temp, FieldOperand(temp,
-                            SharedFunctionInfo::kInstanceClassNameOffset));
-  // The class name we are testing against is internalized since it's a literal.
-  // The name in the constructor is internalized because of the way the context
-  // is booted.  This routine isn't expected to work for random API-created
-  // classes and it doesn't have to because you can't access it with natives
-  // syntax.  Since both sides are internalized it is sufficient to use an
-  // identity comparison.
-  __ cmp(temp, class_name);
-  // End with the answer in the z flag.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-  Register temp2 = ToRegister(instr->temp2());
-
-  Handle<String> class_name = instr->hydrogen()->class_name();
-
-  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
-      class_name, input, temp, temp2);
-
-  EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
-  Register reg = ToRegister(instr->value());
-  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
-  EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  // Object and function are in fixed registers defined by the stub.
-  DCHECK(ToRegister(instr->context()).is(esi));
-  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
-  Label true_value, done;
-  __ test(eax, Operand(eax));
-  __ j(zero, &true_value, Label::kNear);
-  __ mov(ToRegister(instr->result()), factory()->false_value());
-  __ jmp(&done, Label::kNear);
-  __ bind(&true_value);
-  __ mov(ToRegister(instr->result()), factory()->true_value());
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
-   public:
-    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
-                                  LInstanceOfKnownGlobal* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
-    }
-    LInstruction* instr() OVERRIDE { return instr_; }
-    Label* map_check() { return &map_check_; }
-   private:
-    LInstanceOfKnownGlobal* instr_;
-    Label map_check_;
-  };
-
-  DeferredInstanceOfKnownGlobal* deferred;
-  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
-  Label done, false_result;
-  Register object = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-
-  // A Smi is not an instance of anything.
-  __ JumpIfSmi(object, &false_result, Label::kNear);
-
-  // This is the inlined call site instanceof cache. The two occurrences of the
-  // hole value will be patched to the last map/result pair generated by the
-  // instanceof stub.
-  Label cache_miss;
-  Register map = ToRegister(instr->temp());
-  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
-  __ bind(deferred->map_check());  // Label for calculating code patching.
-  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
-  __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
-  __ j(not_equal, &cache_miss, Label::kNear);
-  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
-  __ jmp(&done, Label::kNear);
-
-  // The inlined call site cache did not match. Check for null and string
-  // before calling the deferred code.
-  __ bind(&cache_miss);
-  // Null is not an instance of anything.
-  __ cmp(object, factory()->null_value());
-  __ j(equal, &false_result, Label::kNear);
-
-  // String values are not instances of anything.
-  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
-  __ j(is_string, &false_result, Label::kNear);
-
-  // Go to the deferred code.
-  __ jmp(deferred->entry());
-
-  __ bind(&false_result);
-  __ mov(ToRegister(instr->result()), factory()->false_value());
-
-  // Here the result is either true or false. The deferred code also
-  // produces a true or false object.
-  __ bind(deferred->exit());
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                               Label* map_check) {
-  PushSafepointRegistersScope scope(this);
-
-  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
-  flags = static_cast<InstanceofStub::Flags>(
-      flags | InstanceofStub::kArgsInRegisters);
-  flags = static_cast<InstanceofStub::Flags>(
-      flags | InstanceofStub::kCallSiteInlineCheck);
-  flags = static_cast<InstanceofStub::Flags>(
-      flags | InstanceofStub::kReturnTrueFalseObject);
-  InstanceofStub stub(isolate(), flags);
-
-  // Get the temp register reserved by the instruction. This needs to be a
-  // register which is pushed last by PushSafepointRegisters, as the top of
-  // the stack is used to pass the offset of the map check location to
-  // the stub.
-  Register temp = ToRegister(instr->temp());
-  DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
-  __ LoadHeapObject(InstanceofStub::right(), instr->function());
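-  // delta is the distance from the map check to the return address of the
-  // stub call; kAdditionalDelta accounts for the instructions emitted below
-  // (the mov, the safepoint store, and the call itself).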
-  static const int kAdditionalDelta = 13;
-  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
-  __ mov(temp, Immediate(delta));
-  __ StoreToSafepointRegisterSlot(temp, temp);
-  CallCodeGeneric(stub.GetCode(),
-                  RelocInfo::CODE_TARGET,
-                  instr,
-                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  // Get the deoptimization index of the LLazyBailout-environment that
-  // corresponds to this instruction.
-  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
-  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-
-  // Put the result value into the eax slot and restore all registers.
-  __ StoreToSafepointRegisterSlot(eax, eax);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
-  Token::Value op = instr->op();
-
-  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  Condition condition = ComputeCompareCondition(op);
-  Label true_value, done;
-  __ test(eax, Operand(eax));
-  __ j(condition, &true_value, Label::kNear);
-  __ mov(ToRegister(instr->result()), factory()->false_value());
-  __ jmp(&done, Label::kNear);
-  __ bind(&true_value);
-  __ mov(ToRegister(instr->result()), factory()->true_value());
-  __ bind(&done);
-}
-
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
-  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
-
-  if (instr->has_constant_parameter_count()) {
-    int parameter_count = ToInteger32(instr->constant_parameter_count());
-    if (dynamic_frame_alignment && FLAG_debug_code) {
-      __ cmp(Operand(esp,
-                     (parameter_count + extra_value_count) * kPointerSize),
-             Immediate(kAlignmentZapValue));
-      __ Assert(equal, kExpectedAlignmentMarker);
-    }
-    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
-  } else {
-    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
-    Register reg = ToRegister(instr->parameter_count());
-    // The argument count parameter is a smi.
-    __ SmiUntag(reg);
-    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
-    if (dynamic_frame_alignment && FLAG_debug_code) {
-      DCHECK(extra_value_count == 2);
-      __ cmp(Operand(esp, reg, times_pointer_size,
-                     extra_value_count * kPointerSize),
-             Immediate(kAlignmentZapValue));
-      __ Assert(equal, kExpectedAlignmentMarker);
-    }
-
-    // Emit code to restore the stack based on instr->parameter_count().
-    __ pop(return_addr_reg);  // Save the return address.
-    if (dynamic_frame_alignment) {
-      __ inc(reg);  // One more word for the alignment padding.
-    }
-
-    __ shl(reg, kPointerSizeLog2);
-    __ add(esp, reg);
-    __ jmp(return_addr_reg);
-  }
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace && info()->IsOptimizing()) {
-    // Preserve the return value on the stack and rely on the runtime call
-    // to return the value in the same register.  We're leaving the code
-    // managed by the register allocator and tearing down the frame, it's
-    // safe to write to the context register.
-    __ push(eax);
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
-  if (dynamic_frame_alignment_) {
-    // Fetch the state of the dynamic frame alignment.
-    __ mov(edx, Operand(ebp,
-      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
-  }
-  int no_frame_start = -1;
-  if (NeedsEagerFrame()) {
-    __ mov(esp, ebp);
-    __ pop(ebp);
-    no_frame_start = masm_->pc_offset();
-  }
-  if (dynamic_frame_alignment_) {
-    Label no_padding;
-    __ cmp(edx, Immediate(kNoAlignmentPadding));
-    __ j(equal, &no_padding, Label::kNear);
-
-    EmitReturn(instr, true);
-    __ bind(&no_padding);
-  }
-
-  EmitReturn(instr, false);
-  if (no_frame_start != -1) {
-    info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
-  }
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
-  Register result = ToRegister(instr->result());
-  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(result, factory()->the_hole_value());
-    DeoptimizeIf(equal, instr, "hole");
-  }
-}
-
-
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  DCHECK(FLAG_vector_ics);
-  Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = VectorLoadICDescriptor::SlotRegister();
-  DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
-  DCHECK(slot_register.is(eax));
-
-  AllowDeferredHandleDereference vector_structure_check;
-  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-  __ mov(vector_register, vector);
-  // No need to allocate this register.
-  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
-  int index = vector->GetIndex(slot);
-  __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->global_object())
-             .is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  __ mov(LoadDescriptor::NameRegister(), instr->name());
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  }
-  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->value());
-  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
-
-  // If the cell we are storing to contains the hole, it could have
-  // been deleted from the property dictionary. In that case, we need
-  // to update the property details in the property dictionary to mark
-  // it as no longer deleted; we deoptimize in that case.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
-    DeoptimizeIf(equal, instr, "hole");
-  }
-
-  // Store the value.
-  __ mov(Operand::ForCell(cell_handle), value);
-  // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
-  Register context = ToRegister(instr->context());
-  Register result = ToRegister(instr->result());
-  __ mov(result, ContextOperand(context, instr->slot_index()));
-
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(result, factory()->the_hole_value());
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr, "hole");
-    } else {
-      Label is_not_hole;
-      __ j(not_equal, &is_not_hole, Label::kNear);
-      __ mov(result, factory()->undefined_value());
-      __ bind(&is_not_hole);
-    }
-  }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
-  Register context = ToRegister(instr->context());
-  Register value = ToRegister(instr->value());
-
-  Label skip_assignment;
-
-  Operand target = ContextOperand(context, instr->slot_index());
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(target, factory()->the_hole_value());
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr, "hole");
-    } else {
-      __ j(not_equal, &skip_assignment, Label::kNear);
-    }
-  }
-
-  __ mov(target, value);
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    SmiCheck check_needed =
-        instr->hydrogen()->value()->type().IsHeapObject()
-            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    Register temp = ToRegister(instr->temp());
-    int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWriteContextSlot(context,
-                              offset,
-                              value,
-                              temp,
-                              kSaveFPRegs,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
-  }
-
-  __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
-  HObjectAccess access = instr->hydrogen()->access();
-  int offset = access.offset();
-
-  if (access.IsExternalMemory()) {
-    Register result = ToRegister(instr->result());
-    MemOperand operand = instr->object()->IsConstantOperand()
-        ? MemOperand::StaticVariable(ToExternalReference(
-                LConstantOperand::cast(instr->object())))
-        : MemOperand(ToRegister(instr->object()), offset);
-    __ Load(result, operand, access.representation());
-    return;
-  }
-
-  Register object = ToRegister(instr->object());
-  if (instr->hydrogen()->representation().IsDouble()) {
-    XMMRegister result = ToDoubleRegister(instr->result());
-    __ movsd(result, FieldOperand(object, offset));
-    return;
-  }
-
-  Register result = ToRegister(instr->result());
-  if (!access.IsInobject()) {
-    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    object = result;
-  }
-  __ Load(result, FieldOperand(object, offset), access.representation());
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
-  DCHECK(!operand->IsDoubleRegister());
-  if (operand->IsConstantOperand()) {
-    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
-    AllowDeferredHandleDereference smi_check;
-    if (object->IsSmi()) {
-      __ Push(Handle<Smi>::cast(object));
-    } else {
-      __ PushHeapObject(Handle<HeapObject>::cast(object));
-    }
-  } else if (operand->IsRegister()) {
-    __ push(ToRegister(operand));
-  } else {
-    __ push(ToOperand(operand));
-  }
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  __ mov(LoadDescriptor::NameRegister(), instr->name());
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  }
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
-  Register function = ToRegister(instr->function());
-  Register temp = ToRegister(instr->temp());
-  Register result = ToRegister(instr->result());
-
-  // Get the prototype or initial map from the function.
-  __ mov(result,
-         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Check that the function has a prototype or an initial map.
-  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
-  DeoptimizeIf(equal, instr, "hole");
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  __ CmpObjectType(result, MAP_TYPE, temp);
-  __ j(not_equal, &done, Label::kNear);
-
-  // Get the prototype from the initial map.
-  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
-  Register result = ToRegister(instr->result());
-  __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
-  Register arguments = ToRegister(instr->arguments());
-  Register result = ToRegister(instr->result());
-  if (instr->length()->IsConstantOperand() &&
-      instr->index()->IsConstantOperand()) {
-    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
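-    // Same address as the register path below: (length - index) words plus
-    // one extra word (the kPointerSize bias).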
-    int index = (const_length - const_index) + 1;
-    __ mov(result, Operand(arguments, index * kPointerSize));
-  } else {
-    Register length = ToRegister(instr->length());
-    Operand index = ToOperand(instr->index());
-    // There are two words between the frame pointer and the last argument.
-    // Subtracting from length accounts for one of them; add one more.
-    __ sub(length, index);
-    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
-  }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
-  ElementsKind elements_kind = instr->elements_kind();
-  LOperand* key = instr->key();
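-  // When the scale factor cannot absorb the smi tag (byte-sized external
-  // elements), untag the key in place first.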
-  if (!key->IsConstantOperand() &&
-      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
-                                  elements_kind)) {
-    __ SmiUntag(ToRegister(key));
-  }
-  Operand operand(BuildFastArrayOperand(
-      instr->elements(),
-      key,
-      instr->hydrogen()->key()->representation(),
-      elements_kind,
-      instr->base_offset()));
-  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
-      elements_kind == FLOAT32_ELEMENTS) {
-    XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, operand);
-    __ cvtss2sd(result, result);
-  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
-             elements_kind == FLOAT64_ELEMENTS) {
-    __ movsd(ToDoubleRegister(instr->result()), operand);
-  } else {
-    Register result(ToRegister(instr->result()));
-    switch (elements_kind) {
-      case EXTERNAL_INT8_ELEMENTS:
-      case INT8_ELEMENTS:
-        __ movsx_b(result, operand);
-        break;
-      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
-      case EXTERNAL_UINT8_ELEMENTS:
-      case UINT8_ELEMENTS:
-      case UINT8_CLAMPED_ELEMENTS:
-        __ movzx_b(result, operand);
-        break;
-      case EXTERNAL_INT16_ELEMENTS:
-      case INT16_ELEMENTS:
-        __ movsx_w(result, operand);
-        break;
-      case EXTERNAL_UINT16_ELEMENTS:
-      case UINT16_ELEMENTS:
-        __ movzx_w(result, operand);
-        break;
-      case EXTERNAL_INT32_ELEMENTS:
-      case INT32_ELEMENTS:
-        __ mov(result, operand);
-        break;
-      case EXTERNAL_UINT32_ELEMENTS:
-      case UINT32_ELEMENTS:
-        __ mov(result, operand);
-        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
-          __ test(result, Operand(result));
-          DeoptimizeIf(negative, instr, "negative value");
-        }
-        break;
-      case EXTERNAL_FLOAT32_ELEMENTS:
-      case EXTERNAL_FLOAT64_ELEMENTS:
-      case FLOAT32_ELEMENTS:
-      case FLOAT64_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case SLOPPY_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
-  if (instr->hydrogen()->RequiresHoleCheck()) {
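-    // Only the upper 32 bits distinguish the hole NaN, so skip the lower
-    // word and compare just the upper half.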
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(), instr->key(),
-        instr->hydrogen()->key()->representation(),
-        FAST_DOUBLE_ELEMENTS,
-        instr->base_offset() + sizeof(kHoleNanLower32));
-    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr, "hole");
-  }
-
-  Operand double_load_operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      instr->hydrogen()->key()->representation(),
-      FAST_DOUBLE_ELEMENTS,
-      instr->base_offset());
-  XMMRegister result = ToDoubleRegister(instr->result());
-  __ movsd(result, double_load_operand);
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
-  Register result = ToRegister(instr->result());
-
-  // Load the result.
-  __ mov(result,
-         BuildFastArrayOperand(instr->elements(), instr->key(),
-                               instr->hydrogen()->key()->representation(),
-                               FAST_ELEMENTS, instr->base_offset()));
-
-  // Check for the hole value.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
-      __ test(result, Immediate(kSmiTagMask));
-      DeoptimizeIf(not_equal, instr, "not a Smi");
-    } else {
-      __ cmp(result, factory()->the_hole_value());
-      DeoptimizeIf(equal, instr, "hole");
-    }
-  }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
-  if (instr->is_typed_elements()) {
-    DoLoadKeyedExternalArray(instr);
-  } else if (instr->hydrogen()->representation().IsDouble()) {
-    DoLoadKeyedFixedDoubleArray(instr);
-  } else {
-    DoLoadKeyedFixedArray(instr);
-  }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
-    LOperand* elements_pointer,
-    LOperand* key,
-    Representation key_representation,
-    ElementsKind elements_kind,
-    uint32_t base_offset) {
-  Register elements_pointer_reg = ToRegister(elements_pointer);
-  int element_shift_size = ElementsKindToShiftSize(elements_kind);
-  int shift_size = element_shift_size;
-  if (key->IsConstantOperand()) {
-    int constant_value = ToInteger32(LConstantOperand::cast(key));
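-    // Once shifted and biased, the key must still fit in a 32-bit
-    // displacement, so the top four bits have to be clear.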
-    if (constant_value & 0xF0000000) {
-      Abort(kArrayIndexConstantValueTooBig);
-    }
-    return Operand(elements_pointer_reg,
-                   ((constant_value) << shift_size)
-                       + base_offset);
-  } else {
-    // Take the tag bit into account while computing the shift size.
-    if (key_representation.IsSmi() && (shift_size >= 1)) {
-      shift_size -= kSmiTagSize;
-    }
-    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
-    return Operand(elements_pointer_reg,
-                   ToRegister(key),
-                   scale_factor,
-                   base_offset);
-  }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-  }
-
-  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
-  Register result = ToRegister(instr->result());
-
-  if (instr->hydrogen()->from_inlined()) {
-    __ lea(result, Operand(esp, -2 * kPointerSize));
-  } else {
-    // Check for arguments adapter frame.
-    Label done, adapted;
-    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
-    __ cmp(Operand(result),
-           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-    __ j(equal, &adapted, Label::kNear);
-
-    // No arguments adaptor frame.
-    __ mov(result, Operand(ebp));
-    __ jmp(&done, Label::kNear);
-
-    // Arguments adaptor frame present.
-    __ bind(&adapted);
-    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-    // The result is the frame pointer for the frame if not adapted, and the
-    // real frame below the adaptor frame if adapted.
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
-  Operand elem = ToOperand(instr->elements());
-  Register result = ToRegister(instr->result());
-
-  Label done;
-
-  // If there is no arguments adaptor frame, the number of arguments is fixed.
-  __ cmp(ebp, elem);
-  __ mov(result, Immediate(scope()->num_parameters()));
-  __ j(equal, &done, Label::kNear);
-
-  // Arguments adaptor frame present. Get argument length from there.
-  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(result, Operand(result,
-                         ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(result);
-
-  // Argument length is in result register.
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-
-  // If the receiver is null or undefined, we have to pass the global
-  // object as a receiver to normal functions. Values have to be
-  // passed unchanged to builtins and strict-mode functions.
-  Label receiver_ok, global_object;
-  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-  Register scratch = ToRegister(instr->temp());
-
-  if (!instr->hydrogen()->known_function()) {
-    // Do not transform the receiver to object for strict mode
-    // functions.
-    __ mov(scratch,
-           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
-              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-    __ j(not_equal, &receiver_ok, dist);
-
-    // Do not transform the receiver to object for builtins.
-    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
-              1 << SharedFunctionInfo::kNativeBitWithinByte);
-    __ j(not_equal, &receiver_ok, dist);
-  }
-
-  // Normal function. Replace undefined or null with global receiver.
-  __ cmp(receiver, factory()->null_value());
-  __ j(equal, &global_object, Label::kNear);
-  __ cmp(receiver, factory()->undefined_value());
-  __ j(equal, &global_object, Label::kNear);
-
-  // The receiver should be a JS object.
-  __ test(receiver, Immediate(kSmiTagMask));
-  DeoptimizeIf(equal, instr, "Smi");
-  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
-  DeoptimizeIf(below, instr, "not a JavaScript object");
-
-  __ jmp(&receiver_ok, Label::kNear);
-  __ bind(&global_object);
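-  // Load the global proxy through the function's context:
-  // context -> global object -> global proxy.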
-  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
-  const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ mov(receiver, Operand(receiver, global_offset));
-  const int proxy_offset = GlobalObject::kGlobalProxyOffset;
-  __ mov(receiver, FieldOperand(receiver, proxy_offset));
-  __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
-  DCHECK(receiver.is(eax));  // Used for parameter count.
-  DCHECK(function.is(edi));  // Required by InvokeFunction.
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  // Copy the arguments to this function possibly from the
-  // adaptor frame below it.
-  const uint32_t kArgumentsLimit = 1 * KB;
-  __ cmp(length, kArgumentsLimit);
-  DeoptimizeIf(above, instr, "too many arguments");
-
-  __ push(receiver);
-  __ mov(receiver, length);
-
-  // Loop through the arguments pushing them onto the execution
-  // stack.
-  Label invoke, loop;
-  // length is a small non-negative integer, due to the test above.
-  __ test(length, Operand(length));
-  __ j(zero, &invoke, Label::kNear);
-  __ bind(&loop);
-  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
-  __ dec(length);
-  __ j(not_zero, &loop);
-
-  // Invoke the function.
-  __ bind(&invoke);
-  DCHECK(instr->HasPointerMap());
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount actual(eax);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
-  __ int3();
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
-  LOperand* argument = instr->value();
-  EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
-  __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
-  Register result = ToRegister(instr->result());
-  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
-  Register result = ToRegister(instr->result());
-  if (info()->IsOptimizing()) {
-    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
-  } else {
-    // If there is no frame, the context must be in esi.
-    DCHECK(result.is(esi));
-  }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  __ push(esi);  // The context is the first argument.
-  __ push(Immediate(instr->hydrogen()->pairs()));
-  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
-  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
-                                 int formal_parameter_count,
-                                 int arity,
-                                 LInstruction* instr,
-                                 EDIState edi_state) {
-  bool dont_adapt_arguments =
-      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
-  bool can_invoke_directly =
-      dont_adapt_arguments || formal_parameter_count == arity;
-
-  if (can_invoke_directly) {
-    if (edi_state == EDI_UNINITIALIZED) {
-      __ LoadHeapObject(edi, function);
-    }
-
-    // Change context.
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-    // Set eax to the argument count if adaptation is not needed. Assumes
-    // that eax is available to write to at this point.
-    if (dont_adapt_arguments) {
-      __ mov(eax, arity);
-    }
-
-    // Invoke function directly.
-    if (function.is_identical_to(info()->closure())) {
-      __ CallSelf();
-    } else {
-      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-    }
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-  } else {
-    // We need to adapt arguments.
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
-    ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
-  }
-}
-
-
-void LCodeGen::DoTailCallThroughMegamorphicCache(
-    LTailCallThroughMegamorphicCache* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register name = ToRegister(instr->name());
-  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(name.is(LoadDescriptor::NameRegister()));
-  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
-  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
-
-  Register scratch = ebx;
-  Register extra = edi;
-  DCHECK(!extra.is(slot) && !extra.is(vector));
-  DCHECK(!scratch.is(receiver) && !scratch.is(name));
-  DCHECK(!extra.is(receiver) && !extra.is(name));
-
-  // Important for the tail-call.
-  bool must_teardown_frame = NeedsEagerFrame();
-
-  if (!instr->hydrogen()->is_just_miss()) {
-    if (FLAG_vector_ics) {
-      __ push(slot);
-      __ push(vector);
-    }
-
-    // The probe will tail call to a handler if found.
-    // If --vector-ics is on, then it knows to pop the two args first.
-    DCHECK(!instr->hydrogen()->is_keyed_load());
-    isolate()->stub_cache()->GenerateProbe(
-        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
-        receiver, name, scratch, extra);
-
-    if (FLAG_vector_ics) {
-      __ pop(vector);
-      __ pop(slot);
-    }
-  }
-
-  // Tail call to miss if we ended up here.
-  if (must_teardown_frame) __ leave();
-  if (instr->hydrogen()->is_keyed_load()) {
-    KeyedLoadIC::GenerateMiss(masm());
-  } else {
-    LoadIC::GenerateMiss(masm());
-  }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  if (instr->hydrogen()->IsTailCall()) {
-    if (NeedsEagerFrame()) __ leave();
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      __ jmp(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
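-      // The entry point follows the Code object header; subtracting
-      // kHeapObjectTag untags the pointer.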
-      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(target);
-    }
-  } else {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-    if (instr->target()->IsConstantOperand()) {
-      LConstantOperand* target = LConstantOperand::cast(instr->target());
-      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-      __ call(code, RelocInfo::CODE_TARGET);
-    } else {
-      DCHECK(instr->target()->IsRegister());
-      Register target = ToRegister(instr->target());
-      generator.BeforeCall(__ CallSize(Operand(target)));
-      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ call(target);
-    }
-    generator.AfterCall();
-  }
-}
-
-
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  if (instr->hydrogen()->pass_argument_count()) {
-    __ mov(eax, instr->arity());
-  }
-
-  // Change context.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-  bool is_self_call = false;
-  if (instr->hydrogen()->function()->IsConstant()) {
-    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
-    Handle<JSFunction> jsfun =
-      Handle<JSFunction>::cast(fun_const->handle(isolate()));
-    is_self_call = jsfun.is_identical_to(info()->closure());
-  }
-
-  if (is_self_call) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-  }
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
-  Register input_reg = ToRegister(instr->value());
-  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-         factory()->heap_number_map());
-  DeoptimizeIf(not_equal, instr, "not a heap number");
-
-  Label slow, allocated, done;
-  Register tmp = input_reg.is(eax) ? ecx : eax;
-  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
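-  // Pick two scratch registers distinct from input_reg and from each other.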
-
-  // Preserve the value of all registers.
-  PushSafepointRegistersScope scope(this);
-
-  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
-  // Check the sign of the argument. If the argument is positive, just
-  // return it. We do not need to patch the stack since |input| and
-  // |result| are the same register and |input| will be restored
-  // unchanged by popping safepoint registers.
-  __ test(tmp, Immediate(HeapNumber::kSignMask));
-  __ j(zero, &done, Label::kNear);
-
-  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
-  __ jmp(&allocated, Label::kNear);
-
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
-                          instr, instr->context());
-  // Set the pointer to the new heap number in tmp.
-  if (!tmp.is(eax)) __ mov(tmp, eax);
-  // Restore input_reg after call to runtime.
-  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
-  __ bind(&allocated);
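-  // Copy the value into the new HeapNumber with the sign bit cleared.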
-  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
-  __ and_(tmp2, ~HeapNumber::kSignMask);
-  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
-  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
-  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
-  __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
-  Register input_reg = ToRegister(instr->value());
-  __ test(input_reg, Operand(input_reg));
-  Label is_positive;
-  __ j(not_sign, &is_positive, Label::kNear);
-  __ neg(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr, "overflow");
-  __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
-  // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
-   public:
-    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
-                                    LMathAbs* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
-    }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LMathAbs* instr_;
-  };
-
-  DCHECK(instr->value()->Equals(instr->result()));
-  Representation r = instr->hydrogen()->value()->representation();
-
-  if (r.IsDouble()) {
-    XMMRegister scratch = double_scratch0();
-    XMMRegister input_reg = ToDoubleRegister(instr->value());
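-    // Compute |x| bitwise: -x differs from x only in the sign bit, so
-    // x AND -x clears the sign while preserving exponent and mantissa.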
-    __ xorps(scratch, scratch);
-    __ subsd(scratch, input_reg);
-    __ andps(input_reg, scratch);
-  } else if (r.IsSmiOrInteger32()) {
-    EmitIntegerMathAbs(instr);
-  } else {  // Tagged case.
-    DeferredMathAbsTaggedHeapNumber* deferred =
-        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
-    Register input_reg = ToRegister(instr->value());
-    // Smi check.
-    __ JumpIfNotSmi(input_reg, deferred->entry());
-    EmitIntegerMathAbs(instr);
-    __ bind(deferred->exit());
-  }
-}
-
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
-  XMMRegister xmm_scratch = double_scratch0();
-  Register output_reg = ToRegister(instr->result());
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-
-  if (CpuFeatures::IsSupported(SSE4_1)) {
-    CpuFeatureScope scope(masm(), SSE4_1);
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      // Deoptimize on negative zero.
-      Label non_zero;
-      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
-      __ ucomisd(input_reg, xmm_scratch);
-      __ j(not_equal, &non_zero, Label::kNear);
-      __ movmskpd(output_reg, input_reg);
-      __ test(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr, "minus zero");
-      __ bind(&non_zero);
-    }
-    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
-    __ cvttsd2si(output_reg, Operand(xmm_scratch));
-    // Overflow is signalled with minint.
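-    // (cmp with 1 sets the overflow flag only for minint, 0x80000000.)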
-    __ cmp(output_reg, 0x1);
-    DeoptimizeIf(overflow, instr, "overflow");
-  } else {
-    Label negative_sign, done;
-    // Deoptimize on unordered.
-    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(parity_even, instr, "NaN");
-    __ j(below, &negative_sign, Label::kNear);
-
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      // Check for negative zero.
-      Label positive_sign;
-      __ j(above, &positive_sign, Label::kNear);
-      __ movmskpd(output_reg, input_reg);
-      __ test(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr, "minus zero");
-      __ Move(output_reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-      __ bind(&positive_sign);
-    }
-
-    // Use truncating instruction (OK because input is positive).
-    __ cvttsd2si(output_reg, Operand(input_reg));
-    // Overflow is signalled with minint.
-    __ cmp(output_reg, 0x1);
-    DeoptimizeIf(overflow, instr, "overflow");
-    __ jmp(&done, Label::kNear);
-
-    // Non-zero negative reaches here.
-    __ bind(&negative_sign);
-    // Truncate, then compare and compensate.
-    __ cvttsd2si(output_reg, Operand(input_reg));
-    __ Cvtsi2sd(xmm_scratch, output_reg);
-    __ ucomisd(input_reg, xmm_scratch);
-    __ j(equal, &done, Label::kNear);
-    __ sub(output_reg, Immediate(1));
-    DeoptimizeIf(overflow, instr, "overflow");
-
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
-  Register output_reg = ToRegister(instr->result());
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister xmm_scratch = double_scratch0();
-  XMMRegister input_temp = ToDoubleRegister(instr->temp());
-  ExternalReference one_half = ExternalReference::address_of_one_half();
-  ExternalReference minus_one_half =
-      ExternalReference::address_of_minus_one_half();
-
-  Label done, round_to_zero, below_one_half, do_not_compensate;
-  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-
-  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
-  __ ucomisd(xmm_scratch, input_reg);
-  __ j(above, &below_one_half, Label::kNear);
-
-  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
-  __ addsd(xmm_scratch, input_reg);
-  __ cvttsd2si(output_reg, Operand(xmm_scratch));
-  // Overflow is signalled with minint.
-  __ cmp(output_reg, 0x1);
-  DeoptimizeIf(overflow, instr, "overflow");
-  __ jmp(&done, dist);
-
-  __ bind(&below_one_half);
-  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
-  __ ucomisd(xmm_scratch, input_reg);
-  __ j(below_equal, &round_to_zero, Label::kNear);
-
-  // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
-  // compare and compensate.
-  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
-  __ subsd(input_temp, xmm_scratch);
-  __ cvttsd2si(output_reg, Operand(input_temp));
-  // Catch minint due to overflow, and to prevent overflow when compensating.
-  __ cmp(output_reg, 0x1);
-  DeoptimizeIf(overflow, instr, "overflow");
-
-  __ Cvtsi2sd(xmm_scratch, output_reg);
-  __ ucomisd(xmm_scratch, input_temp);
-  __ j(equal, &done, dist);
-  __ sub(output_reg, Immediate(1));
-  // No overflow because we already ruled out minint.
-  __ jmp(&done, dist);
-
-  __ bind(&round_to_zero);
-  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
-  // we can ignore the difference between a result of -0 and +0.
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // If the sign is positive, we return +0.
-    __ movmskpd(output_reg, input_reg);
-    __ test(output_reg, Immediate(1));
-    DeoptimizeIf(not_zero, instr, "minus zero");
-  }
-  __ Move(output_reg, Immediate(0));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister output_reg = ToDoubleRegister(instr->result());
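-  // Rounding to single precision and back to double implements Math.fround.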
-  __ cvtsd2ss(output_reg, input_reg);
-  __ cvtss2sd(output_reg, output_reg);
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
-  Operand input = ToOperand(instr->value());
-  XMMRegister output = ToDoubleRegister(instr->result());
-  __ sqrtsd(output, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
-  XMMRegister xmm_scratch = double_scratch0();
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  Register scratch = ToRegister(instr->temp());
-  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
-
-  // Note that according to ECMA-262 15.8.2.13:
-  // Math.pow(-Infinity, 0.5) == Infinity
-  // Math.sqrt(-Infinity) == NaN
-  Label done, sqrt;
-  // Check base for -Infinity.  According to IEEE-754, single-precision
-  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
-  __ mov(scratch, 0xFF800000);
-  __ movd(xmm_scratch, scratch);
-  __ cvtss2sd(xmm_scratch, xmm_scratch);
-  __ ucomisd(input_reg, xmm_scratch);
-  // Comparing -Infinity with NaN results in "unordered", which sets the
-  // zero flag as if both were equal.  However, it also sets the carry flag.
-  __ j(not_equal, &sqrt, Label::kNear);
-  __ j(carry, &sqrt, Label::kNear);
-  // If input is -Infinity, return Infinity.
-  __ xorps(input_reg, input_reg);
-  __ subsd(input_reg, xmm_scratch);
-  __ jmp(&done, Label::kNear);
-
-  // Square root.
-  __ bind(&sqrt);
-  __ xorps(xmm_scratch, xmm_scratch);
-  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
-  __ sqrtsd(input_reg, input_reg);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
-  Representation exponent_type = instr->hydrogen()->right()->representation();
-  // Having marked this as a call, we can use any registers.
-  // Just make sure that the input/output registers are the expected ones.
-  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
-  DCHECK(!instr->right()->IsDoubleRegister() ||
-         ToDoubleRegister(instr->right()).is(xmm1));
-  DCHECK(!instr->right()->IsRegister() ||
-         ToRegister(instr->right()).is(tagged_exponent));
-  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
-  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
-
-  if (exponent_type.IsSmi()) {
-    MathPowStub stub(isolate(), MathPowStub::TAGGED);
-    __ CallStub(&stub);
-  } else if (exponent_type.IsTagged()) {
-    Label no_deopt;
-    __ JumpIfSmi(tagged_exponent, &no_deopt);
-    DCHECK(!ecx.is(tagged_exponent));
-    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
-    DeoptimizeIf(not_equal, instr, "not a heap number");
-    __ bind(&no_deopt);
-    MathPowStub stub(isolate(), MathPowStub::TAGGED);
-    __ CallStub(&stub);
-  } else if (exponent_type.IsInteger32()) {
-    MathPowStub stub(isolate(), MathPowStub::INTEGER);
-    __ CallStub(&stub);
-  } else {
-    DCHECK(exponent_type.IsDouble());
-    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
-    __ CallStub(&stub);
-  }
-}
-
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
-  DCHECK(instr->value()->Equals(instr->result()));
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister xmm_scratch = double_scratch0();
-  Label positive, done, zero;
-  __ xorps(xmm_scratch, xmm_scratch);
-  __ ucomisd(input_reg, xmm_scratch);
-  __ j(above, &positive, Label::kNear);
-  __ j(not_carry, &zero, Label::kNear);
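-  // Negative input or NaN falls through here; Math.log then yields NaN.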
-  ExternalReference nan =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  __ movsd(input_reg, Operand::StaticVariable(nan));
-  __ jmp(&done, Label::kNear);
-  __ bind(&zero);
-  ExternalReference ninf =
-      ExternalReference::address_of_negative_infinity();
-  __ movsd(input_reg, Operand::StaticVariable(ninf));
-  __ jmp(&done, Label::kNear);
-  __ bind(&positive);
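-  // fyl2x computes ST(1) * log2(ST(0)); with ln(2) pushed first this
-  // leaves ln(x) on the x87 stack.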
-  __ fldln2();
-  __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ movsd(Operand(esp, 0), input_reg);
-  __ fld_d(Operand(esp, 0));
-  __ fyl2x();
-  __ fstp_d(Operand(esp, 0));
-  __ movsd(input_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
-  Register input = ToRegister(instr->value());
-  Register result = ToRegister(instr->result());
-  Label not_zero_input;
-  __ bsr(result, input);
-
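-  // BSR sets ZF and leaves the destination undefined for a zero input.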
-  __ j(not_zero, &not_zero_input);
-  __ Move(result, Immediate(63));  // 63^31 == 32
-
-  __ bind(&not_zero_input);
-  __ xor_(result, Immediate(31));  // for x in [0..31], 31^x == 31-x.
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
-  XMMRegister input = ToDoubleRegister(instr->value());
-  XMMRegister result = ToDoubleRegister(instr->result());
-  XMMRegister temp0 = double_scratch0();
-  Register temp1 = ToRegister(instr->temp1());
-  Register temp2 = ToRegister(instr->temp2());
-
-  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(instr->HasPointerMap());
-
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
-  if (known_function.is_null()) {
-    LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
-  } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(),
-                      instr,
-                      EDI_CONTAINS_TARGET);
-  }
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  int arity = instr->arity();
-  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->constructor()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  // No cell in ebx for construct type feedback in optimized code.
-  __ mov(ebx, isolate()->factory()->undefined_value());
-  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
-  __ Move(eax, Immediate(instr->arity()));
-  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->constructor()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  __ Move(eax, Immediate(instr->arity()));
-  __ mov(ebx, isolate()->factory()->undefined_value());
-  ElementsKind kind = instr->hydrogen()->elements_kind();
-  AllocationSiteOverrideMode override_mode =
-      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
-          ? DISABLE_ALLOCATION_SITES
-          : DONT_OVERRIDE;
-
-  if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
-    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-  } else if (instr->arity() == 1) {
-    Label done;
-    if (IsFastPackedElementsKind(kind)) {
-      Label packed_case;
-      // A single non-zero length argument means the array must be holey,
-      // so look at the first argument.
-      __ mov(ecx, Operand(esp, 0));
-      __ test(ecx, ecx);
-      __ j(zero, &packed_case, Label::kNear);
-
-      ElementsKind holey_kind = GetHoleyElementsKind(kind);
-      ArraySingleArgumentConstructorStub stub(isolate(),
-                                              holey_kind,
-                                              override_mode);
-      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-      __ jmp(&done, Label::kNear);
-      __ bind(&packed_case);
-    }
-
-    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
-    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-    __ bind(&done);
-  } else {
-    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
-    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-  }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
-  Register function = ToRegister(instr->function());
-  Register code_object = ToRegister(instr->code_object());
-  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
-  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
-  Register result = ToRegister(instr->result());
-  Register base = ToRegister(instr->base_object());
-  if (instr->offset()->IsConstantOperand()) {
-    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
-    __ lea(result, Operand(base, ToInteger32(offset)));
-  } else {
-    Register offset = ToRegister(instr->offset());
-    __ lea(result, Operand(base, offset, times_1, 0));
-  }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
-  Representation representation = instr->hydrogen()->field_representation();
-
-  HObjectAccess access = instr->hydrogen()->access();
-  int offset = access.offset();
-
-  if (access.IsExternalMemory()) {
-    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-    MemOperand operand = instr->object()->IsConstantOperand()
-        ? MemOperand::StaticVariable(
-            ToExternalReference(LConstantOperand::cast(instr->object())))
-        : MemOperand(ToRegister(instr->object()), offset);
-    if (instr->value()->IsConstantOperand()) {
-      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-      __ mov(operand, Immediate(ToInteger32(operand_value)));
-    } else {
-      Register value = ToRegister(instr->value());
-      __ Store(value, operand, representation);
-    }
-    return;
-  }
-
-  Register object = ToRegister(instr->object());
-  __ AssertNotSmi(object);
-
-  DCHECK(!representation.IsSmi() ||
-         !instr->value()->IsConstantOperand() ||
-         IsSmi(LConstantOperand::cast(instr->value())));
-  if (representation.IsDouble()) {
-    DCHECK(access.IsInobject());
-    DCHECK(!instr->hydrogen()->has_transition());
-    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-    XMMRegister value = ToDoubleRegister(instr->value());
-    __ movsd(FieldOperand(object, offset), value);
-    return;
-  }
-
-  if (instr->hydrogen()->has_transition()) {
-    Handle<Map> transition = instr->hydrogen()->transition_map();
-    AddDeprecationDependency(transition);
-    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
-    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
-      Register temp = ToRegister(instr->temp());
-      Register temp_map = ToRegister(instr->temp_map());
-      // Update the write barrier for the map field.
-      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
-    }
-  }
-
-  // Do the store.
-  Register write_register = object;
-  if (!access.IsInobject()) {
-    write_register = ToRegister(instr->temp());
-    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
-  }
-
-  MemOperand operand = FieldOperand(write_register, offset);
-  if (instr->value()->IsConstantOperand()) {
-    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-    if (operand_value->IsRegister()) {
-      Register value = ToRegister(operand_value);
-      __ Store(value, operand, representation);
-    } else if (representation.IsInteger32()) {
-      Immediate immediate = ToImmediate(operand_value, representation);
-      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-      __ mov(operand, immediate);
-    } else {
-      Handle<Object> handle_value = ToHandle(operand_value);
-      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
-      __ mov(operand, handle_value);
-    }
-  } else {
-    Register value = ToRegister(instr->value());
-    __ Store(value, operand, representation);
-  }
-
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    Register value = ToRegister(instr->value());
-    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
-    // Update the write barrier for the object for in-object properties.
-    __ RecordWriteField(write_register,
-                        offset,
-                        value,
-                        temp,
-                        kSaveFPRegs,
-                        EMIT_REMEMBERED_SET,
-                        instr->hydrogen()->SmiCheckForWriteBarrier(),
-                        instr->hydrogen()->PointersToHereCheckForValue());
-  }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  __ mov(StoreDescriptor::NameRegister(), instr->name());
-  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
-  if (instr->index()->IsConstantOperand()) {
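-    // The comparison below is length vs. index (operands swapped), so the
-    // condition must be commuted.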
-    __ cmp(ToOperand(instr->length()),
-           ToImmediate(LConstantOperand::cast(instr->index()),
-                       instr->hydrogen()->length()->representation()));
-    cc = CommuteCondition(cc);
-  } else if (instr->length()->IsConstantOperand()) {
-    __ cmp(ToOperand(instr->index()),
-           ToImmediate(LConstantOperand::cast(instr->length()),
-                       instr->hydrogen()->index()->representation()));
-  } else {
-    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
-  }
-  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
-    Label done;
-    __ j(NegateCondition(cc), &done, Label::kNear);
-    __ int3();
-    __ bind(&done);
-  } else {
-    DeoptimizeIf(cc, instr, "out of bounds");
-  }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
-  ElementsKind elements_kind = instr->elements_kind();
-  LOperand* key = instr->key();
-  if (!key->IsConstantOperand() &&
-      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
-                                  elements_kind)) {
-    __ SmiUntag(ToRegister(key));
-  }
-  Operand operand(BuildFastArrayOperand(
-      instr->elements(),
-      key,
-      instr->hydrogen()->key()->representation(),
-      elements_kind,
-      instr->base_offset()));
-  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
-      elements_kind == FLOAT32_ELEMENTS) {
-    XMMRegister xmm_scratch = double_scratch0();
-    __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
-    __ movss(operand, xmm_scratch);
-  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
-             elements_kind == FLOAT64_ELEMENTS) {
-    __ movsd(operand, ToDoubleRegister(instr->value()));
-  } else {
-    Register value = ToRegister(instr->value());
-    switch (elements_kind) {
-      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
-      case EXTERNAL_UINT8_ELEMENTS:
-      case EXTERNAL_INT8_ELEMENTS:
-      case UINT8_ELEMENTS:
-      case INT8_ELEMENTS:
-      case UINT8_CLAMPED_ELEMENTS:
-        __ mov_b(operand, value);
-        break;
-      case EXTERNAL_INT16_ELEMENTS:
-      case EXTERNAL_UINT16_ELEMENTS:
-      case UINT16_ELEMENTS:
-      case INT16_ELEMENTS:
-        __ mov_w(operand, value);
-        break;
-      case EXTERNAL_INT32_ELEMENTS:
-      case EXTERNAL_UINT32_ELEMENTS:
-      case UINT32_ELEMENTS:
-      case INT32_ELEMENTS:
-        __ mov(operand, value);
-        break;
-      case EXTERNAL_FLOAT32_ELEMENTS:
-      case EXTERNAL_FLOAT64_ELEMENTS:
-      case FLOAT32_ELEMENTS:
-      case FLOAT64_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case SLOPPY_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  Operand double_store_operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      instr->hydrogen()->key()->representation(),
-      FAST_DOUBLE_ELEMENTS,
-      instr->base_offset());
-
-  XMMRegister value = ToDoubleRegister(instr->value());
-
-  if (instr->NeedsCanonicalization()) {
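-    // ucomisd of a value with itself is unordered only for NaN; replace
-    // any NaN with the canonical NaN so it cannot alias the hole's NaN
-    // encoding in the fixed double array.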
-    Label have_value;
-
-    __ ucomisd(value, value);
-    __ j(parity_odd, &have_value, Label::kNear);  // Not NaN.
-
-    __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
-    __ bind(&have_value);
-  }
-
-  __ movsd(double_store_operand, value);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
-  Register elements = ToRegister(instr->elements());
-  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
-  Operand operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      instr->hydrogen()->key()->representation(),
-      FAST_ELEMENTS,
-      instr->base_offset());
-  if (instr->value()->IsRegister()) {
-    __ mov(operand, ToRegister(instr->value()));
-  } else {
-    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-    if (IsSmi(operand_value)) {
-      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
-      __ mov(operand, immediate);
-    } else {
-      DCHECK(!IsInteger32(operand_value));
-      Handle<Object> handle_value = ToHandle(operand_value);
-      __ mov(operand, handle_value);
-    }
-  }
-
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    DCHECK(instr->value()->IsRegister());
-    Register value = ToRegister(instr->value());
-    DCHECK(!instr->key()->IsConstantOperand());
-    SmiCheck check_needed =
-        instr->hydrogen()->value()->type().IsHeapObject()
-          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    // Compute address of modified element and store it into key register.
-    __ lea(key, operand);
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   kSaveFPRegs,
-                   EMIT_REMEMBERED_SET,
-                   check_needed,
-                   instr->hydrogen()->PointersToHereCheckForValue());
-  }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
-  // Dispatch by case: external, fast double, or fast elements.
-  if (instr->is_typed_elements()) {
-    DoStoreKeyedExternalArray(instr);
-  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
-    DoStoreKeyedFixedDoubleArray(instr);
-  } else {
-    DoStoreKeyedFixedArray(instr);
-  }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
-  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
-  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
-  Handle<Code> ic =
-      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
-  Register object = ToRegister(instr->object());
-  Register temp = ToRegister(instr->temp());
-  Label no_memento_found;
-  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(equal, instr, "memento found");
-  __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
-  Register object_reg = ToRegister(instr->object());
-
-  Handle<Map> from_map = instr->original_map();
-  Handle<Map> to_map = instr->transitioned_map();
-  ElementsKind from_kind = instr->from_kind();
-  ElementsKind to_kind = instr->to_kind();
-
-  Label not_applicable;
-  bool is_simple_map_transition =
-      IsSimpleMapChangeTransition(from_kind, to_kind);
-  Label::Distance branch_distance =
-      is_simple_map_transition ? Label::kNear : Label::kFar;
-  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
-  __ j(not_equal, &not_applicable, branch_distance);
-  if (is_simple_map_transition) {
-    Register new_map_reg = ToRegister(instr->new_map_temp());
-    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
-           Immediate(to_map));
-    // Write barrier.
-    DCHECK_NE(instr->temp(), NULL);
-    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
-                         ToRegister(instr->temp()),
-                         kDontSaveFPRegs);
-  } else {
-    DCHECK(ToRegister(instr->context()).is(esi));
-    DCHECK(object_reg.is(eax));
-    PushSafepointRegistersScope scope(this);
-    __ mov(ebx, to_map);
-    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
-    __ CallStub(&stub);
-    RecordSafepointWithLazyDeopt(instr,
-        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  }
-  __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
-   public:
-    DeferredStringCharCodeAt(LCodeGen* codegen,
-                             LStringCharCodeAt* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LStringCharCodeAt* instr_;
-  };
-
-  DeferredStringCharCodeAt* deferred =
-      new(zone()) DeferredStringCharCodeAt(this, instr);
-
-  StringCharLoadGenerator::Generate(masm(),
-                                    factory(),
-                                    ToRegister(instr->string()),
-                                    ToRegister(instr->index()),
-                                    ToRegister(instr->result()),
-                                    deferred->entry());
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
-  Register string = ToRegister(instr->string());
-  Register result = ToRegister(instr->result());
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ Move(result, Immediate(0));
-
-  PushSafepointRegistersScope scope(this);
-  __ push(string);
-  // Push the index as a smi. This is safe because of the checks in
-  // DoStringCharCodeAt above.
-  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-  if (instr->index()->IsConstantOperand()) {
-    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
-                                      Representation::Smi());
-    __ push(immediate);
-  } else {
-    Register index = ToRegister(instr->index());
-    __ SmiTag(index);
-    __ push(index);
-  }
-  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
-                          instr, instr->context());
-  __ AssertSmi(eax);
-  __ SmiUntag(eax);
-  __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode FINAL : public LDeferredCode {
-   public:
-    DeferredStringCharFromCode(LCodeGen* codegen,
-                               LStringCharFromCode* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharFromCode(instr_);
-    }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LStringCharFromCode* instr_;
-  };
-
-  DeferredStringCharFromCode* deferred =
-      new(zone()) DeferredStringCharFromCode(this, instr);
-
-  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
-  Register char_code = ToRegister(instr->char_code());
-  Register result = ToRegister(instr->result());
-  DCHECK(!char_code.is(result));
-
-  __ cmp(char_code, String::kMaxOneByteCharCode);
-  __ j(above, deferred->entry());
-  __ Move(result, Immediate(factory()->single_character_string_cache()));
-  __ mov(result, FieldOperand(result,
-                              char_code, times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ cmp(result, factory()->undefined_value());
-  __ j(equal, deferred->entry());
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
-  Register char_code = ToRegister(instr->char_code());
-  Register result = ToRegister(instr->result());
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ Move(result, Immediate(0));
-
-  PushSafepointRegistersScope scope(this);
-  __ SmiTag(char_code);
-  __ push(char_code);
-  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
-  __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->left()).is(edx));
-  DCHECK(ToRegister(instr->right()).is(eax));
-  StringAddStub stub(isolate(),
-                     instr->hydrogen()->flags(),
-                     instr->hydrogen()->pretenure_flag());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  LOperand* input = instr->value();
-  LOperand* output = instr->result();
-  DCHECK(input->IsRegister() || input->IsStackSlot());
-  DCHECK(output->IsDoubleRegister());
-  __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
-  LOperand* input = instr->value();
-  LOperand* output = instr->result();
-  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI FINAL : public LDeferredCode {
-   public:
-    DeferredNumberTagI(LCodeGen* codegen,
-                       LNumberTagI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagIU(
-          instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
-    }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LNumberTagI* instr_;
-  };
-
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister() && input->Equals(instr->result()));
-  Register reg = ToRegister(input);
-
-  DeferredNumberTagI* deferred =
-      new(zone()) DeferredNumberTagI(this, instr);
-  __ SmiTag(reg);
-  __ j(overflow, deferred->entry());
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
-  class DeferredNumberTagU FINAL : public LDeferredCode {
-   public:
-    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagIU(
-          instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
-    }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LNumberTagU* instr_;
-  };
-
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister() && input->Equals(instr->result()));
-  Register reg = ToRegister(input);
-
-  DeferredNumberTagU* deferred =
-      new(zone()) DeferredNumberTagU(this, instr);
-  __ cmp(reg, Immediate(Smi::kMaxValue));
-  __ j(above, deferred->entry());
-  __ SmiTag(reg);
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
-                                     LOperand* value,
-                                     LOperand* temp,
-                                     IntegerSignedness signedness) {
-  Label done, slow;
-  Register reg = ToRegister(value);
-  Register tmp = ToRegister(temp);
-  XMMRegister xmm_scratch = double_scratch0();
-
-  if (signedness == SIGNED_INT32) {
-    // There was overflow, so bits 30 and 31 of the original integer
-    // disagree. Try to allocate a heap number in new space and store
-    // the value in there. If that fails, call the runtime system.
-    __ SmiUntag(reg);
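-    // SmiUntag copied bit 30 into bit 31; overflow means the two bits
-    // differed, so flipping bit 31 restores the original value.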
-    __ xor_(reg, 0x80000000);
-    __ Cvtsi2sd(xmm_scratch, Operand(reg));
-  } else {
-    __ LoadUint32(xmm_scratch, reg);
-  }
-
-  if (FLAG_inline_new) {
-    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
-    __ jmp(&done, Label::kNear);
-  }
-
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
-  {
-    // TODO(3095996): Put a valid pointer value in the stack slot where the
-    // result register is stored, as this register is in the pointer map, but
-    // contains an integer value.
-    __ Move(reg, Immediate(0));
-
-    // Preserve the value of all registers.
-    PushSafepointRegistersScope scope(this);
-
-    // NumberTagI and NumberTagD use the context from the frame, rather than
-    // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kAllocateHeapNumber.
-    // The corresponding HChange instructions are added in a phase that does
-    // not have easy access to the local context.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-    __ StoreToSafepointRegisterSlot(reg, eax);
-  }
-
-  // Done. Put the value in xmm_scratch into the value of the allocated heap
-  // number.
-  __ bind(&done);
-  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD FINAL : public LDeferredCode {
-   public:
-    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LNumberTagD* instr_;
-  };
-
-  Register reg = ToRegister(instr->result());
-
-  DeferredNumberTagD* deferred =
-      new(zone()) DeferredNumberTagD(this, instr);
-  if (FLAG_inline_new) {
-    Register tmp = ToRegister(instr->temp());
-    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
-  } else {
-    __ jmp(deferred->entry());
-  }
-  __ bind(deferred->exit());
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
-  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  Register reg = ToRegister(instr->result());
-  __ Move(reg, Immediate(0));
-
-  PushSafepointRegistersScope scope(this);
-  // NumberTagI and NumberTagD use the context from the frame, rather than
-  // the environment's HContext or HInlinedContext value.
-  // They only call Runtime::kAllocateHeapNumber.
-  // The corresponding HChange instructions are added in a phase that does
-  // not have easy access to the local context.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-  __ StoreToSafepointRegisterSlot(reg, eax);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
-  HChange* hchange = instr->hydrogen();
-  Register input = ToRegister(instr->value());
-  if (hchange->CheckFlag(HValue::kCanOverflow) &&
-      hchange->value()->CheckFlag(HValue::kUint32)) {
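-    // A uint32 fits in a 31-bit Smi only if its top two bits are clear,
-    // i.e. the value is below 2^30.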
-    __ test(input, Immediate(0xc0000000));
-    DeoptimizeIf(not_zero, instr, "overflow");
-  }
-  __ SmiTag(input);
-  if (hchange->CheckFlag(HValue::kCanOverflow) &&
-      !hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIf(overflow, instr, "overflow");
-  }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
-  LOperand* input = instr->value();
-  Register result = ToRegister(input);
-  DCHECK(input->IsRegister() && input->Equals(instr->result()));
-  if (instr->needs_check()) {
-    __ test(result, Immediate(kSmiTagMask));
-    DeoptimizeIf(not_zero, instr, "not a Smi");
-  } else {
-    __ AssertSmi(result);
-  }
-  __ SmiUntag(result);
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
-                                Register temp_reg, XMMRegister result_reg,
-                                NumberUntagDMode mode) {
-  bool can_convert_undefined_to_nan =
-      instr->hydrogen()->can_convert_undefined_to_nan();
-  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
-  Label convert, load_smi, done;
-
-  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
-    // Smi check.
-    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
-    // Heap number map check.
-    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-           factory()->heap_number_map());
-    if (can_convert_undefined_to_nan) {
-      __ j(not_equal, &convert, Label::kNear);
-    } else {
-      DeoptimizeIf(not_equal, instr, "not a heap number");
-    }
-
-    // Heap number to XMM conversion.
-    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-
-    if (deoptimize_on_minus_zero) {
-      XMMRegister xmm_scratch = double_scratch0();
-      __ xorps(xmm_scratch, xmm_scratch);
-      __ ucomisd(result_reg, xmm_scratch);
-      __ j(not_zero, &done, Label::kNear);
-      __ movmskpd(temp_reg, result_reg);
-      __ test_b(temp_reg, 1);
-      DeoptimizeIf(not_zero, instr, "minus zero");
-    }
-    __ jmp(&done, Label::kNear);
-
-    if (can_convert_undefined_to_nan) {
-      __ bind(&convert);
-
-      // Convert undefined (and hole) to NaN.
-      __ cmp(input_reg, factory()->undefined_value());
-      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
-
-      ExternalReference nan =
-          ExternalReference::address_of_canonical_non_hole_nan();
-      __ movsd(result_reg, Operand::StaticVariable(nan));
-      __ jmp(&done, Label::kNear);
-    }
-  } else {
-    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
-  }
-
-  __ bind(&load_smi);
-  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
-  // input register since we avoid dependencies.
-  __ mov(temp_reg, input_reg);
-  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
-  __ Cvtsi2sd(result_reg, Operand(temp_reg));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
-  Register input_reg = ToRegister(instr->value());
-
-  // The input was optimistically untagged; revert it.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
-
-  if (instr->truncating()) {
-    Label no_heap_number, check_bools, check_false;
-
-    // Heap number map check.
-    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-           factory()->heap_number_map());
-    __ j(not_equal, &no_heap_number, Label::kNear);
-    __ TruncateHeapNumberToI(input_reg, input_reg);
-    __ jmp(done);
-
-    __ bind(&no_heap_number);
-    // Check for Oddballs. Undefined/False is converted to zero and True to one
-    // for truncating conversions.
-    __ cmp(input_reg, factory()->undefined_value());
-    __ j(not_equal, &check_bools, Label::kNear);
-    __ Move(input_reg, Immediate(0));
-    __ jmp(done);
-
-    __ bind(&check_bools);
-    __ cmp(input_reg, factory()->true_value());
-    __ j(not_equal, &check_false, Label::kNear);
-    __ Move(input_reg, Immediate(1));
-    __ jmp(done);
-
-    __ bind(&check_false);
-    __ cmp(input_reg, factory()->false_value());
-    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
-    __ Move(input_reg, Immediate(0));
-  } else {
-    XMMRegister scratch = ToDoubleRegister(instr->temp());
-    DCHECK(!scratch.is(xmm0));
-    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-           isolate()->factory()->heap_number_map());
-    DeoptimizeIf(not_equal, instr, "not a heap number");
-    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-    __ cvttsd2si(input_reg, Operand(xmm0));
-    __ Cvtsi2sd(scratch, Operand(input_reg));
-    __ ucomisd(xmm0, scratch);
-    DeoptimizeIf(not_equal, instr, "lost precision");
-    DeoptimizeIf(parity_even, instr, "NaN");
-    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
-      __ test(input_reg, Operand(input_reg));
-      __ j(not_zero, done);
-      __ movmskpd(input_reg, xmm0);
-      __ and_(input_reg, 1);
-      DeoptimizeIf(not_zero, instr, "minus zero");
-    }
-  }
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI FINAL : public LDeferredCode {
-   public:
-    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LTaggedToI* instr_;
-  };
-
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister());
-  Register input_reg = ToRegister(input);
-  DCHECK(input_reg.is(ToRegister(instr->result())));
-
-  if (instr->hydrogen()->value()->representation().IsSmi()) {
-    __ SmiUntag(input_reg);
-  } else {
-    DeferredTaggedToI* deferred =
-        new(zone()) DeferredTaggedToI(this, instr);
-    // Optimistically untag the input.
-    // If the input is a HeapObject, SmiUntag will set the carry flag.
-    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-    __ SmiUntag(input_reg);
-    // Branch to deferred code if the input was tagged.
-    // The deferred code will take care of restoring the tag.
-    __ j(carry, deferred->entry());
-    __ bind(deferred->exit());
-  }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister());
-  LOperand* temp = instr->temp();
-  DCHECK(temp->IsRegister());
-  LOperand* result = instr->result();
-  DCHECK(result->IsDoubleRegister());
-
-  Register input_reg = ToRegister(input);
-  Register temp_reg = ToRegister(temp);
-
-  HValue* value = instr->hydrogen()->value();
-  NumberUntagDMode mode = value->representation().IsSmi()
-      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
-  XMMRegister result_reg = ToDoubleRegister(result);
-  EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
-  LOperand* input = instr->value();
-  DCHECK(input->IsDoubleRegister());
-  LOperand* result = instr->result();
-  DCHECK(result->IsRegister());
-  Register result_reg = ToRegister(result);
-
-  if (instr->truncating()) {
-    XMMRegister input_reg = ToDoubleRegister(input);
-    __ TruncateDoubleToI(result_reg, input_reg);
-  } else {
-    Label lost_precision, is_nan, minus_zero, done;
-    XMMRegister input_reg = ToDoubleRegister(input);
-    XMMRegister xmm_scratch = double_scratch0();
-    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-    __ DoubleToI(result_reg, input_reg, xmm_scratch,
-                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
-                 &is_nan, &minus_zero, dist);
-    __ jmp(&done, dist);
-    __ bind(&lost_precision);
-    DeoptimizeIf(no_condition, instr, "lost precision");
-    __ bind(&is_nan);
-    DeoptimizeIf(no_condition, instr, "NaN");
-    __ bind(&minus_zero);
-    DeoptimizeIf(no_condition, instr, "minus zero");
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
-  LOperand* input = instr->value();
-  DCHECK(input->IsDoubleRegister());
-  LOperand* result = instr->result();
-  DCHECK(result->IsRegister());
-  Register result_reg = ToRegister(result);
-
-  Label lost_precision, is_nan, minus_zero, done;
-  XMMRegister input_reg = ToDoubleRegister(input);
-  XMMRegister xmm_scratch = double_scratch0();
-  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-  __ DoubleToI(result_reg, input_reg, xmm_scratch,
-               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
-               &minus_zero, dist);
-  __ jmp(&done, dist);
-  __ bind(&lost_precision);
-  DeoptimizeIf(no_condition, instr, "lost precision");
-  __ bind(&is_nan);
-  DeoptimizeIf(no_condition, instr, "NaN");
-  __ bind(&minus_zero);
-  DeoptimizeIf(no_condition, instr, "minus zero");
-  __ bind(&done);
-  __ SmiTag(result_reg);
-  DeoptimizeIf(overflow, instr, "overflow");
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
-  LOperand* input = instr->value();
-  __ test(ToOperand(input), Immediate(kSmiTagMask));
-  DeoptimizeIf(not_zero, instr, "not a Smi");
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-    LOperand* input = instr->value();
-    __ test(ToOperand(input), Immediate(kSmiTagMask));
-    DeoptimizeIf(zero, instr, "Smi");
-  }
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
-  Register input = ToRegister(instr->value());
-  Register temp = ToRegister(instr->temp());
-
-  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-
-  if (instr->hydrogen()->is_interval_check()) {
-    InstanceType first;
-    InstanceType last;
-    instr->hydrogen()->GetCheckInterval(&first, &last);
-
-    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-            static_cast<int8_t>(first));
-
-    // If there is only one type in the interval check for equality.
-    if (first == last) {
-      DeoptimizeIf(not_equal, instr, "wrong instance type");
-    } else {
-      DeoptimizeIf(below, instr, "wrong instance type");
-      // Omit check for the last type.
-      if (last != LAST_TYPE) {
-        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-                static_cast<int8_t>(last));
-        DeoptimizeIf(above, instr, "wrong instance type");
-      }
-    }
-  } else {
-    uint8_t mask;
-    uint8_t tag;
-    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
-    if (base::bits::IsPowerOfTwo32(mask)) {
-      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
-      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
-    } else {
-      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
-      __ and_(temp, mask);
-      __ cmp(temp, tag);
-      DeoptimizeIf(not_equal, instr, "wrong instance type");
-    }
-  }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
-  Handle<HeapObject> object = instr->hydrogen()->object().handle();
-  if (instr->hydrogen()->object_in_new_space()) {
-    Register reg = ToRegister(instr->value());
-    Handle<Cell> cell = isolate()->factory()->NewCell(object);
-    __ cmp(reg, Operand::ForCell(cell));
-  } else {
-    Operand operand = ToOperand(instr->value());
-    __ cmp(operand, object);
-  }
-  DeoptimizeIf(not_equal, instr, "value mismatch");
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
-  {
-    PushSafepointRegistersScope scope(this);
-    __ push(object);
-    __ xor_(esi, esi);
-    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-
-    __ test(eax, Immediate(kSmiTagMask));
-  }
-  DeoptimizeIf(zero, instr, "instance migration failed");
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps FINAL : public LDeferredCode {
-   public:
-    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
-        : LDeferredCode(codegen), instr_(instr), object_(object) {
-      SetExit(check_maps());
-    }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredInstanceMigration(instr_, object_);
-    }
-    Label* check_maps() { return &check_maps_; }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LCheckMaps* instr_;
-    Label check_maps_;
-    Register object_;
-  };
-
-  if (instr->hydrogen()->IsStabilityCheck()) {
-    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
-    for (int i = 0; i < maps->size(); ++i) {
-      AddStabilityDependency(maps->at(i).handle());
-    }
-    return;
-  }
-
-  LOperand* input = instr->value();
-  DCHECK(input->IsRegister());
-  Register reg = ToRegister(input);
-
-  DeferredCheckMaps* deferred = NULL;
-  if (instr->hydrogen()->HasMigrationTarget()) {
-    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
-    __ bind(deferred->check_maps());
-  }
-
-  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
-  Label success;
-  for (int i = 0; i < maps->size() - 1; i++) {
-    Handle<Map> map = maps->at(i).handle();
-    __ CompareMap(reg, map);
-    __ j(equal, &success, Label::kNear);
-  }
-
-  Handle<Map> map = maps->at(maps->size() - 1).handle();
-  __ CompareMap(reg, map);
-  if (instr->hydrogen()->HasMigrationTarget()) {
-    __ j(not_equal, deferred->entry());
-  } else {
-    DeoptimizeIf(not_equal, instr, "wrong map");
-  }
-
-  __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
-  XMMRegister xmm_scratch = double_scratch0();
-  Register result_reg = ToRegister(instr->result());
-  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
-  DCHECK(instr->unclamped()->Equals(instr->result()));
-  Register value_reg = ToRegister(instr->result());
-  __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
-  DCHECK(instr->unclamped()->Equals(instr->result()));
-  Register input_reg = ToRegister(instr->unclamped());
-  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
-  XMMRegister xmm_scratch = double_scratch0();
-  Label is_smi, done, heap_number;
-
-  __ JumpIfSmi(input_reg, &is_smi);
-
-  // Check for heap number
-  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-         factory()->heap_number_map());
-  __ j(equal, &heap_number, Label::kNear);
-
-  // Check for undefined. Undefined is converted to zero for clamping
-  // conversions.
-  __ cmp(input_reg, factory()->undefined_value());
-  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
-  __ mov(input_reg, 0);
-  __ jmp(&done, Label::kNear);
-
-  // Heap number
-  __ bind(&heap_number);
-  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
-  __ jmp(&done, Label::kNear);
-
-  // smi
-  __ bind(&is_smi);
-  __ SmiUntag(input_reg);
-  __ ClampUint8(input_reg);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
-  XMMRegister value_reg = ToDoubleRegister(instr->value());
-  Register result_reg = ToRegister(instr->result());
-  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
-    if (CpuFeatures::IsSupported(SSE4_1)) {
-      CpuFeatureScope scope2(masm(), SSE4_1);
-      __ pextrd(result_reg, value_reg, 1);
-    } else {
-      XMMRegister xmm_scratch = double_scratch0();
-      __ pshufd(xmm_scratch, value_reg, 1);
-      __ movd(result_reg, xmm_scratch);
-    }
-  } else {
-    __ movd(result_reg, value_reg);
-  }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  Register hi_reg = ToRegister(instr->hi());
-  Register lo_reg = ToRegister(instr->lo());
-  XMMRegister result_reg = ToDoubleRegister(instr->result());
-
-  if (CpuFeatures::IsSupported(SSE4_1)) {
-    CpuFeatureScope scope2(masm(), SSE4_1);
-    __ movd(result_reg, lo_reg);
-    __ pinsrd(result_reg, hi_reg, 1);
-  } else {
-    XMMRegister xmm_scratch = double_scratch0();
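-    // Build the double from its halves: shift the high word into bits
-    // 63..32, then OR in the low word.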
-    __ movd(result_reg, hi_reg);
-    __ psllq(result_reg, 32);
-    __ movd(xmm_scratch, lo_reg);
-    __ orps(result_reg, xmm_scratch);
-  }
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
-  class DeferredAllocate FINAL : public LDeferredCode {
-   public:
-    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LAllocate* instr_;
-  };
-
-  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
-
-  Register result = ToRegister(instr->result());
-  Register temp = ToRegister(instr->temp());
-
-  // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
-  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
-    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
-  }
-  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
-  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
-  }
-
-  if (instr->size()->IsConstantOperand()) {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    if (size <= Page::kMaxRegularHeapObjectSize) {
-      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
-    } else {
-      __ jmp(deferred->entry());
-    }
-  } else {
-    Register size = ToRegister(instr->size());
-    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
-  }
-
-  __ bind(deferred->exit());
-
-  if (instr->hydrogen()->MustPrefillWithFiller()) {
-    if (instr->size()->IsConstantOperand()) {
-      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-      __ mov(temp, (size / kPointerSize) - 1);
-    } else {
-      temp = ToRegister(instr->size());
-      __ shr(temp, kPointerSizeLog2);
-      __ dec(temp);
-    }
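-    // Fill the allocated object with one-word filler maps, working from
-    // the top offset down.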
-    Label loop;
-    __ bind(&loop);
-    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
-        isolate()->factory()->one_pointer_filler_map());
-    __ dec(temp);
-    __ j(not_zero, &loop);
-  }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
-  Register result = ToRegister(instr->result());
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ Move(result, Immediate(Smi::FromInt(0)));
-
-  PushSafepointRegistersScope scope(this);
-  if (instr->size()->IsRegister()) {
-    Register size = ToRegister(instr->size());
-    DCHECK(!size.is(result));
-    __ SmiTag(ToRegister(instr->size()));
-    __ push(size);
-  } else {
-    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    if (size >= 0 && size <= Smi::kMaxValue) {
-      __ push(Immediate(Smi::FromInt(size)));
-    } else {
-      // We should never get here at runtime => abort
-      __ int3();
-      return;
-    }
-  }
-
-  int flags = AllocateDoubleAlignFlag::encode(
-      instr->hydrogen()->MustAllocateDoubleAligned());
-  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
-  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
-    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
-  } else {
-    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
-  }
-  __ push(Immediate(Smi::FromInt(flags)));
-
-  CallRuntimeFromDeferred(
-      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
-  __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(eax));
-  __ push(eax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  Label materialized;
-  // Registers will be used as follows:
-  // ecx = literals array.
-  // ebx = regexp literal.
-  // eax = regexp literal clone.
-  // esi = context.
-  int literal_offset =
-      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
-  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
-  __ mov(ebx, FieldOperand(ecx, literal_offset));
-  __ cmp(ebx, factory()->undefined_value());
-  __ j(not_equal, &materialized, Label::kNear);
-
-  // Create the regexp literal using the runtime function.
-  // The result will be in eax.
-  __ push(ecx);
-  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(instr->hydrogen()->pattern()));
-  __ push(Immediate(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
-  __ mov(ebx, eax);
-
-  __ bind(&materialized);
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-  Label allocated, runtime_allocate;
-  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated, Label::kNear);
-
-  __ bind(&runtime_allocate);
-  __ push(ebx);
-  __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-  __ pop(ebx);
-
-  __ bind(&allocated);
-  // Copy the content into the newly allocated memory.
-  // (Unroll copy loop once for better throughput).
-  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
-    __ mov(edx, FieldOperand(ebx, i));
-    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
-    __ mov(FieldOperand(eax, i), edx);
-    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
-  }
-  if ((size % (2 * kPointerSize)) != 0) {
-    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
-    __ mov(FieldOperand(eax, size - kPointerSize), edx);
-  }
-}
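
The copy loop above moves two words per iteration and then patches up an odd
trailing word. A portable mirror of the same unrolled copy, with plain arrays
standing in for the tagged source and destination objects:

    #include <cstddef>
    #include <cstdint>

    // Copy size_in_words words, two at a time, then the odd remainder.
    void CopyUnrolled(uintptr_t* dst, const uintptr_t* src,
                      size_t size_in_words) {
      size_t i = 0;
      for (; i + 1 < size_in_words; i += 2) {
        dst[i] = src[i];          // first word of the pair
        dst[i + 1] = src[i + 1];  // second word of the pair
      }
      if (i < size_in_words) {
        dst[i] = src[i];          // odd trailing word
      }
    }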
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  // Use the fast-case closure allocation code that allocates in new
-  // space for nested functions that don't need literal cloning.
-  bool pretenure = instr->hydrogen()->pretenure();
-  if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->kind());
-    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ push(esi);
-    __ push(Immediate(instr->hydrogen()->shared_info()));
-    __ push(Immediate(pretenure ? factory()->true_value()
-                                : factory()->false_value()));
-    CallRuntime(Runtime::kNewClosure, 3, instr);
-  }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  LOperand* input = instr->value();
-  EmitPushTaggedOperand(input);
-  CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
-  Register input = ToRegister(instr->value());
-  Condition final_branch_condition = EmitTypeofIs(instr, input);
-  if (final_branch_condition != no_condition) {
-    EmitBranch(instr, final_branch_condition);
-  }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
-  Label* true_label = instr->TrueLabel(chunk_);
-  Label* false_label = instr->FalseLabel(chunk_);
-  Handle<String> type_name = instr->type_literal();
-  int left_block = instr->TrueDestination(chunk_);
-  int right_block = instr->FalseDestination(chunk_);
-  int next_block = GetNextEmittedBlock();
-
-  Label::Distance true_distance = left_block == next_block ? Label::kNear
-                                                           : Label::kFar;
-  Label::Distance false_distance = right_block == next_block ? Label::kNear
-                                                             : Label::kFar;
-  Condition final_branch_condition = no_condition;
-  if (String::Equals(type_name, factory()->number_string())) {
-    __ JumpIfSmi(input, true_label, true_distance);
-    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
-           factory()->heap_number_map());
-    final_branch_condition = equal;
-
-  } else if (String::Equals(type_name, factory()->string_string())) {
-    __ JumpIfSmi(input, false_label, false_distance);
-    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
-    __ j(above_equal, false_label, false_distance);
-    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    final_branch_condition = zero;
-
-  } else if (String::Equals(type_name, factory()->symbol_string())) {
-    __ JumpIfSmi(input, false_label, false_distance);
-    __ CmpObjectType(input, SYMBOL_TYPE, input);
-    final_branch_condition = equal;
-
-  } else if (String::Equals(type_name, factory()->boolean_string())) {
-    __ cmp(input, factory()->true_value());
-    __ j(equal, true_label, true_distance);
-    __ cmp(input, factory()->false_value());
-    final_branch_condition = equal;
-
-  } else if (String::Equals(type_name, factory()->undefined_string())) {
-    __ cmp(input, factory()->undefined_value());
-    __ j(equal, true_label, true_distance);
-    __ JumpIfSmi(input, false_label, false_distance);
-    // Check for undetectable objects => true.
-    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
-    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    final_branch_condition = not_zero;
-
-  } else if (String::Equals(type_name, factory()->function_string())) {
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ JumpIfSmi(input, false_label, false_distance);
-    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
-    __ j(equal, true_label, true_distance);
-    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
-    final_branch_condition = equal;
-
-  } else if (String::Equals(type_name, factory()->object_string())) {
-    __ JumpIfSmi(input, false_label, false_distance);
-    __ cmp(input, factory()->null_value());
-    __ j(equal, true_label, true_distance);
-    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
-    __ j(below, false_label, false_distance);
-    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    __ j(above, false_label, false_distance);
-    // Check for undetectable objects => false.
-    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    final_branch_condition = zero;
-
-  } else {
-    __ jmp(false_label, false_distance);
-  }
-  return final_branch_condition;
-}
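
Each branch of EmitTypeofIs above lowers one "typeof x == <literal>"
comparison into raw smi, map, and instance-type checks. The predicates below
are a high-level mirror of what those checks decide; the Value struct and its
flags are illustrative stand-ins for the machine-level tests, not V8 API, and
the boolean and symbol branches (direct value and instance-type compares) are
omitted:

    // Illustrative value descriptor; each flag stands for one of the
    // machine-level tests in the deleted code.
    struct Value {
      bool is_smi = false, is_heap_number = false, is_string = false;
      bool is_undefined = false, is_null = false, is_function = false;
      bool is_function_proxy = false, is_noncallable_spec_object = false;
      bool is_undetectable = false;
    };

    bool TypeofIsNumber(const Value& v) {
      return v.is_smi || v.is_heap_number;
    }
    bool TypeofIsString(const Value& v) {
      // Undetectable strings report false, matching the test_b above.
      return !v.is_smi && v.is_string && !v.is_undetectable;
    }
    bool TypeofIsUndefined(const Value& v) {
      // The undefined value itself, or any undetectable heap object.
      return v.is_undefined || (!v.is_smi && v.is_undetectable);
    }
    bool TypeofIsFunction(const Value& v) {
      return !v.is_smi && (v.is_function || v.is_function_proxy);
    }
    bool TypeofIsObject(const Value& v) {
      return v.is_null || (!v.is_smi && v.is_noncallable_spec_object &&
                           !v.is_undetectable);
    }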
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
-  Register temp = ToRegister(instr->temp());
-
-  EmitIsConstructCall(temp);
-  EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
-  // Get the frame pointer for the calling frame.
-  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &check_frame_marker, Label::kNear);
-  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
-         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
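
EmitIsConstructCall walks one or two frames up the stack: it loads the
caller's frame pointer, hops over an arguments-adaptor frame if the context
slot holds the adaptor sentinel, and finally compares the marker slot. A
sketch of the same walk over an illustrative frame layout; the struct fields
and sentinel parameters are stand-ins for StandardFrameConstants, not V8's
actual layout:

    #include <cstdint>

    struct Frame {
      Frame* caller_fp;            // kCallerFPOffset
      intptr_t context_or_marker;  // kContextOffset
      intptr_t marker;             // kMarkerOffset
    };

    bool IsConstructCall(const Frame* current,
                         intptr_t kAdaptorSentinel,
                         intptr_t kConstructMarker) {
      const Frame* calling = current->caller_fp;
      if (calling->context_or_marker == kAdaptorSentinel) {
        calling = calling->caller_fp;  // skip the arguments adaptor frame
      }
      return calling->marker == kConstructMarker;
    }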
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
-  if (!info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    int current_pc = masm()->pc_offset();
-    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
-      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      __ Nop(padding_size);
-    }
-  }
-  last_lazy_deopt_pc_ = masm()->pc_offset();
-}
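
The padding rule in EnsureSpaceForLazyDeopt reduces to a single expression:
if fewer than space_needed bytes were emitted since the last lazy-deopt site,
pad the difference with nops. Spelled out:

    // Bytes of nop padding required before the next lazy-deopt site.
    int LazyDeoptPadding(int last_lazy_deopt_pc, int space_needed,
                         int current_pc) {
      int wanted_pc = last_lazy_deopt_pc + space_needed;
      return current_pc < wanted_pc ? wanted_pc - current_pc : 0;
    }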
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  last_lazy_deopt_pc_ = masm()->pc_offset();
-  DCHECK(instr->HasEnvironment());
-  LEnvironment* env = instr->environment();
-  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
-  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  Deoptimizer::BailoutType type = instr->hydrogen()->type();
-  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
-  // needed return address), even though the implementation of LAZY and EAGER is
-  // now identical. When LAZY is eventually completely folded into EAGER, remove
-  // the special case below.
-  if (info()->IsStub() && type == Deoptimizer::EAGER) {
-    type = Deoptimizer::LAZY;
-  }
-  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
-  // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
-  // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
-  PushSafepointRegistersScope scope(this);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithLazyDeopt(
-      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  DCHECK(instr->HasEnvironment());
-  LEnvironment* env = instr->environment();
-  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck FINAL : public LDeferredCode {
-   public:
-    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LStackCheck* instr_;
-  };
-
-  DCHECK(instr->HasEnvironment());
-  LEnvironment* env = instr->environment();
-  // There is no LLazyBailout instruction for stack-checks. We have to
-  // prepare for lazy deoptimization explicitly here.
-  if (instr->hydrogen()->is_function_entry()) {
-    // Perform stack overflow check.
-    Label done;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(isolate());
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(above_equal, &done, Label::kNear);
-
-    DCHECK(instr->context()->IsRegister());
-    DCHECK(ToRegister(instr->context()).is(esi));
-    CallCode(isolate()->builtins()->StackCheck(),
-             RelocInfo::CODE_TARGET,
-             instr);
-    __ bind(&done);
-  } else {
-    DCHECK(instr->hydrogen()->is_backwards_branch());
-    // Perform stack overflow check if this goto needs it before jumping.
-    DeferredStackCheck* deferred_stack_check =
-        new(zone()) DeferredStackCheck(this, instr);
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(isolate());
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(below, deferred_stack_check->entry());
-    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
-    __ bind(instr->done_label());
-    deferred_stack_check->SetExit(instr->done_label());
-    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
-    // Don't record a deoptimization index for the safepoint here.
-    // This will be done explicitly when emitting the call and the
-    // safepoint in the deferred code.
-  }
-}
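
Both arms of DoStackCheck compile to the same guard: an unsigned compare of
esp against the isolate's stack limit, taking the slow path only when the
stack pointer has dropped below the limit (above_equal falls through to done,
below jumps to the deferred check). The predicate, spelled out:

    #include <cstdint>

    // True when the StackCheck/StackGuard slow path must run. The compare
    // is unsigned, matching above_equal/below on ia32.
    bool StackGuardNeeded(uintptr_t stack_pointer, uintptr_t stack_limit) {
      return stack_pointer < stack_limit;
    }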
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
-  // This is a pseudo-instruction that ensures that the environment here is
-  // properly registered for deoptimization and records the assembler's PC
-  // offset.
-  LEnvironment* environment = instr->environment();
-
-  // If the environment were already registered, we would have no way of
-  // backpatching it with the spill slot operands.
-  DCHECK(!environment->HasBeenRegistered());
-  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
-  GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  DeoptimizeIf(equal, instr, "undefined");
-
-  __ cmp(eax, isolate()->factory()->null_value());
-  DeoptimizeIf(equal, instr, "null");
-
-  __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr, "Smi");
-
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr, "wrong instance type");
-
-  Label use_cache, call_runtime;
-  __ CheckEnumCache(&call_runtime);
-
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ jmp(&use_cache, Label::kNear);
-
-  // Get the set of properties to enumerate.
-  __ bind(&call_runtime);
-  __ push(eax);
-  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr, "wrong map");
-  __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
-  Register map = ToRegister(instr->map());
-  Register result = ToRegister(instr->result());
-  Label load_cache, done;
-  __ EnumLength(result, map);
-  __ cmp(result, Immediate(Smi::FromInt(0)));
-  __ j(not_equal, &load_cache, Label::kNear);
-  __ mov(result, isolate()->factory()->empty_fixed_array());
-  __ jmp(&done, Label::kNear);
-
-  __ bind(&load_cache);
-  __ LoadInstanceDescriptors(map, result);
-  __ mov(result,
-         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
-  __ mov(result,
-         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
-  __ bind(&done);
-  __ test(result, result);
-  DeoptimizeIf(equal, instr, "no cache");
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
-  Register object = ToRegister(instr->value());
-  __ cmp(ToRegister(instr->map()),
-         FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr, "wrong map");
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
-                                           Register object,
-                                           Register index) {
-  PushSafepointRegistersScope scope(this);
-  __ push(object);
-  __ push(index);
-  __ xor_(esi, esi);
-  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
-  __ StoreToSafepointRegisterSlot(object, eax);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
-   public:
-    DeferredLoadMutableDouble(LCodeGen* codegen,
-                              LLoadFieldByIndex* instr,
-                              Register object,
-                              Register index)
-        : LDeferredCode(codegen),
-          instr_(instr),
-          object_(object),
-          index_(index) {
-    }
-    void Generate() OVERRIDE {
-      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
-    }
-    LInstruction* instr() OVERRIDE { return instr_; }
-
-   private:
-    LLoadFieldByIndex* instr_;
-    Register object_;
-    Register index_;
-  };
-
-  Register object = ToRegister(instr->object());
-  Register index = ToRegister(instr->index());
-
-  DeferredLoadMutableDouble* deferred;
-  deferred = new(zone()) DeferredLoadMutableDouble(
-      this, instr, object, index);
-
-  Label out_of_object, done;
-  __ test(index, Immediate(Smi::FromInt(1)));
-  __ j(not_zero, deferred->entry());
-
-  __ sar(index, 1);
-
-  __ cmp(index, Immediate(0));
-  __ j(less, &out_of_object, Label::kNear);
-  __ mov(object, FieldOperand(object,
-                              index,
-                              times_half_pointer_size,
-                              JSObject::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  __ bind(&out_of_object);
-  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
-  __ neg(index);
-  // Index is now equal to the out-of-object property index plus 1.
-  __ mov(object, FieldOperand(object,
-                              index,
-                              times_half_pointer_size,
-                              FixedArray::kHeaderSize - kPointerSize));
-  __ bind(deferred->exit());
-  __ bind(&done);
-}
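
The bit tests in DoLoadFieldByIndex imply a packed index format: a Smi whose
payload is (field_index << 1) | is_mutable_double, with negative field
indices selecting the out-of-object properties array (the neg plus the
kPointerSize adjustment turn a negative index into a slot number). A decoding
sketch of that format as read from the code above; the struct and its names
are my own, not V8's:

    #include <cstdint>

    struct DecodedFieldIndex {
      bool is_mutable_double;  // low payload bit; deferred path above
      bool in_object;          // non-negative indices are in-object
      int32_t slot;            // slot within the chosen backing store
    };

    DecodedFieldIndex DecodeFieldIndex(int32_t smi) {
      int32_t payload = smi >> 1;                 // untag, like sar(index, 1)
      DecodedFieldIndex d;
      d.is_mutable_double = (payload & 1) != 0;   // test(index, Smi(1))
      int32_t field = payload >> 1;
      d.in_object = field >= 0;
      d.slot = d.in_object ? field : -field - 1;  // the "plus 1" noted above
      return d;
    }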
-
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, 2, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
deleted file mode 100644
index 0918252..0000000
--- a/src/ia32/lithium-codegen-ia32.h
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "src/ia32/lithium-ia32.h"
-
-#include "src/base/logging.h"
-#include "src/deoptimizer.h"
-#include "src/ia32/lithium-gap-resolver-ia32.h"
-#include "src/lithium-codegen.h"
-#include "src/safepoint-table.h"
-#include "src/scopes.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class LGapNode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
-      : LCodeGenBase(chunk, assembler, info),
-        deoptimizations_(4, info->zone()),
-        jump_table_(4, info->zone()),
-        deoptimization_literals_(8, info->zone()),
-        inlined_function_count_(0),
-        scope_(info->scope()),
-        translations_(info->zone()),
-        deferred_(8, info->zone()),
-        dynamic_frame_alignment_(false),
-        support_aligned_spilled_doubles_(false),
-        osr_pc_offset_(-1),
-        frame_is_built_(false),
-        safepoints_(info->zone()),
-        resolver_(this),
-        expected_safepoint_kind_(Safepoint::kSimple) {
-    PopulateDeoptimizationLiteralsWithInlinedFunctions();
-  }
-
-  int LookupDestination(int block_id) const {
-    return chunk()->LookupDestination(block_id);
-  }
-
-  bool IsNextEmittedBlock(int block_id) const {
-    return LookupDestination(block_id) == GetNextEmittedBlock();
-  }
-
-  bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
-  }
-  bool NeedsDeferredFrame() const {
-    return !NeedsEagerFrame() && info()->is_deferred_calling();
-  }
-
-  // Support for converting LOperands to assembler types.
-  Operand ToOperand(LOperand* op) const;
-  Register ToRegister(LOperand* op) const;
-  XMMRegister ToDoubleRegister(LOperand* op) const;
-
-  bool IsInteger32(LConstantOperand* op) const;
-  bool IsSmi(LConstantOperand* op) const;
-  Immediate ToImmediate(LOperand* op, const Representation& r) const {
-    return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
-  }
-  double ToDouble(LConstantOperand* op) const;
-
-  Handle<Object> ToHandle(LConstantOperand* op) const;
-
-  // The operand denoting the second word (the one with a higher address) of
-  // a double stack slot.
-  Operand HighOperand(LOperand* op);
-
-  // Try to generate code for the entire chunk, but it may fail if the
-  // chunk contains constructs we cannot handle. Returns true if the
-  // code generation attempt succeeded.
-  bool GenerateCode();
-
-  // Finish the code by setting stack height, safepoint, and bailout
-  // information on it.
-  void FinishCode(Handle<Code> code);
-
-  // Deferred code support.
-  void DoDeferredNumberTagD(LNumberTagD* instr);
-
-  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
-  void DoDeferredNumberTagIU(LInstruction* instr,
-                             LOperand* value,
-                             LOperand* temp,
-                             IntegerSignedness signedness);
-
-  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
-  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
-  void DoDeferredStackCheck(LStackCheck* instr);
-  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
-  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredAllocate(LAllocate* instr);
-  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                       Label* map_check);
-  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
-  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
-                                   Register object,
-                                   Register index);
-
-  // Parallel move support.
-  void DoParallelMove(LParallelMove* move);
-  void DoGap(LGap* instr);
-
-  // Emit frame translation commands for an environment.
-  void WriteTranslation(LEnvironment* environment, Translation* translation);
-
-  void EnsureRelocSpaceForDeoptimization();
-
-  // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
-  StrictMode strict_mode() const { return info()->strict_mode(); }
-
-  Scope* scope() const { return scope_; }
-
-  XMMRegister double_scratch0() const { return xmm0; }
-
-  void EmitClassOfTest(Label* if_true,
-                       Label* if_false,
-                       Handle<String> class_name,
-                       Register input,
-                       Register temporary,
-                       Register temporary2);
-
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-
-  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
-  void SaveCallerDoubles();
-  void RestoreCallerDoubles();
-
-  // Code generation passes.  Returns true if code generation should
-  // continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
-  void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
-  bool GeneratePrologue();
-  bool GenerateDeferredCode();
-  bool GenerateJumpTable();
-  bool GenerateSafepointTable();
-
-  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
-  void GenerateOsrPrologue();
-
-  enum SafepointMode {
-    RECORD_SIMPLE_SAFEPOINT,
-    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
-  };
-
-  void CallCode(Handle<Code> code,
-                RelocInfo::Mode mode,
-                LInstruction* instr);
-
-  void CallCodeGeneric(Handle<Code> code,
-                       RelocInfo::Mode mode,
-                       LInstruction* instr,
-                       SafepointMode safepoint_mode);
-
-  void CallRuntime(const Runtime::Function* fun,
-                   int argc,
-                   LInstruction* instr,
-                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
-  void CallRuntime(Runtime::FunctionId id,
-                   int argc,
-                   LInstruction* instr) {
-    const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, argc, instr);
-  }
-
-  void CallRuntimeFromDeferred(Runtime::FunctionId id,
-                               int argc,
-                               LInstruction* instr,
-                               LOperand* context);
-
-  void LoadContextFromDeferred(LOperand* context);
-
-  enum EDIState {
-    EDI_UNINITIALIZED,
-    EDI_CONTAINS_TARGET
-  };
-
-  // Generate a direct call to a known function.  Expects the function
-  // to be in edi.
-  void CallKnownFunction(Handle<JSFunction> function,
-                         int formal_parameter_count,
-                         int arity,
-                         LInstruction* instr,
-                         EDIState edi_state);
-
-  void RecordSafepointWithLazyDeopt(LInstruction* instr,
-                                    SafepointMode safepoint_mode);
-
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
-                                            Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
-                    Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
-
-  bool DeoptEveryNTimes() {
-    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
-  }
-
-  void AddToTranslation(LEnvironment* environment,
-                        Translation* translation,
-                        LOperand* op,
-                        bool is_tagged,
-                        bool is_uint32,
-                        int* object_index_pointer,
-                        int* dematerialized_index_pointer);
-  void PopulateDeoptimizationData(Handle<Code> code);
-  int DefineDeoptimizationLiteral(Handle<Object> literal);
-
-  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
-  Register ToRegister(int index) const;
-  XMMRegister ToDoubleRegister(int index) const;
-  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
-  int32_t ToInteger32(LConstantOperand* op) const;
-  ExternalReference ToExternalReference(LConstantOperand* op) const;
-
-  Operand BuildFastArrayOperand(LOperand* elements_pointer,
-                                LOperand* key,
-                                Representation key_representation,
-                                ElementsKind elements_kind,
-                                uint32_t base_offset);
-
-  Operand BuildSeqStringOperand(Register string,
-                                LOperand* index,
-                                String::Encoding encoding);
-
-  void EmitIntegerMathAbs(LMathAbs* instr);
-
-  // Support for recording safepoint and position information.
-  void RecordSafepoint(LPointerMap* pointers,
-                       Safepoint::Kind kind,
-                       int arguments,
-                       Safepoint::DeoptMode mode);
-  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
-  void RecordSafepoint(Safepoint::DeoptMode mode);
-  void RecordSafepointWithRegisters(LPointerMap* pointers,
-                                    int arguments,
-                                    Safepoint::DeoptMode mode);
-
-  void RecordAndWritePosition(int position) OVERRIDE;
-
-  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block);
-
-  // EmitBranch expects to be the last instruction of a block.
-  template<class InstrType>
-  void EmitBranch(InstrType instr, Condition cc);
-  template<class InstrType>
-  void EmitFalseBranch(InstrType instr, Condition cc);
-  void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
-                        XMMRegister result, NumberUntagDMode mode);
-
-  // Emits optimized code for typeof x == "y".  Modifies input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
-
-  // Emits optimized code for %_IsObject(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsObject(Register input,
-                         Register temp1,
-                         Label* is_not_object,
-                         Label* is_object);
-
-  // Emits optimized code for %_IsString(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsString(Register input,
-                         Register temp1,
-                         Label* is_not_string,
-                         SmiCheck check_needed);
-
-  // Emits optimized code for %_IsConstructCall().
-  // Caller should branch on equal condition.
-  void EmitIsConstructCall(Register temp);
-
-  // Emits optimized code to deep-copy the contents of statically known
-  // object graphs (e.g. object literal boilerplate).
-  void EmitDeepCopy(Handle<JSObject> object,
-                    Register result,
-                    Register source,
-                    int* offset,
-                    AllocationSiteMode mode);
-
-  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
-  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
-  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
-  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
-  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
-  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
-  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
-  template <class T>
-  void EmitVectorLoadICRegisters(T* instr);
-
-  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
-
-  // Emits code for pushing either a tagged constant, a (non-double)
-  // register, or a stack slot operand.
-  void EmitPushTaggedOperand(LOperand* operand);
-
-  friend class LGapResolver;
-
-#ifdef _MSC_VER
-  // On Windows, you may not access the stack more than one page below
-  // the most recently mapped page. To make the allocated area accessible
-  // at arbitrary offsets, we write an arbitrary value to each page in the
-  // range esp + offset - page_size .. esp, in turn.
-  void MakeSureStackPagesMapped(int offset);
-#endif
-
-  ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
-  ZoneList<Handle<Object> > deoptimization_literals_;
-  int inlined_function_count_;
-  Scope* const scope_;
-  TranslationBuffer translations_;
-  ZoneList<LDeferredCode*> deferred_;
-  bool dynamic_frame_alignment_;
-  bool support_aligned_spilled_doubles_;
-  int osr_pc_offset_;
-  bool frame_is_built_;
-
-  // Builder that keeps track of safepoints in the code. The table
-  // itself is emitted at the end of the generated code.
-  SafepointTableBuilder safepoints_;
-
-  // Compiles a set of parallel moves into a sequential list of moves.
-  LGapResolver resolver_;
-
-  Safepoint::Kind expected_safepoint_kind_;
-
-  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
-   public:
-    explicit PushSafepointRegistersScope(LCodeGen* codegen)
-        : codegen_(codegen) {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->masm_->PushSafepointRegisters();
-      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-      DCHECK(codegen_->info()->is_calling());
-    }
-
-    ~PushSafepointRegistersScope() {
-      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
-      codegen_->masm_->PopSafepointRegisters();
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
-
-   private:
-    LCodeGen* codegen_;
-  };
-
-  friend class LDeferredCode;
-  friend class LEnvironment;
-  friend class SafepointGenerator;
-  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
-  explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen),
-        external_exit_(NULL),
-        instruction_index_(codegen->current_instruction_) {
-    codegen->AddDeferredCode(this);
-  }
-
-  virtual ~LDeferredCode() {}
-  virtual void Generate() = 0;
-  virtual LInstruction* instr() = 0;
-
-  void SetExit(Label* exit) { external_exit_ = exit; }
-  Label* entry() { return &entry_; }
-  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
-  int instruction_index() const { return instruction_index_; }
-
- protected:
-  LCodeGen* codegen() const { return codegen_; }
-  MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
-  LCodeGen* codegen_;
-  Label entry_;
-  Label exit_;
-  Label* external_exit_;
-  Label done_;
-  int instruction_index_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
deleted file mode 100644
index 682503b..0000000
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ia32/lithium-gap-resolver-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
-    : cgen_(owner),
-      moves_(32, owner->zone()),
-      source_uses_(),
-      destination_uses_(),
-      spilled_register_(-1) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  DCHECK(HasBeenReset());
-  // Build up a worklist of moves.
-  BuildInitialMoveList(parallel_move);
-
-  for (int i = 0; i < moves_.length(); ++i) {
-    LMoveOperands move = moves_[i];
-    // Skip constant moves so they are performed last.  They don't block
-    // other moves, and skipping such moves with register destinations
-    // keeps those registers free for the whole algorithm.
-    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
-      PerformMove(i);
-    }
-  }
-
-  // Perform the moves with constant sources.
-  for (int i = 0; i < moves_.length(); ++i) {
-    if (!moves_[i].IsEliminated()) {
-      DCHECK(moves_[i].source()->IsConstantOperand());
-      EmitMove(i);
-    }
-  }
-
-  Finish();
-  DCHECK(HasBeenReset());
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
-  // Perform a linear sweep of the moves to add them to the initial list of
-  // moves to perform, ignoring any move that is redundant (the source is
-  // the same as the destination, the destination is ignored and
-  // unallocated, or the move was already eliminated).
-  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) AddMove(move);
-  }
-  Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
-  // Each call to this function performs a move and deletes it from the move
-  // graph.  We first recursively perform any move blocking this one.  We
-  // mark a move as "pending" on entry to PerformMove in order to detect
-  // cycles in the move graph.  We use operand swaps to resolve cycles,
-  // which means that a call to PerformMove could change any source operand
-  // in the move graph.
-
-  DCHECK(!moves_[index].IsPending());
-  DCHECK(!moves_[index].IsRedundant());
-
-  // Clear this move's destination to indicate a pending move.  The actual
-  // destination is saved on the side.
-  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
-  LOperand* destination = moves_[index].destination();
-  moves_[index].set_destination(NULL);
-
-  // Perform a depth-first traversal of the move graph to resolve
-  // dependencies.  Any unperformed, unpending move with a source the same
-  // as this one's destination blocks this one so recursively perform all
-  // such moves.
-  for (int i = 0; i < moves_.length(); ++i) {
-    LMoveOperands other_move = moves_[i];
-    if (other_move.Blocks(destination) && !other_move.IsPending()) {
-      // Though PerformMove can change any source operand in the move graph,
-      // this call cannot create a blocking move via a swap (this loop does
-      // not miss any).  Assume there is a non-blocking move with source A
-      // and this move is blocked on source B and there is a swap of A and
-      // B.  Then A and B must be involved in the same cycle (or they would
-      // not be swapped).  Since this move's destination is B and there is
-      // only a single incoming edge to an operand, this move must also be
-      // involved in the same cycle.  In that case, the blocking move will
-      // be created but will be "pending" when we return from PerformMove.
-      PerformMove(i);
-    }
-  }
-
-  // We are about to resolve this move and don't need it marked as
-  // pending, so restore its destination.
-  moves_[index].set_destination(destination);
-
-  // This move's source may have changed due to swaps to resolve cycles and
-  // so it may now be the last move in the cycle.  If so, remove it.
-  if (moves_[index].source()->Equals(destination)) {
-    RemoveMove(index);
-    return;
-  }
-
-  // The move may be blocked on a (at most one) pending move, in which case
-  // we have a cycle.  Search for such a blocking move and perform a swap to
-  // resolve it.
-  for (int i = 0; i < moves_.length(); ++i) {
-    LMoveOperands other_move = moves_[i];
-    if (other_move.Blocks(destination)) {
-      DCHECK(other_move.IsPending());
-      EmitSwap(index);
-      return;
-    }
-  }
-
-  // This move is not blocked.
-  EmitMove(index);
-}
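
PerformMove is the heart of the gap resolver: depth-first perform every move
that blocks this one, detect a cycle via the pending mark, and break the
cycle with a swap. A compact mirror of that control flow on integer
locations; constant-source handling and the register use counts are omitted,
and emit_move/emit_swap are illustrative stand-ins for EmitMove/EmitSwap:

    #include <vector>

    struct Move {
      int src, dst;
      bool pending = false, done = false;
    };

    void Perform(std::vector<Move>& moves, size_t i,
                 void (*emit_move)(int, int), void (*emit_swap)(int, int)) {
      moves[i].pending = true;
      const int dst = moves[i].dst;
      // Recursively perform every non-pending move reading our destination.
      for (size_t j = 0; j < moves.size(); ++j) {
        if (!moves[j].done && !moves[j].pending && moves[j].src == dst) {
          Perform(moves, j, emit_move, emit_swap);
        }
      }
      moves[i].pending = false;
      // A swap below us may have retargeted this move onto itself.
      if (moves[i].src == dst) {
        moves[i].done = true;
        return;
      }
      // A still-pending reader of our destination means a cycle: break it
      // with a swap and retarget every move reading either location.
      for (size_t j = 0; j < moves.size(); ++j) {
        if (!moves[j].done && moves[j].pending && moves[j].src == dst) {
          const int src = moves[i].src;
          emit_swap(src, dst);
          moves[i].done = true;
          for (size_t k = 0; k < moves.size(); ++k) {
            if (moves[k].done) continue;
            if (moves[k].src == src) moves[k].src = dst;
            else if (moves[k].src == dst) moves[k].src = src;
          }
          return;
        }
      }
      emit_move(moves[i].src, dst);  // not blocked: perform directly
      moves[i].done = true;
    }

Driving Perform over every not-yet-done, non-redundant move, as Resolve does
above, emits one move or swap per graph edge and terminates because each call
retires at least one move.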
-
-
-void LGapResolver::AddMove(LMoveOperands move) {
-  LOperand* source = move.source();
-  if (source->IsRegister()) ++source_uses_[source->index()];
-
-  LOperand* destination = move.destination();
-  if (destination->IsRegister()) ++destination_uses_[destination->index()];
-
-  moves_.Add(move, cgen_->zone());
-}
-
-
-void LGapResolver::RemoveMove(int index) {
-  LOperand* source = moves_[index].source();
-  if (source->IsRegister()) {
-    --source_uses_[source->index()];
-    DCHECK(source_uses_[source->index()] >= 0);
-  }
-
-  LOperand* destination = moves_[index].destination();
-  if (destination->IsRegister()) {
-    --destination_uses_[destination->index()];
-    DCHECK(destination_uses_[destination->index()] >= 0);
-  }
-
-  moves_[index].Eliminate();
-}
-
-
-int LGapResolver::CountSourceUses(LOperand* operand) {
-  int count = 0;
-  for (int i = 0; i < moves_.length(); ++i) {
-    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
-      ++count;
-    }
-  }
-  return count;
-}
-
-
-Register LGapResolver::GetFreeRegisterNot(Register reg) {
-  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
-      return Register::FromAllocationIndex(i);
-    }
-  }
-  return no_reg;
-}
-
-
-bool LGapResolver::HasBeenReset() {
-  if (!moves_.is_empty()) return false;
-  if (spilled_register_ >= 0) return false;
-
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    if (source_uses_[i] != 0) return false;
-    if (destination_uses_[i] != 0) return false;
-  }
-  return true;
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
-  // No operand should be the destination for more than one move.
-  for (int i = 0; i < moves_.length(); ++i) {
-    LOperand* destination = moves_[i].destination();
-    for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
-    }
-  }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Finish() {
-  if (spilled_register_ >= 0) {
-    __ pop(Register::FromAllocationIndex(spilled_register_));
-    spilled_register_ = -1;
-  }
-  moves_.Rewind(0);
-}
-
-
-void LGapResolver::EnsureRestored(LOperand* operand) {
-  if (operand->IsRegister() && operand->index() == spilled_register_) {
-    __ pop(Register::FromAllocationIndex(spilled_register_));
-    spilled_register_ = -1;
-  }
-}
-
-
-Register LGapResolver::EnsureTempRegister() {
-  // 1. We may have already spilled to create a temp register.
-  if (spilled_register_ >= 0) {
-    return Register::FromAllocationIndex(spilled_register_);
-  }
-
-  // 2. We may have a free register that we can use without spilling.
-  Register free = GetFreeRegisterNot(no_reg);
-  if (!free.is(no_reg)) return free;
-
-  // 3. Prefer to spill a register that is not used in any remaining move
-  // because it will not need to be restored until the end.
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
-      Register scratch = Register::FromAllocationIndex(i);
-      __ push(scratch);
-      spilled_register_ = i;
-      return scratch;
-    }
-  }
-
-  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
-  Register scratch = Register::FromAllocationIndex(0);
-  __ push(scratch);
-  spilled_register_ = 0;
-  return scratch;
-}
-
-
-void LGapResolver::EmitMove(int index) {
-  LOperand* source = moves_[index].source();
-  LOperand* destination = moves_[index].destination();
-  EnsureRestored(source);
-  EnsureRestored(destination);
-
-  // Dispatch on the source and destination operand kinds.  Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = cgen_->ToRegister(source);
-    Operand dst = cgen_->ToOperand(destination);
-    __ mov(dst, src);
-
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Operand src = cgen_->ToOperand(source);
-    if (destination->IsRegister()) {
-      Register dst = cgen_->ToRegister(destination);
-      __ mov(dst, src);
-    } else {
-      // Spill on demand to use a temporary register for memory-to-memory
-      // moves.
-      Register tmp = EnsureTempRegister();
-      Operand dst = cgen_->ToOperand(destination);
-      __ mov(tmp, src);
-      __ mov(dst, tmp);
-    }
-
-  } else if (source->IsConstantOperand()) {
-    LConstantOperand* constant_source = LConstantOperand::cast(source);
-    if (destination->IsRegister()) {
-      Register dst = cgen_->ToRegister(destination);
-      Representation r = cgen_->IsSmi(constant_source)
-          ? Representation::Smi() : Representation::Integer32();
-      if (cgen_->IsInteger32(constant_source)) {
-        __ Move(dst, cgen_->ToImmediate(constant_source, r));
-      } else {
-        __ LoadObject(dst, cgen_->ToHandle(constant_source));
-      }
-    } else if (destination->IsDoubleRegister()) {
-      double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = bit_cast<uint64_t, double>(v);
-      int32_t lower = static_cast<int32_t>(int_val);
-      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      if (int_val == 0) {
-        __ xorps(dst, dst);
-      } else {
-        __ push(Immediate(upper));
-        __ push(Immediate(lower));
-        __ movsd(dst, Operand(esp, 0));
-        __ add(esp, Immediate(kDoubleSize));
-      }
-    } else {
-      DCHECK(destination->IsStackSlot());
-      Operand dst = cgen_->ToOperand(destination);
-      Representation r = cgen_->IsSmi(constant_source)
-          ? Representation::Smi() : Representation::Integer32();
-      if (cgen_->IsInteger32(constant_source)) {
-        __ Move(dst, cgen_->ToImmediate(constant_source, r));
-      } else {
-        Register tmp = EnsureTempRegister();
-        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
-        __ mov(dst, tmp);
-      }
-    }
-
-  } else if (source->IsDoubleRegister()) {
-    XMMRegister src = cgen_->ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      __ movaps(dst, src);
-    } else {
-      DCHECK(destination->IsDoubleStackSlot());
-      Operand dst = cgen_->ToOperand(destination);
-      __ movsd(dst, src);
-    }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
-    Operand src = cgen_->ToOperand(source);
-    if (destination->IsDoubleRegister()) {
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      __ movsd(dst, src);
-    } else {
-      // We rely on having xmm0 available as a fixed scratch register.
-      Operand dst = cgen_->ToOperand(destination);
-      __ movsd(xmm0, src);
-      __ movsd(dst, xmm0);
-    }
-  } else {
-    UNREACHABLE();
-  }
-
-  RemoveMove(index);
-}
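
The constant-to-XMM branch of EmitMove materializes a double by pushing its
two 32-bit halves and loading through the stack; only the all-zero bit
pattern (+0.0, not -0.0) takes the xorps shortcut. The bit split it relies
on, in portable C++:

    #include <cstdint>
    #include <cstring>

    // Split a double into the two 32-bit words pushed above (the lower
    // word is pushed second, so it lands at the lower stack address).
    void SplitDouble(double v, int32_t* lower, int32_t* upper) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);        // bit_cast<uint64_t>(v)
      *lower = static_cast<int32_t>(bits);        // least significant half
      *upper = static_cast<int32_t>(bits >> 32);  // most significant half
    }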
-
-
-void LGapResolver::EmitSwap(int index) {
-  LOperand* source = moves_[index].source();
-  LOperand* destination = moves_[index].destination();
-  EnsureRestored(source);
-  EnsureRestored(destination);
-
-  // Dispatch on the source and destination operand kinds.  Not all
-  // combinations are possible.
-  if (source->IsRegister() && destination->IsRegister()) {
-    // Register-register.
-    Register src = cgen_->ToRegister(source);
-    Register dst = cgen_->ToRegister(destination);
-    __ xchg(dst, src);
-
-  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
-             (source->IsStackSlot() && destination->IsRegister())) {
-    // Register-memory.  Use a free register as a temp if possible.  Do not
-    // spill on demand because the simple spill implementation cannot avoid
-    // spilling src at this point.
-    Register tmp = GetFreeRegisterNot(no_reg);
-    Register reg =
-        cgen_->ToRegister(source->IsRegister() ? source : destination);
-    Operand mem =
-        cgen_->ToOperand(source->IsRegister() ? destination : source);
-    if (tmp.is(no_reg)) {
-      __ xor_(reg, mem);
-      __ xor_(mem, reg);
-      __ xor_(reg, mem);
-    } else {
-      __ mov(tmp, mem);
-      __ mov(mem, reg);
-      __ mov(reg, tmp);
-    }
-
-  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
-    // Memory-memory.  Spill on demand to use a temporary.  If there is a
-    // free register after that, use it as a second temporary.
-    Register tmp0 = EnsureTempRegister();
-    Register tmp1 = GetFreeRegisterNot(tmp0);
-    Operand src = cgen_->ToOperand(source);
-    Operand dst = cgen_->ToOperand(destination);
-    if (tmp1.is(no_reg)) {
-      // Only one temp register available to us.
-      __ mov(tmp0, dst);
-      __ xor_(tmp0, src);
-      __ xor_(src, tmp0);
-      __ xor_(tmp0, src);
-      __ mov(dst, tmp0);
-    } else {
-      __ mov(tmp0, dst);
-      __ mov(tmp1, src);
-      __ mov(dst, tmp1);
-      __ mov(src, tmp0);
-    }
-  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
-    // XMM register-register swap. We rely on having xmm0
-    // available as a fixed scratch register.
-    XMMRegister src = cgen_->ToDoubleRegister(source);
-    XMMRegister dst = cgen_->ToDoubleRegister(destination);
-    __ movaps(xmm0, src);
-    __ movaps(src, dst);
-    __ movaps(dst, xmm0);
-  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
-    // XMM register-memory swap.  We rely on having xmm0
-    // available as a fixed scratch register.
-    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
-    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
-                                              ? source
-                                              : destination);
-    Operand other =
-        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
-    __ movsd(xmm0, other);
-    __ movsd(other, reg);
-    __ movaps(reg, xmm0);
-  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
-    // Double-width memory-to-memory.  Spill on demand to use a general
-    // purpose temporary register and also rely on having xmm0 available as
-    // a fixed scratch register.
-    Register tmp = EnsureTempRegister();
-    Operand src0 = cgen_->ToOperand(source);
-    Operand src1 = cgen_->HighOperand(source);
-    Operand dst0 = cgen_->ToOperand(destination);
-    Operand dst1 = cgen_->HighOperand(destination);
-    __ movsd(xmm0, dst0);  // Save destination in xmm0.
-    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
-    __ mov(dst0, tmp);
-    __ mov(tmp, src1);
-    __ mov(dst1, tmp);
-    __ movsd(src0, xmm0);
-
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
-  }
-
-  // The swap of source and destination has executed a move from source to
-  // destination.
-  RemoveMove(index);
-
-  // Any unperformed (including pending) move with a source of either
-  // this move's source or destination needs to have its source
-  // changed to reflect the state of affairs after the swap.
-  for (int i = 0; i < moves_.length(); ++i) {
-    LMoveOperands other_move = moves_[i];
-    if (other_move.Blocks(source)) {
-      moves_[i].set_source(destination);
-    } else if (other_move.Blocks(destination)) {
-      moves_[i].set_source(source);
-    }
-  }
-
-  // In addition to swapping the actual uses as sources, we need to update
-  // the use counts.
-  if (source->IsRegister() && destination->IsRegister()) {
-    int temp = source_uses_[source->index()];
-    source_uses_[source->index()] = source_uses_[destination->index()];
-    source_uses_[destination->index()] = temp;
-  } else if (source->IsRegister()) {
-    // We don't have use counts for non-register operands like destination.
-    // Compute those counts now.
-    source_uses_[source->index()] = CountSourceUses(source);
-  } else if (destination->IsRegister()) {
-    source_uses_[destination->index()] = CountSourceUses(destination);
-  }
-}
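
Two of the branches above fall back to the classic three-xor exchange when no
temporary register is available. The same trick on plain integers, with the
usual caveat spelled out:

    #include <cstdint>

    // Exchange *a and *b without a temporary. If a and b alias the same
    // location the sequence zeroes it; the codegen above never hits that
    // case because each xor always pairs a register with a memory operand.
    void XorSwap(uint32_t* a, uint32_t* b) {
      *a ^= *b;  // a = a0 ^ b0
      *b ^= *a;  // b = b0 ^ (a0 ^ b0) = a0
      *a ^= *b;  // a = (a0 ^ b0) ^ a0 = b0
    }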
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-gap-resolver-ia32.h b/src/ia32/lithium-gap-resolver-ia32.h
deleted file mode 100644
index 43df245..0000000
--- a/src/ia32/lithium-gap-resolver-ia32.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-
-#include "src/v8.h"
-
-#include "src/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver FINAL BASE_EMBEDDED {
- public:
-  explicit LGapResolver(LCodeGen* owner);
-
-  // Resolve a set of parallel moves, emitting assembler instructions.
-  void Resolve(LParallelMove* parallel_move);
-
- private:
-  // Build the initial list of moves.
-  void BuildInitialMoveList(LParallelMove* parallel_move);
-
-  // Perform the move at the moves_ index in question (possibly requiring
-  // other moves to satisfy dependencies).
-  void PerformMove(int index);
-
-  // Emit any code necessary at the end of a gap move.
-  void Finish();
-
-  // Add or delete a move from the move graph without emitting any code.
-  // Used to build up the graph and remove trivial moves.
-  void AddMove(LMoveOperands move);
-  void RemoveMove(int index);
-
-  // Report the count of uses of operand as a source in a not-yet-performed
-  // move.  Used to rebuild use counts.
-  int CountSourceUses(LOperand* operand);
-
-  // Emit a move and remove it from the move graph.
-  void EmitMove(int index);
-
-  // Execute a move by emitting a swap of two operands.  The move from
-  // source to destination is removed from the move graph.
-  void EmitSwap(int index);
-
-  // Ensure that the given operand is not spilled.
-  void EnsureRestored(LOperand* operand);
-
-  // Return a register that can be used as a temp register, spilling
-  // something if necessary.
-  Register EnsureTempRegister();
-
-  // Return a known free register different from the given one (which may
-  // be no_reg, in which case any free register will do), or no_reg if
-  // there is no such register.
-  Register GetFreeRegisterNot(Register reg);
-
-  // Verify that the state is the initial one, ready to resolve a single
-  // parallel move.
-  bool HasBeenReset();
-
-  // Verify the move list before performing moves.
-  void Verify();
-
-  LCodeGen* cgen_;
-
-  // List of moves not yet resolved.
-  ZoneList<LMoveOperands> moves_;
-
-  // Source and destination use counts for the general purpose registers.
-  int source_uses_[Register::kMaxNumAllocatableRegisters];
-  int destination_uses_[Register::kMaxNumAllocatableRegisters];
-
-  // If we had to spill on demand, the currently spilled register's
-  // allocation index.
-  int spilled_register_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
deleted file mode 100644
index 3be2fc4..0000000
--- a/src/ia32/lithium-ia32.cc
+++ /dev/null
@@ -1,2722 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <sstream>
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/hydrogen-osr.h"
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/lithium-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type)                            \
-  void L##type::CompileToNative(LCodeGen* generator) {  \
-    generator->Do##type(this);                          \
-  }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as temporaries and
-  // outputs because all registers are blocked by the calling convention.
-  // Input operands must use a fixed register, a use-at-start policy, or
-  // a non-register policy.
-  DCHECK(Output() == NULL ||
-         LUnallocated::cast(Output())->HasFixedPolicy() ||
-         !LUnallocated::cast(Output())->HasRegisterPolicy());
-  for (UseIterator it(this); !it.Done(); it.Advance()) {
-    LUnallocated* operand = LUnallocated::cast(it.Current());
-    DCHECK(operand->HasFixedPolicy() ||
-           operand->IsUsedAtStart());
-  }
-  for (TempIterator it(this); !it.Done(); it.Advance()) {
-    LUnallocated* operand = LUnallocated::cast(it.Current());
-    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
-  }
-}
-#endif
-
-
-bool LInstruction::HasDoubleRegisterResult() {
-  return HasResult() && result()->IsDoubleRegister();
-}
-
-
-bool LInstruction::HasDoubleRegisterInput() {
-  for (int i = 0; i < InputCount(); i++) {
-    LOperand* op = InputAt(i);
-    if (op != NULL && op->IsDoubleRegister()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
-  stream->Add("%s ", this->Mnemonic());
-
-  PrintOutputOperandTo(stream);
-
-  PrintDataTo(stream);
-
-  if (HasEnvironment()) {
-    stream->Add(" ");
-    environment()->PrintTo(stream);
-  }
-
-  if (HasPointerMap()) {
-    stream->Add(" ");
-    pointer_map()->PrintTo(stream);
-  }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  for (int i = 0; i < InputCount(); i++) {
-    if (i > 0) stream->Add(" ");
-    if (InputAt(i) == NULL) {
-      stream->Add("NULL");
-    } else {
-      InputAt(i)->PrintTo(stream);
-    }
-  }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
-  if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
-  LGap::PrintDataTo(stream);
-  LLabel* rep = replacement();
-  if (rep != NULL) {
-    stream->Add(" Dead block replaced with B%d", rep->block_id());
-  }
-}
-
-
-bool LGap::IsRedundant() const {
-  for (int i = 0; i < 4; i++) {
-    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
-  for (int i = 0; i < 4; i++) {
-    stream->Add("(");
-    if (parallel_moves_[i] != NULL) {
-      parallel_moves_[i]->PrintDataTo(stream);
-    }
-    stream->Add(") ");
-  }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
-  switch (op()) {
-    case Token::ADD: return "add-d";
-    case Token::SUB: return "sub-d";
-    case Token::MUL: return "mul-d";
-    case Token::DIV: return "div-d";
-    case Token::MOD: return "mod-d";
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
-  switch (op()) {
-    case Token::ADD: return "add-t";
-    case Token::SUB: return "sub-t";
-    case Token::MUL: return "mul-t";
-    case Token::MOD: return "mod-t";
-    case Token::DIV: return "div-t";
-    case Token::BIT_AND: return "bit-and-t";
-    case Token::BIT_OR: return "bit-or-t";
-    case Token::BIT_XOR: return "bit-xor-t";
-    case Token::ROR: return "ror-t";
-    case Token::SHL: return "sal-t";
-    case Token::SAR: return "sar-t";
-    case Token::SHR: return "shr-t";
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
-  return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
-  stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
-  value()->PrintTo(stream);
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if ");
-  left()->PrintTo(stream);
-  stream->Add(" %s ", Token::String(op()));
-  right()->PrintTo(stream);
-  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_object(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_string(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_smi(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_undetectable(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if string_compare(");
-  left()->PrintTo(stream);
-  right()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_instance_type(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  value()->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if class_of_test(");
-  value()->PrintTo(stream);
-  stream->Add(", \"%o\") then B%d else B%d",
-              *hydrogen()->class_name(),
-              true_block_id(),
-              false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if typeof ");
-  value()->PrintTo(stream);
-  stream->Add(" == \"%s\" then B%d else B%d",
-              hydrogen()->type_literal()->ToCString().get(),
-              true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
-  stream->Add(" = ");
-  function()->PrintTo(stream);
-  stream->Add(".code_entry = ");
-  code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
-  stream->Add(" = ");
-  base_object()->PrintTo(stream);
-  stream->Add(" + ");
-  offset()->PrintTo(stream);
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
-  for (int i = 0; i < InputCount(); i++) {
-    InputAt(i)->PrintTo(stream);
-    stream->Add(" ");
-  }
-  stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add("[%d] <- ", slot_index());
-  value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  constructor()->PrintTo(stream);
-  stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  constructor()->PrintTo(stream);
-  stream->Add(" #%d / ", arity());
-  ElementsKind kind = hydrogen()->elements_kind();
-  stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
-  arguments()->PrintTo(stream);
-
-  stream->Add(" length ");
-  length()->PrintTo(stream);
-
-  stream->Add(" index ");
-  index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
-  // Skip a slot if necessary for a double-width slot.
-  if (kind == DOUBLE_REGISTERS) {
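-    // Bumping the count and OR-ing with 1 makes the index returned below
-    // always odd, so the double-width slot gets a full two-slot pair.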
-    spill_slot_count_++;
-    spill_slot_count_ |= 1;
-    num_double_slots_++;
-  }
-  return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
-  int index = GetNextSpillIndex(kind);
-  if (kind == DOUBLE_REGISTERS) {
-    return LDoubleStackSlot::Create(index, zone());
-  } else {
-    DCHECK(kind == GENERAL_REGISTERS);
-    return LStackSlot::Create(index, zone());
-  }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  std::ostringstream os;
-  os << hydrogen()->access() << " <- ";
-  stream->Add(os.str().c_str());
-  value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
-  elements()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  if (hydrogen()->IsDehoisted()) {
-    stream->Add(" + %d]", base_offset());
-  } else {
-    stream->Add("]");
-  }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
-  elements()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  if (hydrogen()->IsDehoisted()) {
-    stream->Add(" + %d] <-", base_offset());
-  } else {
-    stream->Add("] <- ");
-  }
-
-  if (value() == NULL) {
-    DCHECK(hydrogen()->IsConstantHoleStore() &&
-           hydrogen()->value()->representation().IsDouble());
-    stream->Add("<the hole(nan)>");
-  } else {
-    value()->PrintTo(stream);
-  }
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
-  DCHECK(is_unused());
-  chunk_ = new(zone()) LPlatformChunk(info(), graph());
-  LPhase phase("L_Building chunk", chunk_);
-  status_ = BUILDING;
-
-  // Reserve the first spill slot for the state of dynamic alignment.
-  if (info()->IsOptimizing()) {
-    int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    DCHECK_EQ(alignment_state_index, 0);
-    USE(alignment_state_index);
-  }
-
-  // If compiling for OSR, reserve space for the unoptimized frame,
-  // which will be subsumed into this frame.
-  if (graph()->has_osr()) {
-    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
-      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    }
-  }
-
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
-  for (int i = 0; i < blocks->length(); i++) {
-    HBasicBlock* next = NULL;
-    if (i < blocks->length() - 1) next = blocks->at(i + 1);
-    DoBasicBlock(blocks->at(i), next);
-    if (is_aborted()) return NULL;
-  }
-  status_ = DONE;
-  return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
-                                  Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                                  XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
-  return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
-  return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
-  return Use(value,
-             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                                      LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
-                                             LUnallocated::USED_AT_START));
-}
-
-
-static inline bool CanBeImmediateConstant(HValue* value) {
-  return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
-  return CanBeImmediateConstant(value)
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
-  return CanBeImmediateConstant(value)
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
-                                            Register fixed_register) {
-  return CanBeImmediateConstant(value)
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseFixed(value, fixed_register);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
-  return CanBeImmediateConstant(value)
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
-  return CanBeImmediateConstant(value)
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
-  return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
-  return value->IsConstant()
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
-  if (value->EmitAtUses()) {
-    HInstruction* instr = HInstruction::cast(value);
-    VisitInstruction(instr);
-  }
-  operand->set_virtual_register(value->id());
-  return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
-                                    LUnallocated* result) {
-  result->set_virtual_register(current_instruction_->id());
-  instr->set_result(result);
-  return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
-    LTemplateResultInstruction<1>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
-    LTemplateResultInstruction<1>* instr,
-    int index) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
-    LTemplateResultInstruction<1>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
-                                         Register reg) {
-  return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
-    LTemplateResultInstruction<1>* instr,
-    XMMRegister reg) {
-  return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
-  HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(
-      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
-  return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
-                                        HInstruction* hinstr,
-                                        CanDeoptimize can_deoptimize) {
-  info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
-  instr->VerifyCall();
-#endif
-  instr->MarkAsCall();
-  instr = AssignPointerMap(instr);
-
-  // If the instruction does not have side effects, lazy deoptimization
-  // after the call will try to deoptimize to the point before the call.
-  // Thus we still need to attach an environment to this call even if
-  // the call sequence cannot deoptimize eagerly.
-  bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
-      !hinstr->HasObservableSideEffects();
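-  // (Instructions with observable side effects get their lazy-bailout
-  // environment from the HSimulate that follows them; see AddInstruction.)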
-  if (needs_environment && !instr->HasEnvironment()) {
-    instr = AssignEnvironment(instr);
-    // We can't really figure out if the environment is needed or not.
-    instr->environment()->set_has_been_used();
-  }
-
-  return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  DCHECK(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
-  return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand =
-      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  int vreg = allocator_->GetVirtualRegister();
-  if (!allocator_->AllocationOk()) {
-    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
-    vreg = 0;
-  }
-  operand->set_virtual_register(vreg);
-  return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
-  LUnallocated* operand = ToUnallocated(reg);
-  DCHECK(operand->HasFixedPolicy());
-  return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
-  LUnallocated* operand = ToUnallocated(reg);
-  DCHECK(operand->HasFixedPolicy());
-  return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
-  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
-                                     HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(instr->representation()));
-    DCHECK(instr->right()->representation().Equals(instr->representation()));
-    LOperand* left = UseRegisterAtStart(instr->left());
-
-    HValue* right_value = instr->right();
-    LOperand* right = NULL;
-    int constant_value = 0;
-    bool does_deopt = false;
-    if (right_value->IsConstant()) {
-      HConstant* constant = HConstant::cast(right_value);
-      right = chunk_->DefineConstantOperand(constant);
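-      // Only the low five bits of the shift count are used on ia32.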
-      constant_value = constant->Integer32Value() & 0x1f;
-      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
-      // truncated to smi.
-      if (instr->representation().IsSmi() && constant_value > 0) {
-        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
-      }
-    } else {
-      right = UseFixed(right_value, ecx);
-    }
-
-    // Shift operations can only deoptimize if we do a logical shift by 0 and
-    // the result cannot be truncated to int32.
-    if (op == Token::SHR && constant_value == 0) {
-      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
-    }
-
-    LInstruction* result =
-        DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
-    return does_deopt ? AssignEnvironment(result) : result;
-  } else {
-    return DoArithmeticT(op, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
-                                           HArithmeticBinaryOperation* instr) {
-  DCHECK(instr->representation().IsDouble());
-  DCHECK(instr->left()->representation().IsDouble());
-  DCHECK(instr->right()->representation().IsDouble());
-  if (op == Token::MOD) {
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
-    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
-    return MarkAsCall(DefineSameAsFirst(result), instr);
-  } else {
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
-    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
-    return DefineSameAsFirst(result);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
-                                           HBinaryOperation* instr) {
-  HValue* left = instr->left();
-  HValue* right = instr->right();
-  DCHECK(left->representation().IsTagged());
-  DCHECK(right->representation().IsTagged());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left_operand = UseFixed(left, edx);
-  LOperand* right_operand = UseFixed(right, eax);
-  LArithmeticT* result =
-      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  DCHECK(is_building());
-  current_block_ = block;
-  next_block_ = next_block;
-  if (block->IsStartBlock()) {
-    block->UpdateEnvironment(graph_->start_environment());
-    argument_count_ = 0;
-  } else if (block->predecessors()->length() == 1) {
-    // We have a single predecessor => copy environment and outgoing
-    // argument count from the predecessor.
-    DCHECK(block->phis()->length() == 0);
-    HBasicBlock* pred = block->predecessors()->at(0);
-    HEnvironment* last_environment = pred->last_environment();
-    DCHECK(last_environment != NULL);
-    // Only copy the environment if it is used again later.
-    if (pred->end()->SecondSuccessor() == NULL) {
-      DCHECK(pred->end()->FirstSuccessor() == block);
-    } else {
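-      // A successor with a larger block id has not been processed yet and
-      // will still read this environment, so it has to be copied.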
-      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
-          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
-        last_environment = last_environment->Copy();
-      }
-    }
-    block->UpdateEnvironment(last_environment);
-    DCHECK(pred->argument_count() >= 0);
-    argument_count_ = pred->argument_count();
-  } else {
-    // We are at a state join => process phis.
-    HBasicBlock* pred = block->predecessors()->at(0);
-    // No need to copy the environment; it cannot be used later.
-    HEnvironment* last_environment = pred->last_environment();
-    for (int i = 0; i < block->phis()->length(); ++i) {
-      HPhi* phi = block->phis()->at(i);
-      if (phi->HasMergedIndex()) {
-        last_environment->SetValueAt(phi->merged_index(), phi);
-      }
-    }
-    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
-      if (block->deleted_phis()->at(i) < last_environment->length()) {
-        last_environment->SetValueAt(block->deleted_phis()->at(i),
-                                     graph_->GetConstantUndefined());
-      }
-    }
-    block->UpdateEnvironment(last_environment);
-    // Pick up the outgoing argument count of one of the predecessors.
-    argument_count_ = pred->argument_count();
-  }
-  HInstruction* current = block->first();
-  int start = chunk_->instructions()->length();
-  while (current != NULL && !is_aborted()) {
-    // Code for constants in registers is generated lazily.
-    if (!current->EmitAtUses()) {
-      VisitInstruction(current);
-    }
-    current = current->next();
-  }
-  int end = chunk_->instructions()->length() - 1;
-  if (end >= start) {
-    block->set_first_instruction_index(start);
-    block->set_last_instruction_index(end);
-  }
-  block->set_argument_count(argument_count_);
-  next_block_ = NULL;
-  current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
-  HInstruction* old_current = current_instruction_;
-  current_instruction_ = current;
-
-  LInstruction* instr = NULL;
-  if (current->CanReplaceWithDummyUses()) {
-    if (current->OperandCount() == 0) {
-      instr = DefineAsRegister(new(zone()) LDummy());
-    } else {
-      DCHECK(!current->OperandAt(0)->IsControlInstruction());
-      instr = DefineAsRegister(new(zone())
-          LDummyUse(UseAny(current->OperandAt(0))));
-    }
-    for (int i = 1; i < current->OperandCount(); ++i) {
-      if (current->OperandAt(i)->IsControlInstruction()) continue;
-      LInstruction* dummy =
-          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
-      dummy->set_hydrogen_value(current);
-      chunk_->AddInstruction(dummy, current_block_);
-    }
-  } else {
-    HBasicBlock* successor;
-    if (current->IsControlInstruction() &&
-        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
-        successor != NULL) {
-      instr = new(zone()) LGoto(successor);
-    } else {
-      instr = current->CompileToLithium(this);
-    }
-  }
-
-  argument_count_ += current->argument_delta();
-  DCHECK(argument_count_ >= 0);
-
-  if (instr != NULL) {
-    AddInstruction(instr, current);
-  }
-
-  current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
-                                   HInstruction* hydrogen_val) {
-  // Associate the hydrogen instruction first, since we may need it for
-  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
-  instr->set_hydrogen_value(hydrogen_val);
-
-#ifdef DEBUG
-  // Make sure that the lithium instruction has either no fixed register
-  // constraints in temps or the result OR no uses that are only used at
-  // start. If this invariant doesn't hold, the register allocator can decide
-  // to insert a split of a range immediately before the instruction due to an
-  // already allocated register needing to be used for the instruction's fixed
-  // register constraint. In this case, the register allocator won't see an
-  // interference between the split child and the use-at-start (it would if
-  // it were just a plain use), so it is free to move the split child into
-  // the same register that is used for the use-at-start.
-  // See https://code.google.com/p/chromium/issues/detail?id=201590
-  if (!(instr->ClobbersRegisters() &&
-        instr->ClobbersDoubleRegisters(isolate()))) {
-    int fixed = 0;
-    int used_at_start = 0;
-    for (UseIterator it(instr); !it.Done(); it.Advance()) {
-      LUnallocated* operand = LUnallocated::cast(it.Current());
-      if (operand->IsUsedAtStart()) ++used_at_start;
-    }
-    if (instr->Output() != NULL) {
-      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
-    }
-    for (TempIterator it(instr); !it.Done(); it.Advance()) {
-      LUnallocated* operand = LUnallocated::cast(it.Current());
-      if (operand->HasFixedPolicy()) ++fixed;
-    }
-    DCHECK(fixed == 0 || used_at_start == 0);
-  }
-#endif
-
-  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
-    instr = AssignPointerMap(instr);
-  }
-  if (FLAG_stress_environments && !instr->HasEnvironment()) {
-    instr = AssignEnvironment(instr);
-  }
-  chunk_->AddInstruction(instr, current_block_);
-
-  if (instr->IsCall()) {
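-    // Every call is followed by an LLazyBailout so lazy deoptimization
-    // has an environment to reconstruct the frame from.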
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    LInstruction* instruction_needing_environment = NULL;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      instruction_needing_environment = instr;
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-    if (instruction_needing_environment != NULL) {
-      // Store the lazy deopt environment with the instruction if needed.
-      // Right now it is only used for LInstanceOfKnownGlobal.
-      instruction_needing_environment->
-          SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
-    }
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* value = instr->value();
-  Representation r = value->representation();
-  HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
-
-  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
-      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
-  LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
-  LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
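-  // A generic expected set handles every input kind, so an environment
-  // for deoptimization is only needed when the set is narrower.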
-  if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
-       !expected.IsGeneric())) {
-    branch = AssignEnvironment(branch);
-  }
-  return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
-  return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  info()->MarkAsRequiresFrame();
-  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  info()->MarkAsRequiresFrame();
-  return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
-  LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
-    HInstanceOfKnownGlobal* instr) {
-  LInstanceOfKnownGlobal* result =
-      new(zone()) LInstanceOfKnownGlobal(
-          UseFixed(instr->context(), esi),
-          UseFixed(instr->left(), InstanceofStub::left()),
-          FixedTemp(edi));
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
-  LOperand* receiver = UseRegister(instr->receiver());
-  LOperand* function = UseRegister(instr->function());
-  LOperand* temp = TempRegister();
-  LWrapReceiver* result =
-      new(zone()) LWrapReceiver(receiver, function, temp);
-  return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
-  LOperand* function = UseFixed(instr->function(), edi);
-  LOperand* receiver = UseFixed(instr->receiver(), eax);
-  LOperand* length = UseFixed(instr->length(), ebx);
-  LOperand* elements = UseFixed(instr->elements(), ecx);
-  LApplyArguments* result = new(zone()) LApplyArguments(function,
-                                                        receiver,
-                                                        length,
-                                                        elements);
-  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
-  int argc = instr->OperandCount();
-  for (int i = 0; i < argc; ++i) {
-    LOperand* argument = UseAny(instr->argument(i));
-    AddInstruction(new(zone()) LPushArgument(argument), instr);
-  }
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
-    HStoreCodeEntry* store_code_entry) {
-  LOperand* function = UseRegister(store_code_entry->function());
-  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
-  return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
-    HInnerAllocatedObject* instr) {
-  LOperand* base_object = UseRegisterAtStart(instr->base_object());
-  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
-  return DefineAsRegister(
-      new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses()
-      ? NULL
-      : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  if (instr->HasNoUses()) return NULL;
-
-  if (info()->IsStub()) {
-    return DefineFixed(new(zone()) LContext, esi);
-  }
-
-  return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), edi);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
-    HCallWithDescriptor* instr) {
-  CallInterfaceDescriptor descriptor = instr->descriptor();
-  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
-  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
-  ops.Add(target, zone());
-  for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op =
-        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
-    ops.Add(op, zone());
-  }
-
-  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
-      descriptor, ops, zone());
-  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
-    HTailCallThroughMegamorphicCache* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* receiver_register =
-      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
-  LOperand* name_register =
-      UseFixed(instr->name(), LoadDescriptor::NameRegister());
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
-    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
-    vector =
-        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
-  }
-
-  // Not marked as call. It can't deoptimize, and it never returns.
-  return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register, slot, vector);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseFixed(instr->function(), edi);
-  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
-  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
-  switch (instr->op()) {
-    case kMathFloor:
-      return DoMathFloor(instr);
-    case kMathRound:
-      return DoMathRound(instr);
-    case kMathFround:
-      return DoMathFround(instr);
-    case kMathAbs:
-      return DoMathAbs(instr);
-    case kMathLog:
-      return DoMathLog(instr);
-    case kMathExp:
-      return DoMathExp(instr);
-    case kMathSqrt:
-      return DoMathSqrt(instr);
-    case kMathPowHalf:
-      return DoMathPowHalf(instr);
-    case kMathClz32:
-      return DoMathClz32(instr);
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
-  LMathFloor* result = new(zone()) LMathFloor(input);
-  return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegister(instr->value());
-  LOperand* temp = FixedTemp(xmm4);
-  LMathRound* result = new(zone()) LMathRound(input, temp);
-  return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegister(instr->value());
-  LMathFround* result = new (zone()) LMathFround(input);
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
-  LOperand* context = UseAny(instr->context());  // Deferred use.
-  LOperand* input = UseRegisterAtStart(instr->value());
-  LInstruction* result =
-      DefineSameAsFirst(new(zone()) LMathAbs(context, input));
-  Representation r = instr->value()->representation();
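-  // Only tagged inputs take the deferred runtime path and need a pointer
-  // map; everything except doubles can deoptimize.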
-  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
-  if (!r.IsDouble()) result = AssignEnvironment(result);
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  DCHECK(instr->representation().IsDouble());
-  DCHECK(instr->value()->representation().IsDouble());
-  LOperand* input = UseRegisterAtStart(instr->value());
-  return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
-  LMathClz32* result = new(zone()) LMathClz32(input);
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
-  DCHECK(instr->representation().IsDouble());
-  DCHECK(instr->value()->representation().IsDouble());
-  LOperand* value = UseTempRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
-  LOperand* input = UseAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMathSqrt(input));
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
-  return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* constructor = UseFixed(instr->constructor(), edi);
-  LCallNew* result = new(zone()) LCallNew(context, constructor);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* constructor = UseFixed(instr->constructor(), edi);
-  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseFixed(instr->function(), edi);
-  LCallFunction* call = new(zone()) LCallFunction(context, function);
-  return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
-  return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
-  return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
-  return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
-  return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(instr->representation()));
-    DCHECK(instr->right()->representation().Equals(instr->representation()));
-    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
-    return DefineSameAsFirst(new(zone()) LBitI(left, right));
-  } else {
-    return DoArithmeticT(instr->op(), instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseRegister(instr->left());
-  int32_t divisor = instr->right()->GetInteger32Constant();
-  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
-          dividend, divisor));
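-  // Deopt if -0 is possible (negative divisor), if kMinInt / -1 could
-  // overflow, or if a non-truncating use needs an exact quotient.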
-  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
-      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
-      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
-       divisor != 1 && divisor != -1)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  DCHECK(instr->representation().IsInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseRegister(instr->left());
-  int32_t divisor = instr->right()->GetInteger32Constant();
-  LOperand* temp1 = FixedTemp(eax);
-  LOperand* temp2 = FixedTemp(edx);
-  LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
-          dividend, divisor, temp1, temp2), edx);
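-  // Division by zero always deopts; so do possible -0 results and
-  // inexact quotients when the uses do not all truncate.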
-  if (divisor == 0 ||
-      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
-      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseFixed(instr->left(), eax);
-  LOperand* divisor = UseRegister(instr->right());
-  LOperand* temp = FixedTemp(edx);
-  LInstruction* result = DefineFixed(new(zone()) LDivI(
-          dividend, divisor, temp), eax);
-  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
-      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
-      instr->CheckFlag(HValue::kCanOverflow) ||
-      !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    if (instr->RightIsPowerOf2()) {
-      return DoDivByPowerOf2I(instr);
-    } else if (instr->right()->IsConstant()) {
-      return DoDivByConstI(instr);
-    } else {
-      return DoDivI(instr);
-    }
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::DIV, instr);
-  } else {
-    return DoArithmeticT(Token::DIV, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
-  LOperand* dividend = UseRegisterAtStart(instr->left());
-  int32_t divisor = instr->right()->GetInteger32Constant();
-  LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
-          dividend, divisor));
-  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
-      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  DCHECK(instr->representation().IsInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseRegister(instr->left());
-  int32_t divisor = instr->right()->GetInteger32Constant();
-  LOperand* temp1 = FixedTemp(eax);
-  LOperand* temp2 = FixedTemp(edx);
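-  // A third temp is only needed when the dividend may have the opposite
-  // sign of the divisor, i.e. when the truncated quotient may need a
-  // flooring correction.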
-  LOperand* temp3 =
-      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
-       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
-      NULL : TempRegister();
-  LInstruction* result =
-      DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
-                                                   divisor,
-                                                   temp1,
-                                                   temp2,
-                                                   temp3),
-                  edx);
-  if (divisor == 0 ||
-      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseFixed(instr->left(), eax);
-  LOperand* divisor = UseRegister(instr->right());
-  LOperand* temp = FixedTemp(edx);
-  LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
-          dividend, divisor, temp), eax);
-  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
-      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
-      instr->CheckFlag(HValue::kCanOverflow)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
-  if (instr->RightIsPowerOf2()) {
-    return DoFlooringDivByPowerOf2I(instr);
-  } else if (instr->right()->IsConstant()) {
-    return DoFlooringDivByConstI(instr);
-  } else {
-    return DoFlooringDivI(instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseRegisterAtStart(instr->left());
-  int32_t divisor = instr->right()->GetInteger32Constant();
-  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
-          dividend, divisor));
-  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
-      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseRegister(instr->left());
-  int32_t divisor = instr->right()->GetInteger32Constant();
-  LOperand* temp1 = FixedTemp(eax);
-  LOperand* temp2 = FixedTemp(edx);
-  LInstruction* result = DefineFixed(new(zone()) LModByConstI(
-          dividend, divisor, temp1, temp2), eax);
-  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  LOperand* dividend = UseFixed(instr->left(), eax);
-  LOperand* divisor = UseRegister(instr->right());
-  LOperand* temp = FixedTemp(edx);
-  LInstruction* result = DefineFixed(new(zone()) LModI(
-          dividend, divisor, temp), edx);
-  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
-      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    if (instr->RightIsPowerOf2()) {
-      return DoModByPowerOf2I(instr);
-    } else if (instr->right()->IsConstant()) {
-      return DoModByConstI(instr);
-    } else {
-      return DoModI(instr);
-    }
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::MOD, instr);
-  } else {
-    return DoArithmeticT(Token::MOD, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(instr->representation()));
-    DCHECK(instr->right()->representation().Equals(instr->representation()));
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseOrConstant(instr->BetterRightOperand());
-    LOperand* temp = NULL;
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      temp = TempRegister();
-    }
-    LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      AssignEnvironment(mul);
-    }
-    return DefineSameAsFirst(mul);
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::MUL, instr);
-  } else {
-    return DoArithmeticT(Token::MUL, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(instr->representation()));
-    DCHECK(instr->right()->representation().Equals(instr->representation()));
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new(zone()) LSubI(left, right);
-    LInstruction* result = DefineSameAsFirst(sub);
-    if (instr->CheckFlag(HValue::kCanOverflow)) {
-      result = AssignEnvironment(result);
-    }
-    return result;
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::SUB, instr);
-  } else {
-    return DoArithmeticT(Token::SUB, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
-  if (instr->representation().IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(instr->representation()));
-    DCHECK(instr->right()->representation().Equals(instr->representation()));
-    // Check to see if it would be advantageous to use an lea instruction rather
-    // than an add. This is the case when no overflow check is needed and there
-    // are multiple uses of the add's inputs, so using a 3-register add will
-    // preserve all input values for later uses.
-    bool use_lea = LAddI::UseLea(instr);
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    HValue* right_candidate = instr->BetterRightOperand();
-    LOperand* right = use_lea
-        ? UseRegisterOrConstantAtStart(right_candidate)
-        : UseOrConstantAtStart(right_candidate);
-    LAddI* add = new(zone()) LAddI(left, right);
-    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
-    LInstruction* result = use_lea
-        ? DefineAsRegister(add)
-        : DefineSameAsFirst(add);
-    if (can_overflow) {
-      result = AssignEnvironment(result);
-    }
-    return result;
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::ADD, instr);
-  } else if (instr->representation().IsExternal()) {
-    DCHECK(instr->left()->representation().IsExternal());
-    DCHECK(instr->right()->representation().IsInteger32());
-    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
-    bool use_lea = LAddI::UseLea(instr);
-    LOperand* left = UseRegisterAtStart(instr->left());
-    HValue* right_candidate = instr->right();
-    LOperand* right = use_lea
-        ? UseRegisterOrConstantAtStart(right_candidate)
-        : UseOrConstantAtStart(right_candidate);
-    LAddI* add = new(zone()) LAddI(left, right);
-    LInstruction* result = use_lea
-        ? DefineAsRegister(add)
-        : DefineSameAsFirst(add);
-    return result;
-  } else {
-    return DoArithmeticT(Token::ADD, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
-  LOperand* left = NULL;
-  LOperand* right = NULL;
-  if (instr->representation().IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(instr->representation()));
-    DCHECK(instr->right()->representation().Equals(instr->representation()));
-    left = UseRegisterAtStart(instr->BetterLeftOperand());
-    right = UseOrConstantAtStart(instr->BetterRightOperand());
-  } else {
-    DCHECK(instr->representation().IsDouble());
-    DCHECK(instr->left()->representation().IsDouble());
-    DCHECK(instr->right()->representation().IsDouble());
-    left = UseRegisterAtStart(instr->left());
-    right = UseRegisterAtStart(instr->right());
-  }
-  LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
-  return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  DCHECK(instr->representation().IsDouble());
-  // We call a C function for double power. It can't trigger a GC.
-  // We need to use a fixed result register for the call.
-  Representation exponent_type = instr->right()->representation();
-  DCHECK(instr->left()->representation().IsDouble());
-  LOperand* left = UseFixedDouble(instr->left(), xmm2);
-  LOperand* right =
-      exponent_type.IsDouble()
-          ? UseFixedDouble(instr->right(), xmm1)
-          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
-  LPower* result = new(zone()) LPower(left, right);
-  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
-                    CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  DCHECK(instr->left()->representation().IsSmiOrTagged());
-  DCHECK(instr->right()->representation().IsSmiOrTagged());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), edx);
-  LOperand* right = UseFixed(instr->right(), eax);
-  LCmpT* result = new(zone()) LCmpT(context, left, right);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
-    HCompareNumericAndBranch* instr) {
-  Representation r = instr->representation();
-  if (r.IsSmiOrInteger32()) {
-    DCHECK(instr->left()->representation().Equals(r));
-    DCHECK(instr->right()->representation().Equals(r));
-    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
-    LOperand* right = UseOrConstantAtStart(instr->right());
-    return new(zone()) LCompareNumericAndBranch(left, right);
-  } else {
-    DCHECK(r.IsDouble());
-    DCHECK(instr->left()->representation().IsDouble());
-    DCHECK(instr->right()->representation().IsDouble());
-    LOperand* left;
-    LOperand* right;
-    if (CanBeImmediateConstant(instr->left()) &&
-        CanBeImmediateConstant(instr->right())) {
-      // The code generator requires either both inputs to be constant
-      // operands, or neither.
-      left = UseConstant(instr->left());
-      right = UseConstant(instr->right());
-    } else {
-      left = UseRegisterAtStart(instr->left());
-      right = UseRegisterAtStart(instr->right());
-    }
-    return new(zone()) LCompareNumericAndBranch(left, right);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
-    HCompareObjectEqAndBranch* instr) {
-  LOperand* left = UseRegisterAtStart(instr->left());
-  LOperand* right = UseOrConstantAtStart(instr->right());
-  return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
-    HCompareHoleAndBranch* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsSmiOrTagged());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
-    HIsUndetectableAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LIsUndetectableAndBranch(
-      UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
-    HStringCompareAndBranch* instr) {
-  DCHECK(instr->left()->representation().IsTagged());
-  DCHECK(instr->right()->representation().IsTagged());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), edx);
-  LOperand* right = UseFixed(instr->right(), eax);
-
-  LStringCompareAndBranch* result = new(zone())
-      LStringCompareAndBranch(context, left, right);
-
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
-    HHasInstanceTypeAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasInstanceTypeAndBranch(
-      UseRegisterAtStart(instr->value()),
-      TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
-    HClassOfTestAndBranch* instr) {
-  DCHECK(instr->value()->representation().IsTagged());
-  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
-                                           TempRegister(),
-                                           TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
-  LOperand* date = UseFixed(instr->value(), eax);
-  LDateField* result =
-      new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
-  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
-  LOperand* string = UseRegisterAtStart(instr->string());
-  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
-  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
-  if (instr->encoding() == String::ONE_BYTE_ENCODING) {
-    if (FLAG_debug_code) {
-      return UseFixed(instr->value(), eax);
-    } else {
-      return UseFixedOrConstant(instr->value(), eax);
-    }
-  } else {
-    if (FLAG_debug_code) {
-      return UseRegisterAtStart(instr->value());
-    } else {
-      return UseRegisterOrConstantAtStart(instr->value());
-    }
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
-  LOperand* string = UseRegisterAtStart(instr->string());
-  LOperand* index = FLAG_debug_code
-      ? UseRegisterAtStart(instr->index())
-      : UseRegisterOrConstantAtStart(instr->index());
-  LOperand* value = GetSeqStringSetCharOperand(instr);
-  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
-  LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
-                                                       index, value);
-  if (FLAG_debug_code) {
-    result = MarkAsCall(result, instr);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  if (!FLAG_debug_code && instr->skip_check()) return NULL;
-  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
-  LOperand* length = !index->IsConstantOperand()
-      ? UseOrConstantAtStart(instr->length())
-      : UseAtStart(instr->length());
-  LInstruction* result = new(zone()) LBoundsCheck(index, length);
-  if (!FLAG_debug_code || !instr->skip_check()) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
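-// The two flag tests above combine into three outcomes:
-//   skip_check && !FLAG_debug_code  -> no LBoundsCheck is emitted at all;
-//   skip_check && FLAG_debug_code   -> an LBoundsCheck without an
-//                                      environment, i.e. a debug-only
-//                                      assertion that cannot deoptimize;
-//   !skip_check                     -> an LBoundsCheck with an environment,
-//                                      so a failing check can deoptimize.
-
-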
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
-  // The control instruction marking the end of a block that completed
-  // abruptly (e.g., threw an exception).  There is nothing specific to do.
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
-  // All HForceRepresentation instructions should be eliminated in the
-  // representation change phase of Hydrogen.
-  UNREACHABLE();
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
-  Representation from = instr->from();
-  Representation to = instr->to();
-  HValue* val = instr->value();
-  if (from.IsSmi()) {
-    if (to.IsTagged()) {
-      LOperand* value = UseRegister(val);
-      return DefineSameAsFirst(new(zone()) LDummyUse(value));
-    }
-    from = Representation::Tagged();
-  }
-  if (from.IsTagged()) {
-    if (to.IsDouble()) {
-      LOperand* value = UseRegister(val);
-      LOperand* temp = TempRegister();
-      LInstruction* result =
-          DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
-      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
-      return result;
-    } else if (to.IsSmi()) {
-      LOperand* value = UseRegister(val);
-      if (val->type().IsSmi()) {
-        return DefineSameAsFirst(new(zone()) LDummyUse(value));
-      }
-      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
-    } else {
-      DCHECK(to.IsInteger32());
-      if (val->type().IsSmi() || val->representation().IsSmi()) {
-        LOperand* value = UseRegister(val);
-        return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
-      } else {
-        LOperand* value = UseRegister(val);
-        bool truncating = instr->CanTruncateToInt32();
-        LOperand* xmm_temp = !truncating ? FixedTemp(xmm1) : NULL;
-        LInstruction* result =
-            DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
-        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
-        return result;
-      }
-    }
-  } else if (from.IsDouble()) {
-    if (to.IsTagged()) {
-      info()->MarkAsDeferredCalling();
-      LOperand* value = UseRegisterAtStart(val);
-      LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
-      LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
-      return AssignPointerMap(Define(result, result_temp));
-    } else if (to.IsSmi()) {
-      LOperand* value = UseRegister(val);
-      return AssignEnvironment(
-          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
-    } else {
-      DCHECK(to.IsInteger32());
-      bool truncating = instr->CanTruncateToInt32();
-      bool needs_temp = !truncating;
-      LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val);
-      LOperand* temp = needs_temp ? TempRegister() : NULL;
-      LInstruction* result =
-          DefineAsRegister(new(zone()) LDoubleToI(value, temp));
-      if (!truncating) result = AssignEnvironment(result);
-      return result;
-    }
-  } else if (from.IsInteger32()) {
-    info()->MarkAsDeferredCalling();
-    if (to.IsTagged()) {
-      LOperand* value = UseRegister(val);
-      if (!instr->CheckFlag(HValue::kCanOverflow)) {
-        return DefineSameAsFirst(new(zone()) LSmiTag(value));
-      } else if (val->CheckFlag(HInstruction::kUint32)) {
-        LOperand* temp = TempRegister();
-        LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
-        return AssignPointerMap(DefineSameAsFirst(result));
-      } else {
-        LOperand* temp = TempRegister();
-        LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
-        return AssignPointerMap(DefineSameAsFirst(result));
-      }
-    } else if (to.IsSmi()) {
-      LOperand* value = UseRegister(val);
-      LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
-      if (instr->CheckFlag(HValue::kCanOverflow)) {
-        result = AssignEnvironment(result);
-      }
-      return result;
-    } else {
-      DCHECK(to.IsDouble());
-      if (val->CheckFlag(HInstruction::kUint32)) {
-        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
-      } else {
-        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
-      }
-    }
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
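-// Worked example for the tagged case above: a truncating Tagged->Integer32
-// change whose input is not statically a Smi takes the final else branch
-// and produces, in effect:
-//
-//   LOperand* value = UseRegister(val);
-//   LInstruction* result =
-//       DefineSameAsFirst(new(zone()) LTaggedToI(value, NULL));
-//   result = AssignEnvironment(result);  // may deopt on non-number input
-//
-// The xmm1 temp is only reserved for the non-truncating case, which must
-// deoptimize on any loss of precision.
-
-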
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
-  LOperand* value = UseAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckNonSmi(value);
-  if (!instr->value()->type().IsHeapObject()) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
-  return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
-  // If the object is in new space, we'll emit a global cell compare and so
-  // want the value in a register.  If the object gets promoted before we
-  // emit code, we will still get the register but will do an immediate
-  // compare instead of the cell compare.  This is safe.
-  LOperand* value = instr->object_in_new_space()
-      ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
-  if (instr->HasMigrationTarget()) {
-    info()->MarkAsDeferredCalling();
-    result = AssignPointerMap(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
-  HValue* value = instr->value();
-  Representation input_rep = value->representation();
-  if (input_rep.IsDouble()) {
-    LOperand* reg = UseRegister(value);
-    return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
-  } else if (input_rep.IsInteger32()) {
-    LOperand* reg = UseFixed(value, eax);
-    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
-  } else {
-    DCHECK(input_rep.IsSmiOrTagged());
-    LOperand* reg = UseFixed(value, eax);
-    // Register allocator doesn't (yet) support allocation of double
-    // temps. Reserve xmm1 explicitly.
-    LOperand* temp = FixedTemp(xmm1);
-    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
-    return AssignEnvironment(DefineFixed(result, eax));
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
-  HValue* value = instr->value();
-  DCHECK(value->representation().IsDouble());
-  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
-  LOperand* lo = UseRegister(instr->lo());
-  LOperand* hi = UseRegister(instr->hi());
-  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
-  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
-  return new(zone()) LReturn(
-      UseFixed(instr->value(), eax), context, parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
-  Representation r = instr->representation();
-  if (r.IsSmi()) {
-    return DefineAsRegister(new(zone()) LConstantS);
-  } else if (r.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LConstantI);
-  } else if (r.IsDouble()) {
-    double value = instr->DoubleValue();
-    bool value_is_zero = bit_cast<uint64_t, double>(value) == 0;
-    LOperand* temp = value_is_zero ? NULL : TempRegister();
-    return DefineAsRegister(new(zone()) LConstantD(temp));
-  } else if (r.IsExternal()) {
-    return DefineAsRegister(new(zone()) LConstantE);
-  } else if (r.IsTagged()) {
-    return DefineAsRegister(new(zone()) LConstantT);
-  } else {
-    UNREACHABLE();
-    return NULL;
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
-  return instr->RequiresHoleCheck()
-      ? AssignEnvironment(DefineAsRegister(result))
-      : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* global_object =
-      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
-  }
-
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  LStoreGlobalCell* result =
-      new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
-  LOperand* context = UseRegisterAtStart(instr->value());
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
-  LOperand* value;
-  LOperand* temp;
-  LOperand* context = UseRegister(instr->context());
-  if (instr->NeedsWriteBarrier()) {
-    value = UseTempRegister(instr->value());
-    temp = TempRegister();
-  } else {
-    value = UseRegister(instr->value());
-    temp = NULL;
-  }
-  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
-  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  LOperand* obj = (instr->access().IsExternalMemory() &&
-                   instr->access().offset() == 0)
-      ? UseRegisterOrConstantAtStart(instr->object())
-      : UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
-  }
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
-      context, object, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
-    HLoadFunctionPrototype* instr) {
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
-                                         TempRegister())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
-  return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  DCHECK(instr->key()->representation().IsSmiOrInteger32());
-  ElementsKind elements_kind = instr->elements_kind();
-  bool clobbers_key = ExternalArrayOpRequiresTemp(
-      instr->key()->representation(), elements_kind);
-  LOperand* key = clobbers_key
-      ? UseTempRegister(instr->key())
-      : UseRegisterOrConstantAtStart(instr->key());
-  LInstruction* result = NULL;
-
-  if (!instr->is_typed_elements()) {
-    LOperand* obj = UseRegisterAtStart(instr->elements());
-    result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
-  } else {
-    DCHECK(
-        (instr->representation().IsInteger32() &&
-         !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
-        (instr->representation().IsDouble() &&
-         (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
-    LOperand* backing_store = UseRegister(instr->elements());
-    result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
-  }
-
-  if ((instr->is_external() || instr->is_fixed_typed_array()) ?
-      // see LCodeGen::DoLoadKeyedExternalArray
-      ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS ||
-        instr->elements_kind() == UINT32_ELEMENTS) &&
-       !instr->CheckFlag(HInstruction::kUint32)) :
-      // see LCodeGen::DoLoadKeyedFixedDoubleArray and
-      // LCodeGen::DoLoadKeyedFixedArray
-      instr->RequiresHoleCheck()) {
-    result = AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
-  }
-  LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
-  ElementsKind elements_kind = instr->elements_kind();
-
-  // Determine if we need a byte register in this case for the value.
-  bool val_is_fixed_register =
-      elements_kind == EXTERNAL_INT8_ELEMENTS ||
-      elements_kind == EXTERNAL_UINT8_ELEMENTS ||
-      elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
-      elements_kind == UINT8_ELEMENTS ||
-      elements_kind == INT8_ELEMENTS ||
-      elements_kind == UINT8_CLAMPED_ELEMENTS;
-  if (val_is_fixed_register) {
-    return UseFixed(instr->value(), eax);
-  }
-
-  return UseRegister(instr->value());
-}
-
-
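-// The fixed-register case above exists because ia32 byte stores (mov_b)
-// can only encode the low-byte registers al, bl, cl and dl; pinning the
-// value to eax guarantees a byte-addressable register.  Wider element
-// kinds have no such encoding restriction, so any register will do.
-
-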
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
-  if (!instr->is_typed_elements()) {
-    DCHECK(instr->elements()->representation().IsTagged());
-    DCHECK(instr->key()->representation().IsInteger32() ||
-           instr->key()->representation().IsSmi());
-
-    if (instr->value()->representation().IsDouble()) {
-      LOperand* object = UseRegisterAtStart(instr->elements());
-      LOperand* val = UseRegisterAtStart(instr->value());
-      LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-      return new(zone()) LStoreKeyed(object, key, val);
-    } else {
-      DCHECK(instr->value()->representation().IsSmiOrTagged());
-      bool needs_write_barrier = instr->NeedsWriteBarrier();
-
-      LOperand* obj = UseRegister(instr->elements());
-      LOperand* val;
-      LOperand* key;
-      if (needs_write_barrier) {
-        val = UseTempRegister(instr->value());
-        key = UseTempRegister(instr->key());
-      } else {
-        val = UseRegisterOrConstantAtStart(instr->value());
-        key = UseRegisterOrConstantAtStart(instr->key());
-      }
-      return new(zone()) LStoreKeyed(obj, key, val);
-    }
-  }
-
-  ElementsKind elements_kind = instr->elements_kind();
-  DCHECK(
-      (instr->value()->representation().IsInteger32() &&
-       !IsDoubleOrFloatElementsKind(elements_kind)) ||
-      (instr->value()->representation().IsDouble() &&
-       IsDoubleOrFloatElementsKind(elements_kind)));
-  DCHECK((instr->is_fixed_typed_array() &&
-          instr->elements()->representation().IsTagged()) ||
-         (instr->is_external() &&
-          instr->elements()->representation().IsExternal()));
-
-  LOperand* backing_store = UseRegister(instr->elements());
-  LOperand* val = GetStoreKeyedValueOperand(instr);
-  bool clobbers_key = ExternalArrayOpRequiresTemp(
-      instr->key()->representation(), elements_kind);
-  LOperand* key = clobbers_key
-      ? UseTempRegister(instr->key())
-      : UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyed(backing_store, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  DCHECK(instr->object()->representation().IsTagged());
-  DCHECK(instr->key()->representation().IsTagged());
-  DCHECK(instr->value()->representation().IsTagged());
-
-  LStoreKeyedGeneric* result =
-      new(zone()) LStoreKeyedGeneric(context, object, key, value);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
-    HTransitionElementsKind* instr) {
-  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
-    LOperand* object = UseRegister(instr->object());
-    LOperand* new_map_reg = TempRegister();
-    LOperand* temp_reg = TempRegister();
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, NULL,
-                                            new_map_reg, temp_reg);
-    return result;
-  } else {
-    LOperand* object = UseFixed(instr->object(), eax);
-    LOperand* context = UseFixed(instr->context(), esi);
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
-    return MarkAsCall(result, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
-    HTrapAllocationMemento* instr) {
-  LOperand* object = UseRegister(instr->object());
-  LOperand* temp = TempRegister();
-  LTrapAllocationMemento* result =
-      new(zone()) LTrapAllocationMemento(object, temp);
-  return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
-  bool is_in_object = instr->access().IsInobject();
-  bool is_external_location = instr->access().IsExternalMemory() &&
-      instr->access().offset() == 0;
-  bool needs_write_barrier = instr->NeedsWriteBarrier();
-  bool needs_write_barrier_for_map = instr->has_transition() &&
-      instr->NeedsWriteBarrierForMap();
-
-  LOperand* obj;
-  if (needs_write_barrier) {
-    obj = is_in_object
-        ? UseRegister(instr->object())
-        : UseTempRegister(instr->object());
-  } else if (is_external_location) {
-    DCHECK(!is_in_object);
-    DCHECK(!needs_write_barrier);
-    DCHECK(!needs_write_barrier_for_map);
-    obj = UseRegisterOrConstant(instr->object());
-  } else {
-    obj = needs_write_barrier_for_map
-        ? UseRegister(instr->object())
-        : UseRegisterAtStart(instr->object());
-  }
-
-  bool can_be_constant = instr->value()->IsConstant() &&
-      HConstant::cast(instr->value())->NotInNewSpace() &&
-      !instr->field_representation().IsDouble();
-
-  LOperand* val;
-  if (instr->field_representation().IsInteger8() ||
-      instr->field_representation().IsUInteger8()) {
-    // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
-    // Just force the value to be in eax and we're safe here.
-    val = UseFixed(instr->value(), eax);
-  } else if (needs_write_barrier) {
-    val = UseTempRegister(instr->value());
-  } else if (can_be_constant) {
-    val = UseRegisterOrConstant(instr->value());
-  } else if (instr->field_representation().IsDouble()) {
-    val = UseRegisterAtStart(instr->value());
-  } else {
-    val = UseRegister(instr->value());
-  }
-
-  // We only need a scratch register if we have a write barrier or we
-  // have a store into the properties array (not in-object-property).
-  LOperand* temp = (!is_in_object || needs_write_barrier ||
-                    needs_write_barrier_for_map) ? TempRegister() : NULL;
-
-  // We need a temporary register for write barrier of the map field.
-  LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
-
-  return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object =
-      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
-  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
-  LStoreNamedGeneric* result =
-      new(zone()) LStoreNamedGeneric(context, object, value);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), edx);
-  LOperand* right = UseFixed(instr->right(), eax);
-  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
-  return MarkAsCall(DefineFixed(string_add, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseTempRegister(instr->string());
-  LOperand* index = UseTempRegister(instr->index());
-  LOperand* context = UseAny(instr->context());
-  LStringCharCodeAt* result =
-      new(zone()) LStringCharCodeAt(context, string, index);
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
-  LOperand* char_code = UseRegister(instr->value());
-  LOperand* context = UseAny(instr->context());
-  LStringCharFromCode* result =
-      new(zone()) LStringCharFromCode(context, char_code);
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
-  LOperand* size = instr->size()->IsConstant()
-      ? UseConstant(instr->size())
-      : UseTempRegister(instr->size());
-  LOperand* temp = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp);
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  DCHECK(argument_count_ == 0);
-  allocator_->MarkAsOsrEntry();
-  current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  LParameter* result = new(zone()) LParameter;
-  if (instr->kind() == HParameter::STACK_PARAMETER) {
-    int spill_index = chunk()->GetParameterStackSlot(instr->index());
-    return DefineAsSpilled(result, spill_index);
-  } else {
-    DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
-    int index = static_cast<int>(instr->index());
-    Register reg = descriptor.GetEnvironmentParameterRegister(index);
-    return DefineFixed(result, reg);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
-  // Use an index that corresponds to the location in the unoptimized frame,
-  // which the optimized frame will subsume.
-  int env_index = instr->index();
-  int spill_index = 0;
-  if (instr->environment()->is_parameter_index(env_index)) {
-    spill_index = chunk()->GetParameterStackSlot(env_index);
-  } else {
-    spill_index = env_index - instr->environment()->first_local_index();
-    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Retry(kNotEnoughSpillSlotsForOsr);
-      spill_index = 0;
-    }
-    if (spill_index == 0) {
-      // The dynamic frame alignment state overwrites the first local.
-      // The first local is saved at the end of the unoptimized frame.
-      spill_index = graph()->osr()->UnoptimizedFrameSlots();
-    }
-  }
-  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
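-// Sketch of the index math above (the concrete first_local_index of 4 is
-// an illustrative assumption, not taken from this file): OSR local #0
-// arrives with env_index == 4, so spill_index == 0; because slot 0 holds
-// the dynamic frame alignment word, the value is redirected to slot
-// graph()->osr()->UnoptimizedFrameSlots().  Local #1 (env_index == 5)
-// simply spills at index 1.
-
-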
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LCallStub* result = new(zone()) LCallStub(context);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
-  // There are no real uses of the arguments object.
-  // arguments.length and element access are supported directly on
-  // stack arguments, and any real arguments object use causes a bailout.
-  // So this value is never used.
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
-  instr->ReplayEnvironment(current_block_->last_environment());
-
-  // There are no real uses of a captured object.
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
-  info()->MarkAsRequiresFrame();
-  LOperand* args = UseRegister(instr->arguments());
-  LOperand* length;
-  LOperand* index;
-  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
-    length = UseRegisterOrConstant(instr->length());
-    index = UseOrConstant(instr->index());
-  } else {
-    length = UseTempRegister(instr->length());
-    index = Use(instr->index());
-  }
-  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* value = UseAtStart(instr->value());
-  LTypeof* result = new(zone()) LTypeof(context, value);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
-    HIsConstructCallAndBranch* instr) {
-  return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
-  instr->ReplayEnvironment(current_block_->last_environment());
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  info()->MarkAsDeferredCalling();
-  if (instr->is_function_entry()) {
-    LOperand* context = UseFixed(instr->context(), esi);
-    return MarkAsCall(new(zone()) LStackCheck(context), instr);
-  } else {
-    DCHECK(instr->is_backwards_branch());
-    LOperand* context = UseAny(instr->context());
-    return AssignEnvironment(
-        AssignPointerMap(new(zone()) LStackCheck(context)));
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment();
-  outer->set_ast_id(instr->ReturnId());
-  HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
-  // Only replay binding of arguments object if it wasn't removed from graph.
-  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
-    inner->Bind(instr->arguments_var(), instr->arguments_object());
-  }
-  inner->BindContext(instr->closure_context());
-  inner->set_entry(instr);
-  current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedClosure(instr->closure());
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  LInstruction* pop = NULL;
-
-  HEnvironment* env = current_block_->last_environment();
-
-  if (env->entry()->arguments_pushed()) {
-    int argument_count = env->arguments_environment()->parameter_count();
-    pop = new(zone()) LDrop(argument_count);
-    DCHECK(instr->argument_delta() == -argument_count);
-  }
-
-  HEnvironment* outer = current_block_->last_environment()->
-      DiscardInlined(false);
-  current_block_->UpdateEnvironment(outer);
-  return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->enumerable(), eax);
-  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
-  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
-  LOperand* map = UseRegister(instr->map());
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* map = UseRegisterAtStart(instr->map());
-  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
-  LOperand* object = UseRegister(instr->object());
-  LOperand* index = UseTempRegister(instr->index());
-  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
-  LInstruction* result = DefineSameAsFirst(load);
-  return AssignPointerMap(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, esi), instr);
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
deleted file mode 100644
index 49eba66..0000000
--- a/src/ia32/lithium-ia32.h
+++ /dev/null
@@ -1,2881 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_LITHIUM_IA32_H_
-#define V8_IA32_LITHIUM_IA32_H_
-
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-namespace compiler {
-class RCodeVisualizer;
-}
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
-  V(AccessArgumentsAt)                       \
-  V(AddI)                                    \
-  V(AllocateBlockContext)                    \
-  V(Allocate)                                \
-  V(ApplyArguments)                          \
-  V(ArgumentsElements)                       \
-  V(ArgumentsLength)                         \
-  V(ArithmeticD)                             \
-  V(ArithmeticT)                             \
-  V(BitI)                                    \
-  V(BoundsCheck)                             \
-  V(Branch)                                  \
-  V(CallJSFunction)                          \
-  V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
-  V(CallNew)                                 \
-  V(CallNewArray)                            \
-  V(CallRuntime)                             \
-  V(CallStub)                                \
-  V(CheckInstanceType)                       \
-  V(CheckMaps)                               \
-  V(CheckMapValue)                           \
-  V(CheckNonSmi)                             \
-  V(CheckSmi)                                \
-  V(CheckValue)                              \
-  V(ClampDToUint8)                           \
-  V(ClampIToUint8)                           \
-  V(ClampTToUint8)                           \
-  V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
-  V(CompareNumericAndBranch)                 \
-  V(CmpObjectEqAndBranch)                    \
-  V(CmpHoleAndBranch)                        \
-  V(CmpMapAndBranch)                         \
-  V(CmpT)                                    \
-  V(ConstantD)                               \
-  V(ConstantE)                               \
-  V(ConstantI)                               \
-  V(ConstantS)                               \
-  V(ConstantT)                               \
-  V(ConstructDouble)                         \
-  V(Context)                                 \
-  V(DateField)                               \
-  V(DebugBreak)                              \
-  V(DeclareGlobals)                          \
-  V(Deoptimize)                              \
-  V(DivByConstI)                             \
-  V(DivByPowerOf2I)                          \
-  V(DivI)                                    \
-  V(DoubleBits)                              \
-  V(DoubleToI)                               \
-  V(DoubleToSmi)                             \
-  V(Drop)                                    \
-  V(Dummy)                                   \
-  V(DummyUse)                                \
-  V(FlooringDivByConstI)                     \
-  V(FlooringDivByPowerOf2I)                  \
-  V(FlooringDivI)                            \
-  V(ForInCacheArray)                         \
-  V(ForInPrepareMap)                         \
-  V(FunctionLiteral)                         \
-  V(GetCachedArrayIndex)                     \
-  V(Goto)                                    \
-  V(HasCachedArrayIndexAndBranch)            \
-  V(HasInstanceTypeAndBranch)                \
-  V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
-  V(InstanceOfKnownGlobal)                   \
-  V(InstructionGap)                          \
-  V(Integer32ToDouble)                       \
-  V(InvokeFunction)                          \
-  V(IsConstructCallAndBranch)                \
-  V(IsObjectAndBranch)                       \
-  V(IsStringAndBranch)                       \
-  V(IsSmiAndBranch)                          \
-  V(IsUndetectableAndBranch)                 \
-  V(Label)                                   \
-  V(LazyBailout)                             \
-  V(LoadContextSlot)                         \
-  V(LoadFieldByIndex)                        \
-  V(LoadFunctionPrototype)                   \
-  V(LoadGlobalCell)                          \
-  V(LoadGlobalGeneric)                       \
-  V(LoadKeyed)                               \
-  V(LoadKeyedGeneric)                        \
-  V(LoadNamedField)                          \
-  V(LoadNamedGeneric)                        \
-  V(LoadRoot)                                \
-  V(MapEnumLength)                           \
-  V(MathAbs)                                 \
-  V(MathClz32)                               \
-  V(MathExp)                                 \
-  V(MathFloor)                               \
-  V(MathFround)                              \
-  V(MathLog)                                 \
-  V(MathMinMax)                              \
-  V(MathPowHalf)                             \
-  V(MathRound)                               \
-  V(MathSqrt)                                \
-  V(ModByConstI)                             \
-  V(ModByPowerOf2I)                          \
-  V(ModI)                                    \
-  V(MulI)                                    \
-  V(NumberTagD)                              \
-  V(NumberTagI)                              \
-  V(NumberTagU)                              \
-  V(NumberUntagD)                            \
-  V(OsrEntry)                                \
-  V(Parameter)                               \
-  V(Power)                                   \
-  V(PushArgument)                            \
-  V(RegExpLiteral)                           \
-  V(Return)                                  \
-  V(SeqStringGetChar)                        \
-  V(SeqStringSetChar)                        \
-  V(ShiftI)                                  \
-  V(SmiTag)                                  \
-  V(SmiUntag)                                \
-  V(StackCheck)                              \
-  V(StoreCodeEntry)                          \
-  V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
-  V(StoreGlobalCell)                         \
-  V(StoreKeyed)                              \
-  V(StoreKeyedGeneric)                       \
-  V(StoreNamedField)                         \
-  V(StoreNamedGeneric)                       \
-  V(StringAdd)                               \
-  V(StringCharCodeAt)                        \
-  V(StringCharFromCode)                      \
-  V(StringCompareAndBranch)                  \
-  V(SubI)                                    \
-  V(TaggedToI)                               \
-  V(TailCallThroughMegamorphicCache)         \
-  V(ThisFunction)                            \
-  V(ToFastProperties)                        \
-  V(TransitionElementsKind)                  \
-  V(TrapAllocationMemento)                   \
-  V(Typeof)                                  \
-  V(TypeofIsAndBranch)                       \
-  V(Uint32ToDouble)                          \
-  V(UnknownOSRValue)                         \
-  V(WrapReceiver)
-
-
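-// This is the usual X-macro pattern: each consumer defines V(type) for its
-// own purpose and then instantiates the list.  The Opcode enum below, for
-// example, expands roughly to
-//
-//   enum Opcode { kAccessArgumentsAt, kAddI, /* ... */ kWrapReceiver,
-//                 kAdapter, kNumberOfInstructions };
-//
-// and DECLARE_PREDICATE turns the same list into one Is##type() tester per
-// instruction.
-
-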
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
-  Opcode opcode() const FINAL { return LInstruction::k##type; } \
-  void CompileToNative(LCodeGen* generator) FINAL;              \
-  const char* Mnemonic() const FINAL { return mnemonic; }       \
-  static L##type* cast(LInstruction* instr) {                   \
-    DCHECK(instr->Is##type());                                  \
-    return reinterpret_cast<L##type*>(instr);                   \
-  }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type)     \
-  H##type* hydrogen() const {               \
-    return H##type::cast(hydrogen_value()); \
-  }
-
-
-class LInstruction : public ZoneObject {
- public:
-  LInstruction()
-      : environment_(NULL),
-        hydrogen_value_(NULL),
-        bit_field_(IsCallBits::encode(false)) {
-  }
-
-  virtual ~LInstruction() {}
-
-  virtual void CompileToNative(LCodeGen* generator) = 0;
-  virtual const char* Mnemonic() const = 0;
-  virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
-  enum Opcode {
-    // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
-    kNumberOfInstructions
-#undef DECLARE_OPCODE
-  };
-
-  virtual Opcode opcode() const = 0;
-
-  // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
-  bool Is##type() const { return opcode() == k##type; }
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
-  // Declare virtual predicates for instructions that don't have
-  // an opcode.
-  virtual bool IsGap() const { return false; }
-
-  virtual bool IsControl() const { return false; }
-
-  // Try deleting this instruction if possible.
-  virtual bool TryDelete() { return false; }
-
-  void set_environment(LEnvironment* env) { environment_ = env; }
-  LEnvironment* environment() const { return environment_; }
-  bool HasEnvironment() const { return environment_ != NULL; }
-
-  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
-  LPointerMap* pointer_map() const { return pointer_map_.get(); }
-  bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
-  HValue* hydrogen_value() const { return hydrogen_value_; }
-
-  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
-  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
-  bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
-  // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return IsCall(); }
-  bool ClobbersRegisters() const { return IsCall(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
-    return IsCall();
-  }
-
-  virtual bool HasResult() const = 0;
-  virtual LOperand* result() const = 0;
-
-  bool HasDoubleRegisterResult();
-  bool HasDoubleRegisterInput();
-
-  LOperand* FirstInput() { return InputAt(0); }
-  LOperand* Output() { return HasResult() ? result() : NULL; }
-
-  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
-  void VerifyCall();
-#endif
-
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
-
- private:
-  // Iterator support.
-  friend class InputIterator;
-
-  friend class TempIterator;
-  virtual int TempCount() = 0;
-  virtual LOperand* TempAt(int i) = 0;
-
-  class IsCallBits: public BitField<bool, 0, 1> {};
-
-  LEnvironment* environment_;
-  SetOncePointer<LPointerMap> pointer_map_;
-  HValue* hydrogen_value_;
-  int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
-  // Allow 0 or 1 output operands.
-  STATIC_ASSERT(R == 0 || R == 1);
-  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
-  void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const OVERRIDE { return results_[0]; }
-
- protected:
-  EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
-  EmbeddedContainer<LOperand*, I> inputs_;
-  EmbeddedContainer<LOperand*, T> temps_;
-
- private:
-  // Iterator support.
-  int InputCount() FINAL { return I; }
-  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
-
-  int TempCount() FINAL { return T; }
-  LOperand* TempAt(int i) FINAL { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGap(HBasicBlock* block) : block_(block) {
-    parallel_moves_[BEFORE] = NULL;
-    parallel_moves_[START] = NULL;
-    parallel_moves_[END] = NULL;
-    parallel_moves_[AFTER] = NULL;
-  }
-
-  // Can't use the DECLARE-macro here because of sub-classes.
-  bool IsGap() const FINAL { return true; }
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-  static LGap* cast(LInstruction* instr) {
-    DCHECK(instr->IsGap());
-    return reinterpret_cast<LGap*>(instr);
-  }
-
-  bool IsRedundant() const;
-
-  HBasicBlock* block() const { return block_; }
-
-  enum InnerPosition {
-    BEFORE,
-    START,
-    END,
-    AFTER,
-    FIRST_INNER_POSITION = BEFORE,
-    LAST_INNER_POSITION = AFTER
-  };
-
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone)  {
-    if (parallel_moves_[pos] == NULL) {
-      parallel_moves_[pos] = new(zone) LParallelMove(zone);
-    }
-    return parallel_moves_[pos];
-  }
-
-  LParallelMove* GetParallelMove(InnerPosition pos)  {
-    return parallel_moves_[pos];
-  }
-
- private:
-  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
-  HBasicBlock* block_;
-};
-
-
-class LInstructionGap FINAL : public LGap {
- public:
-  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return !IsRedundant();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGoto(HBasicBlock* block) : block_(block) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
-  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-  bool IsControl() const OVERRIDE { return true; }
-
-  int block_id() const { return block_->block_id(); }
-  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
-    return false;
-  }
-
-  bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
-
- private:
-  HBasicBlock* block_;
-};
-
-
-class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-};
-
-
-class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  LDummy() {}
-  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LDummyUse(LOperand* value) {
-    inputs_[0] = value;
-  }
-  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
-  bool IsControl() const OVERRIDE { return true; }
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel FINAL : public LGap {
- public:
-  explicit LLabel(HBasicBlock* block)
-      : LGap(block), replacement_(NULL) { }
-
-  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  int block_id() const { return block()->block_id(); }
-  bool is_loop_header() const { return block()->IsLoopHeader(); }
-  bool is_osr_entry() const { return block()->is_osr_entry(); }
-  Label* label() { return &label_; }
-  LLabel* replacement() const { return replacement_; }
-  void set_replacement(LLabel* label) { replacement_ = label; }
-  bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
-  Label label_;
-  LLabel* replacement_;
-};
-
-
-class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
-class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 5, 0> {
- public:
-  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
-                                   LOperand* name, LOperand* slot,
-                                   LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = receiver;
-    inputs_[2] = name;
-    inputs_[3] = slot;
-    inputs_[4] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* receiver() { return inputs_[1]; }
-  LOperand* name() { return inputs_[2]; }
-  LOperand* slot() { return inputs_[3]; }
-  LOperand* vector() { return inputs_[4]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
-                               "tail-call-through-megamorphic-cache")
-  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
-class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
-  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
-  bool IsControl() const FINAL { return true; }
-
-  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
-  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
-  int TrueDestination(LChunk* chunk) {
-    return chunk->LookupDestination(true_block_id());
-  }
-  int FalseDestination(LChunk* chunk) {
-    return chunk->LookupDestination(false_block_id());
-  }
-
-  Label* TrueLabel(LChunk* chunk) {
-    if (true_label_ == NULL) {
-      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
-    }
-    return true_label_;
-  }
-  Label* FalseLabel(LChunk* chunk) {
-    if (false_label_ == NULL) {
-      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
-    }
-    return false_label_;
-  }
-
- protected:
-  int true_block_id() { return SuccessorAt(0)->block_id(); }
-  int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
-  HControlInstruction* hydrogen() {
-    return HControlInstruction::cast(this->hydrogen_value());
-  }
-
-  Label* false_label_;
-  Label* true_label_;
-};
-
-
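-// Note on TrueLabel()/FalseLabel() above: the assembly labels are fetched
-// from the chunk lazily and memoized in true_label_/false_label_, so the
-// code generator can ask for a branch target repeatedly without repeating
-// the block-id -> label lookup.
-
-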
-class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LWrapReceiver(LOperand* receiver,
-                LOperand* function,
-                LOperand* temp) {
-    inputs_[0] = receiver;
-    inputs_[1] = function;
-    temps_[0] = temp;
-  }
-
-  LOperand* receiver() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-};
-
-
-class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
- public:
-  LApplyArguments(LOperand* function,
-                  LOperand* receiver,
-                  LOperand* length,
-                  LOperand* elements) {
-    inputs_[0] = function;
-    inputs_[1] = receiver;
-    inputs_[2] = length;
-    inputs_[3] = elements;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-  LOperand* receiver() { return inputs_[1]; }
-  LOperand* length() { return inputs_[2]; }
-  LOperand* elements() { return inputs_[3]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-};
-
-
-class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
-  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
-    inputs_[0] = arguments;
-    inputs_[1] = length;
-    inputs_[2] = index;
-  }
-
-  LOperand* arguments() { return inputs_[0]; }
-  LOperand* length() { return inputs_[1]; }
-  LOperand* index() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LArgumentsLength(LOperand* elements) {
-    inputs_[0] = elements;
-  }
-
-  LOperand* elements() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
-  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
-    inputs_[0] = dividend;
-    divisor_ = divisor;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  int32_t divisor() const { return divisor_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
-  DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
-  int32_t divisor_;
-};
-
-
-class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
-  LModByConstI(LOperand* dividend,
-               int32_t divisor,
-               LOperand* temp1,
-               LOperand* temp2) {
-    inputs_[0] = dividend;
-    divisor_ = divisor;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  int32_t divisor() const { return divisor_; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
-  DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
-  int32_t divisor_;
-};
-
-
-class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LModI(LOperand* left, LOperand* right, LOperand* temp) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-    temps_[0] = temp;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
-  DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
-    inputs_[0] = dividend;
-    divisor_ = divisor;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  int32_t divisor() const { return divisor_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
-  DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
-  int32_t divisor_;
-};
-
-
-class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
-  LDivByConstI(LOperand* dividend,
-               int32_t divisor,
-               LOperand* temp1,
-               LOperand* temp2) {
-    inputs_[0] = dividend;
-    divisor_ = divisor;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  int32_t divisor() const { return divisor_; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
-  DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
-  int32_t divisor_;
-};
-
-
-class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
-    inputs_[0] = dividend;
-    inputs_[1] = divisor;
-    temps_[0] = temp;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  LOperand* divisor() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
-  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
-    inputs_[0] = dividend;
-    divisor_ = divisor;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  int32_t divisor() const { return divisor_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
-                               "flooring-div-by-power-of-2-i")
-  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
-  int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
-  LFlooringDivByConstI(LOperand* dividend,
-                       int32_t divisor,
-                       LOperand* temp1,
-                       LOperand* temp2,
-                       LOperand* temp3) {
-    inputs_[0] = dividend;
-    divisor_ = divisor;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = temp3;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  int32_t divisor() const { return divisor_; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-  LOperand* temp3() { return temps_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
-  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
-  int32_t divisor_;
-};
-
-
-class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
-    inputs_[0] = dividend;
-    inputs_[1] = divisor;
-    temps_[0] = temp;
-  }
-
-  LOperand* dividend() { return inputs_[0]; }
-  LOperand* divisor() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
-  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-    temps_[0] = temp;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
-  DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
- public:
-  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
-                               "compare-numeric-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
-  Token::Value op() const { return hydrogen()->token(); }
-  bool is_double() const {
-    return hydrogen()->representation().IsDouble();
-  }
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathFloor(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LMathRound(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* temp() { return temps_[0]; }
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LMathAbs(LOperand* context, LOperand* value) {
-    inputs_[1] = context;
-    inputs_[0] = value;
-  }
-
-  LOperand* context() { return inputs_[1]; }
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathLog(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathClz32(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-
-class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
-  LMathExp(LOperand* value,
-           LOperand* temp1,
-           LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
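-    // The fast exp code sequence reads a table of precomputed constants;
-    // make sure it is initialized before this instruction is compiled.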
-    ExternalReference::InitializeMathExpData();
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathSqrt(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LMathPowHalf(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
- public:
-  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
-};
-
-
-class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
- public:
-  explicit LCmpHoleAndBranch(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
-class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
- public:
-  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
- public:
-  LIsStringAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
- public:
-  explicit LIsSmiAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
- public:
-  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
-                               "is-undetectable-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
- public:
-  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* left() { return inputs_[1]; }
-  LOperand* right() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
-                               "string-compare-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
- public:
-  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
-                               "has-instance-type-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch FINAL
-    : public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
- public:
-  explicit LIsConstructCallAndBranch(LOperand* temp) {
-    temps_[0] = temp;
-  }
-
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
-                               "is-construct-call-and-branch")
-};
-
-
-class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
- public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-    temps_[1] = temp2;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
-                               "class-of-test-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
-  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
-  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
-  LOperand* context() { return inputs_[0]; }
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
-    inputs_[0] = context;
-    inputs_[1] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
-                               "instance-of-known-global")
-  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
-  Handle<JSFunction> function() const { return hydrogen()->function(); }
-  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
-    return lazy_deopt_env_;
-  }
-  void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) OVERRIDE {
-    lazy_deopt_env_ = env;
-  }
-
- private:
-  LEnvironment* lazy_deopt_env_;
-};
-
-
-class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
-  LBoundsCheck(LOperand* index, LOperand* length) {
-    inputs_[0] = index;
-    inputs_[1] = length;
-  }
-
-  LOperand* index() { return inputs_[0]; }
-  LOperand* length() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
-  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LBitI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-
-  Token::Value op() const { return hydrogen()->op(); }
-};
-
-
-class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
-      : op_(op), can_deopt_(can_deopt) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
-  Token::Value op() const { return op_; }
-  bool can_deopt() const { return can_deopt_; }
-
- private:
-  Token::Value op_;
-  bool can_deopt_;
-};
-
-
-class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LSubI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
-  DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
- public:
-  explicit LConstantD(LOperand* temp) {
-    temps_[0] = temp;
-  }
-
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  ExternalReference value() const {
-    return hydrogen()->ExternalReferenceValue();
-  }
-};
-
-
-class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  Handle<Object> value(Isolate* isolate) const {
-    return hydrogen()->handle(isolate);
-  }
-};
-
-
-class LBranch FINAL : public LControlInstruction<1, 1> {
- public:
-  LBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-  DECLARE_HYDROGEN_ACCESSOR(Branch)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
- public:
-  explicit LCmpMapAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
-  Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LDateField(LOperand* date, LOperand* temp, Smi* index)
-      : index_(index) {
-    inputs_[0] = date;
-    temps_[0] = temp;
-  }
-
-  LOperand* date() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
-  DECLARE_HYDROGEN_ACCESSOR(DateField)
-
-  Smi* index() const { return index_; }
-
- private:
-  Smi* index_;
-};
-
-
-class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LSeqStringGetChar(LOperand* string, LOperand* index) {
-    inputs_[0] = string;
-    inputs_[1] = index;
-  }
-
-  LOperand* string() const { return inputs_[0]; }
-  LOperand* index() const { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
-  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
- public:
-  LSeqStringSetChar(LOperand* context,
-                    LOperand* string,
-                    LOperand* index,
-                    LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = string;
-    inputs_[2] = index;
-    inputs_[3] = value;
-  }
-
-  LOperand* string() { return inputs_[1]; }
-  LOperand* index() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
-  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LAddI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  static bool UseLea(HAdd* add) {
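-    // Prefer LEA when the add cannot overflow (LEA sets no flags, so no
-    // overflow deopt check is possible) and the left operand is still
-    // needed elsewhere, since a two-operand ADD would clobber it.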
-    return !add->CheckFlag(HValue::kCanOverflow) &&
-        add->BetterLeftOperand()->UseCount() > 1;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
-  DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LMathMinMax(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
-  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LPower(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
-  DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  Token::Value op() const { return op_; }
-
-  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
-  void CompileToNative(LCodeGen* generator) OVERRIDE;
-  const char* Mnemonic() const OVERRIDE;
-
- private:
-  Token::Value op_;
-};
-
-
-class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
-  LArithmeticT(Token::Value op,
-               LOperand* context,
-               LOperand* left,
-               LOperand* right)
-      : op_(op) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* left() { return inputs_[1]; }
-  LOperand* right() { return inputs_[2]; }
-
-  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
-  void CompileToNative(LCodeGen* generator) OVERRIDE;
-  const char* Mnemonic() const OVERRIDE;
-
-  Token::Value op() const { return op_; }
-
- private:
-  Token::Value op_;
-};
-
-
-class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
-  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
-    inputs_[0] = value;
-    inputs_[1] = context;
-    inputs_[2] = parameter_count;
-  }
-
-  bool has_constant_parameter_count() {
-    return parameter_count()->IsConstantOperand();
-  }
-  LConstantOperand* constant_parameter_count() {
-    DCHECK(has_constant_parameter_count());
-    return LConstantOperand::cast(parameter_count());
-  }
-  LOperand* parameter_count() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-  DECLARE_HYDROGEN_ACCESSOR(Return)
-};
-
-
-class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadNamedField(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
-    inputs_[0] = function;
-    temps_[0] = temp;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
-  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
-  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
-  Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadKeyed(LOperand* elements, LOperand* key) {
-    inputs_[0] = elements;
-    inputs_[1] = key;
-  }
-  LOperand* elements() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  ElementsKind elements_kind() const {
-    return hydrogen()->elements_kind();
-  }
-  bool is_external() const {
-    return hydrogen()->is_external();
-  }
-  bool is_fixed_typed_array() const {
-    return hydrogen()->is_fixed_typed_array();
-  }
-  bool is_typed_elements() const {
-    return is_external() || is_fixed_typed_array();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-  uint32_t base_offset() const { return hydrogen()->base_offset(); }
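-  // A tagged key reaching this instruction is known to be a smi (checked
-  // during graph building), hence the name.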
-  bool key_is_smi() {
-    return hydrogen()->key()->representation().IsTagged();
-  }
-};
-
-
-inline static bool ExternalArrayOpRequiresTemp(
-    Representation key_representation,
-    ElementsKind elements_kind) {
-  // Operations that require the key to be divided by two to be converted into
-  // an index cannot fold the scale operation into a load and need an extra
-  // temp register to do the work.
-  return key_representation.IsSmi() &&
-      (elements_kind == EXTERNAL_INT8_ELEMENTS ||
-       elements_kind == EXTERNAL_UINT8_ELEMENTS ||
-       elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
-       elements_kind == UINT8_ELEMENTS ||
-       elements_kind == INT8_ELEMENTS ||
-       elements_kind == UINT8_CLAMPED_ELEMENTS);
-}
-
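A minimal sketch of why only the byte-sized element kinds above need the temp (the function and names here are illustrative, not from the V8 sources): a smi on ia32 stores its value shifted left by one bit, so the element index is key >> 1. x86 addressing modes can scale an index by 1, 2, 4 or 8, but never divide it, so the untagging shift must happen first in a spare register. For 2-byte elements the >> 1 and the x2 scale cancel out, which is why those kinds need no temp.

    #include <cstdint>

    // Illustrative only: addressing a 1-byte element with a smi key.
    int32_t ByteElementAddress(int32_t base, int32_t smi_key) {
      int32_t index = smi_key >> 1;  // untag the key in a temp register
      return base + index;           // then address as [base + temp]
    }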
-
-class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
- public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
-                    LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = obj;
-    inputs_[2] = key;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
-                     LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = global_object;
-    temps_[0] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* global_object() { return inputs_[1]; }
-  LOperand* temp_vector() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreGlobalCell(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadContextSlot(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
-  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
-  int slot_index() { return hydrogen()->slot_index(); }
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
- public:
-  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
-    inputs_[0] = context;
-    inputs_[1] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
-  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
-  int slot_index() { return hydrogen()->slot_index(); }
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LPushArgument(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LDrop(int count) : count_(count) { }
-
-  int count() const { return count_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
-  int count_;
-};
-
-
-class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
-  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
-    inputs_[0] = function;
-    inputs_[1] = code_object;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-  LOperand* code_object() { return inputs_[1]; }
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
-  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
-    inputs_[0] = base_object;
-    inputs_[1] = offset;
-  }
-
-  LOperand* base_object() const { return inputs_[0]; }
-  LOperand* offset() const { return inputs_[1]; }
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
-  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
-  DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LDeclareGlobals(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
-  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
- public:
-  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
-                      const ZoneList<LOperand*>& operands, Zone* zone)
-      : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
-    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
-    inputs_.AddAll(operands, zone);
-  }
-
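-  // inputs_[0] holds the code target; the remaining inputs follow the
-  // descriptor's register parameters in order.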
-  LOperand* target() const { return inputs_[0]; }
-
-  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- private:
-  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-
-  ZoneList<LOperand*> inputs_;
-
-  // Iterator support.
-  int InputCount() FINAL { return inputs_.length(); }
-  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
-
-  int TempCount() FINAL { return 0; }
-  LOperand* TempAt(int i) FINAL { return NULL; }
-};
-
-
-class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LInvokeFunction(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
-  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LCallFunction(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LCallNew(LOperand* context, LOperand* constructor) {
-    inputs_[0] = context;
-    inputs_[1] = constructor;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* constructor() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
-  DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LCallNewArray(LOperand* context, LOperand* constructor) {
-    inputs_[0] = context;
-    inputs_[1] = constructor;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* constructor() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
-  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallRuntime(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
-  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
-  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
-    return save_doubles() == kDontSaveFPRegs;
-  }
-
-  const Runtime::Function* function() const { return hydrogen()->function(); }
-  int arity() const { return hydrogen()->argument_count(); }
-  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LInteger32ToDouble(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LUint32ToDouble(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LNumberTagI(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LNumberTagU(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LNumberTagD(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LDoubleToI(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
-  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LDoubleToSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LTaggedToI(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
-
-  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LSmiTag(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LNumberUntagD(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  LSmiUntag(LOperand* value, bool needs_check)
-      : needs_check_(needs_check) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
-  bool needs_check() const { return needs_check_; }
-
- private:
-  bool needs_check_;
-};
-
-
-class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
- public:
-  LStoreNamedField(LOperand* obj,
-                   LOperand* val,
-                   LOperand* temp,
-                   LOperand* temp_map) {
-    inputs_[0] = obj;
-    inputs_[1] = val;
-    temps_[0] = temp;
-    temps_[1] = temp_map;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-  LOperand* temp_map() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = value;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-  Handle<Object> name() const { return hydrogen()->name(); }
-  StrictMode strict_mode() { return hydrogen()->strict_mode(); }
-};
-
-
-class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
-  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
-    inputs_[2] = val;
-  }
-
-  bool is_external() const { return hydrogen()->is_external(); }
-  bool is_fixed_typed_array() const {
-    return hydrogen()->is_fixed_typed_array();
-  }
-  bool is_typed_elements() const {
-    return is_external() || is_fixed_typed_array();
-  }
-  LOperand* elements() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  ElementsKind elements_kind() const {
-    return hydrogen()->elements_kind();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-  uint32_t base_offset() const { return hydrogen()->base_offset(); }
-  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
- public:
-  LStoreKeyedGeneric(LOperand* context,
-                     LOperand* object,
-                     LOperand* key,
-                     LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-    inputs_[2] = key;
-    inputs_[3] = value;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-  LOperand* key() { return inputs_[2]; }
-  LOperand* value() { return inputs_[3]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  StrictMode strict_mode() { return hydrogen()->strict_mode(); }
-};
-
-
-class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
- public:
-  LTransitionElementsKind(LOperand* object,
-                          LOperand* context,
-                          LOperand* new_map_temp,
-                          LOperand* temp) {
-    inputs_[0] = object;
-    inputs_[1] = context;
-    temps_[0] = new_map_temp;
-    temps_[1] = temp;
-  }
-
-  LOperand* context() { return inputs_[1]; }
-  LOperand* object() { return inputs_[0]; }
-  LOperand* new_map_temp() { return temps_[0]; }
-  LOperand* temp() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
-                               "transition-elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-
-  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
-  Handle<Map> transitioned_map() {
-    return hydrogen()->transitioned_map().handle();
-  }
-  ElementsKind from_kind() { return hydrogen()->from_kind(); }
-  ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
-  LTrapAllocationMemento(LOperand* object,
-                         LOperand* temp) {
-    inputs_[0] = object;
-    temps_[0] = temp;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
-                               "trap-allocation-memento")
-};
-
-
-class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
-  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* left() { return inputs_[1]; }
-  LOperand* right() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
-  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
-  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
-    inputs_[0] = context;
-    inputs_[1] = string;
-    inputs_[2] = index;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* string() { return inputs_[1]; }
-  LOperand* index() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
-  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LStringCharFromCode(LOperand* context, LOperand* char_code) {
-    inputs_[0] = context;
-    inputs_[1] = char_code;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* char_code() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
-  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckValue(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
-  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
-  LCheckInstanceType(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
-  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckMaps(LOperand* value = NULL) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCheckSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LClampDToUint8(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* unclamped() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LClampIToUint8(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* unclamped() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
-  LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
-    inputs_[0] = value;
-    temps_[0] = temp_xmm;
-  }
-
-  LOperand* unclamped() { return inputs_[0]; }
-  LOperand* temp_xmm() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckNonSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LDoubleBits(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
-  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LConstructDouble(LOperand* hi, LOperand* lo) {
-    inputs_[0] = hi;
-    inputs_[1] = lo;
-  }
-
-  LOperand* hi() { return inputs_[0]; }
-  LOperand* lo() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
-class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
-  LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
-    inputs_[0] = context;
-    inputs_[1] = size;
-    temps_[0] = temp;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* size() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
-  DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LRegExpLiteral(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
-  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LFunctionLiteral(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
-  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
-class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LTypeof(LOperand* context, LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = value;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
- public:
-  explicit LTypeofIsAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
-  Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
-  void PrintDataTo(StringStream* stream) OVERRIDE;
-};
-
-
-class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
-  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
-  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStackCheck(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
-  Label* done_label() { return &done_label_; }
-
- private:
-  Label done_label_;
-};
-
-
-class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LForInPrepareMap(LOperand* context, LOperand* object) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInCacheArray(LOperand* map) {
-    inputs_[0] = map;
-  }
-
-  LOperand* map() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
-  int idx() {
-    return HForInCacheArray::cast(this->hydrogen_value())->idx();
-  }
-};
-
-
-class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
-  LCheckMapValue(LOperand* value, LOperand* map) {
-    inputs_[0] = value;
-    inputs_[1] = map;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* map() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadFieldByIndex(LOperand* object, LOperand* index) {
-    inputs_[0] = object;
-    inputs_[1] = index;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
-class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk FINAL : public LChunk {
- public:
-  LPlatformChunk(CompilationInfo* info, HGraph* graph)
-      : LChunk(info, graph),
-        num_double_slots_(0) { }
-
-  int GetNextSpillIndex(RegisterKind kind);
-  LOperand* GetNextSpillSlot(RegisterKind kind);
-
-  int num_double_slots() const { return num_double_slots_; }
-
- private:
-  int num_double_slots_;
-};
-
-
-class LChunkBuilder FINAL : public LChunkBuilderBase {
- public:
-  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(info, graph),
-        current_instruction_(NULL),
-        current_block_(NULL),
-        next_block_(NULL),
-        allocator_(allocator) {}
-
-  // Build the sequence for the graph.
-  LPlatformChunk* Build();
-
-  // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
-  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
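-  // (Each list entry expands to a declaration such as:
-  //      LInstruction* DoAdd(HAdd* node);)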
-
-  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
-  LInstruction* DoMathRound(HUnaryMathOperation* instr);
-  LInstruction* DoMathFround(HUnaryMathOperation* instr);
-  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
-  LInstruction* DoMathLog(HUnaryMathOperation* instr);
-  LInstruction* DoMathExp(HUnaryMathOperation* instr);
-  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
-  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
-  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
-  LInstruction* DoDivByPowerOf2I(HDiv* instr);
-  LInstruction* DoDivByConstI(HDiv* instr);
-  LInstruction* DoDivI(HDiv* instr);
-  LInstruction* DoModByPowerOf2I(HMod* instr);
-  LInstruction* DoModByConstI(HMod* instr);
-  LInstruction* DoModI(HMod* instr);
-  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
-  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
-  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
-  // Methods for getting operands for Use / Define / Temp.
-  LUnallocated* ToUnallocated(Register reg);
-  LUnallocated* ToUnallocated(XMMRegister reg);
-
-  // Methods for setting up define-use relationships.
-  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
-  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
-  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
-                                           XMMRegister fixed_register);
-
-  // A value that is guaranteed to be allocated to a register.
-  // An operand created by UseRegister is guaranteed to be live until the
-  // end of the instruction. This means that the register allocator will
-  // not reuse its register for any other operand inside the instruction.
-  // An operand created by UseRegisterAtStart is guaranteed to be live only
-  // at the instruction start, so the register allocator is free to assign
-  // the same register to some other operand used inside the instruction
-  // (i.e. a temporary or the output).
-  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
-  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
-  // An input operand in a register that may be trashed.
-  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
-  // An input operand in a register or stack slot.
-  MUST_USE_RESULT LOperand* Use(HValue* value);
-  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
-  // An input operand in a register, stack slot or a constant operand.
-  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
-  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
-  // An input operand in a fixed register or a constant operand.
-  MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
-                                               Register fixed_register);
-
-  // An input operand in a register or a constant operand.
-  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
-  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
-  // An input operand in a constant operand.
-  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
-  // An input operand in register, stack slot or a constant operand.
-  // Will not be moved to a register even if one is freely available.
-  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
-
-  // Temporary operand that must be in a register.
-  MUST_USE_RESULT LUnallocated* TempRegister();
-  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
-  MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
-  // Methods for setting up define-use relationships.
-  // Return the same instruction that they are passed.
-  LInstruction* Define(LTemplateResultInstruction<1>* instr,
-                       LUnallocated* result);
-  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
-  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
-                                int index);
-  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
-  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
-                            Register reg);
-  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
-                                  XMMRegister reg);
-  // Assigns an environment to an instruction.  An instruction which can
-  // deoptimize must have an environment.
-  LInstruction* AssignEnvironment(LInstruction* instr);
-  // Assigns a pointer map to an instruction.  An instruction which can
-  // trigger a GC or a lazy deoptimization must have a pointer map.
-  LInstruction* AssignPointerMap(LInstruction* instr);
-
-  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
-  LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
-
-  // Marks a call for the register allocator.  Assigns a pointer map to
-  // support GC and lazy deoptimization.  Assigns an environment to support
-  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
-  LInstruction* MarkAsCall(
-      LInstruction* instr,
-      HInstruction* hinstr,
-      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
-  void VisitInstruction(HInstruction* current);
-  void AddInstruction(LInstruction* instr, HInstruction* current);
-
-  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
-  LInstruction* DoArithmeticD(Token::Value op,
-                              HArithmeticBinaryOperation* instr);
-  LInstruction* DoArithmeticT(Token::Value op,
-                              HBinaryOperation* instr);
-
-  LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
-
-  HInstruction* current_instruction_;
-  HBasicBlock* current_block_;
-  HBasicBlock* next_block_;
-  LAllocator* allocator_;
-
-  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_LITHIUM_IA32_H_
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 38259d7..5f80b4d 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2,19 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_IA32
 
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
-#include "src/cpu-profiler.h"
-#include "src/debug.h"
-#include "src/isolate-inl.h"
+#include "src/debug/debug.h"
+#include "src/ia32/frames-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
 #include "src/runtime/runtime.h"
-#include "src/serialize.h"
 
 namespace v8 {
 namespace internal {
@@ -22,14 +19,14 @@
 // -------------------------------------------------------------------------
 // MacroAssembler implementation.
 
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+                               CodeObjectRequired create_code_object)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       has_frame_(false) {
-  if (isolate() != NULL) {
-    // TODO(titzer): should we just use a null handle here instead?
-    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
-                                  isolate());
+  if (create_code_object == CodeObjectRequired::kYes) {
+    code_object_ =
+        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
   }
 }
 
@@ -69,8 +66,7 @@
 
 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
   if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
-    Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
-    mov(destination, value);
+    mov(destination, isolate()->heap()->root_handle(index));
     return;
   }
   ExternalReference roots_array_start =
@@ -108,16 +104,20 @@
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
   DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
-  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
-  cmp(with, value);
+  cmp(with, isolate()->heap()->root_handle(index));
 }
 
 
 void MacroAssembler::CompareRoot(const Operand& with,
                                  Heap::RootListIndex index) {
   DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
-  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
-  cmp(with, value);
+  cmp(with, isolate()->heap()->root_handle(index));
+}
+
+
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+  Push(isolate()->heap()->root_handle(index));
 }
 
 
@@ -574,9 +574,10 @@
 
 void MacroAssembler::DebugBreak() {
   Move(eax, Immediate(0));
-  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
+  mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
+                                       isolate())));
   CEntryStub ces(isolate(), 1);
-  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+  call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
 }
 
 
@@ -675,7 +676,7 @@
     XMMRegister scratch2,
     Label* fail,
     int elements_offset) {
-  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+  Label smi_value, done;
   JumpIfSmi(maybe_number, &smi_value, Label::kNear);
 
   CheckMap(maybe_number,
@@ -683,31 +684,10 @@
            fail,
            DONT_DO_SMI_CHECK);
 
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  cmp(FieldOperand(maybe_number, offset),
-      Immediate(kNaNOrInfinityLowerBoundUpper32));
-  j(greater_equal, &maybe_nan, Label::kNear);
-
-  bind(&not_nan);
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
-  bind(&have_double_value);
-  movsd(FieldOperand(elements, key, times_4,
-                     FixedDoubleArray::kHeaderSize - elements_offset),
-        scratch2);
-  jmp(&done);
-
-  bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  j(greater, &is_nan, Label::kNear);
-  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
-  j(zero, &not_nan);
-  bind(&is_nan);
-  movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
-  jmp(&have_double_value, Label::kNear);
+  // Double value, turn potential sNaN into qNaN.
+  Move(scratch2, 1.0);
+  mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+  jmp(&done, Label::kNear);
 
   bind(&smi_value);
   // Value is a smi. Convert to a double and store.
@@ -715,10 +695,10 @@
   mov(scratch1, maybe_number);
   SmiUntag(scratch1);
   Cvtsi2sd(scratch2, scratch1);
+  bind(&done);
   movsd(FieldOperand(elements, key, times_4,
                      FixedDoubleArray::kHeaderSize - elements_offset),
         scratch2);
-  bind(&done);
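The replacement above drops the explicit NaN/Infinity classification: per
IEEE 754, any SSE2 arithmetic on a signaling NaN yields a quiet NaN, and
multiplying a non-NaN double by 1.0 leaves it bit-identical, so a single
mulsd canonicalizes the value. A minimal standalone C++ sketch of the same
effect (illustration only, not part of this patch; note that compilers may
quiet an sNaN during loads/stores, so inspect the bits at -O0):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    int main() {
      double snan = std::numeric_limits<double>::signaling_NaN();
      double quieted = snan * 1.0;  // corresponds to the mulsd above
      uint64_t b0, b1;
      std::memcpy(&b0, &snan, sizeof b0);
      std::memcpy(&b1, &quieted, sizeof b1);
      std::printf("sNaN  %016llx\nqNaN  %016llx\n",
                  (unsigned long long)b0, (unsigned long long)b1);
      double d = 1.5;
      std::printf("1.5 survives: %d\n", d * 1.0 == 1.5);  // prints 1
      return 0;
    }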
 }
 
 
@@ -777,26 +757,6 @@
 }
 
 
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
-                                          Register map,
-                                          Register scratch,
-                                          Label* fail) {
-  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
-  IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
-                                            Register scratch,
-                                            Label* fail) {
-  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
-  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  cmp(scratch,
-      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  j(above, fail);
-}
-
-
 void MacroAssembler::FCmp() {
   fucomip();
   fstp(0);
@@ -849,6 +809,30 @@
 }
 
 
+void MacroAssembler::AssertFunction(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAFunction);
+    Push(object);
+    CmpObjectType(object, JS_FUNCTION_TYPE, object);
+    Pop(object);
+    Check(equal, kOperandIsNotAFunction);
+  }
+}
+
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+    Push(object);
+    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+    Pop(object);
+    Check(equal, kOperandIsNotABoundFunction);
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
   if (emit_debug_code()) {
     Label done_checking;
@@ -896,6 +880,13 @@
 }
 
 
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+  mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+  mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
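EmitLoadTypeFeedbackVector reuses its single output register for a
three-step pointer chase: the closure spilled in the frame, then its
SharedFunctionInfo, then the feedback vector hanging off it. A hypothetical
C++ mirror of the loads (struct names are stand-ins, not V8's actual object
layout):

    struct TypeFeedbackVector {};
    struct SharedFunctionInfo { TypeFeedbackVector* feedback_vector; };
    struct JSFunction { SharedFunctionInfo* shared; };

    // function_from_frame plays the role of Operand(ebp, kFunctionOffset).
    TypeFeedbackVector* LoadFeedbackVector(JSFunction* function_from_frame) {
      return function_from_frame->shared->feedback_vector;
    }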
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type,
                                 bool load_constant_pool_pointer_reg) {
   // Out-of-line constant pool not implemented on ia32.
@@ -995,7 +986,7 @@
 }
 
 
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
   // Optionally restore all XMM registers.
   if (save_doubles) {
     const int offset = -2 * kPointerSize;
@@ -1005,15 +996,20 @@
     }
   }
 
-  // Get the return address from the stack and restore the frame pointer.
-  mov(ecx, Operand(ebp, 1 * kPointerSize));
-  mov(ebp, Operand(ebp, 0 * kPointerSize));
+  if (pop_arguments) {
+    // Get the return address from the stack and restore the frame pointer.
+    mov(ecx, Operand(ebp, 1 * kPointerSize));
+    mov(ebp, Operand(ebp, 0 * kPointerSize));
 
-  // Pop the arguments and the receiver from the caller stack.
-  lea(esp, Operand(esi, 1 * kPointerSize));
+    // Pop the arguments and the receiver from the caller stack.
+    lea(esp, Operand(esi, 1 * kPointerSize));
 
-  // Push the return address to get ready to return.
-  push(ecx);
+    // Push the return address to get ready to return.
+    push(ecx);
+  } else {
+    // Otherwise just leave the exit frame.
+    leave();
+  }
 
   LeaveExitFrameEpilogue(true);
 }
@@ -1044,44 +1040,21 @@
 }
 
 
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
-                                    int handler_index) {
+void MacroAssembler::PushStackHandler() {
   // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // We will build up the handler from the bottom by pushing on the stack.
-  // First push the frame pointer and context.
-  if (kind == StackHandler::JS_ENTRY) {
-    // The frame pointer does not point to a JS frame so we save NULL for
-    // ebp. We expect the code throwing an exception to check ebp before
-    // dereferencing it to restore the context.
-    push(Immediate(0));  // NULL frame pointer.
-    push(Immediate(Smi::FromInt(0)));  // No context.
-  } else {
-    push(ebp);
-    push(esi);
-  }
-  // Push the state and the code object.
-  unsigned state =
-      StackHandler::IndexField::encode(handler_index) |
-      StackHandler::KindField::encode(kind);
-  push(Immediate(state));
-  Push(CodeObject());
 
   // Link the current handler as the next handler.
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   push(Operand::StaticVariable(handler_address));
+
   // Set this new handler as the current one.
   mov(Operand::StaticVariable(handler_address), esp);
 }
 
 
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   pop(Operand::StaticVariable(handler_address));
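With exception handling moving to per-code handler tables in this V8
version, a stack handler shrinks from five words to one: it is now just a
link in the singly linked handler list threaded through the stack. A sketch
of the two layouts, reconstructed from the STATIC_ASSERTs above and the
fields the old code pushed (an approximation, not V8's declarations):

    // Before: StackHandlerConstants::kSize == 5 * kPointerSize.
    struct OldStackHandler {
      void* next;     // kNextOffset == 0
      void* code;     // code object for the handler table
      int   state;    // handler index + kind (JS_ENTRY vs. catch)
      void* context;  // saved esi
      void* fp;       // saved ebp (NULL for JS_ENTRY)
    };

    // After: StackHandlerConstants::kSize == 1 * kPointerSize.
    struct NewStackHandler {
      void* next;  // everything else comes from the handler table
    };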
@@ -1089,103 +1062,6 @@
 }
 
 
-void MacroAssembler::JumpToHandlerEntry() {
-  // Compute the handler entry address and jump to it.  The handler table is
-  // a fixed array of (smi-tagged) code offsets.
-  // eax = exception, edi = code object, edx = state.
-  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
-  shr(edx, StackHandler::kKindWidth);
-  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
-  SmiUntag(edx);
-  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
-  jmp(edi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
-  // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in eax.
-  if (!value.is(eax)) {
-    mov(eax, value);
-  }
-  // Drop the stack pointer to the top of the top handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  mov(esp, Operand::StaticVariable(handler_address));
-  // Restore the next handler.
-  pop(Operand::StaticVariable(handler_address));
-
-  // Remove the code object and state, compute the handler address in edi.
-  pop(edi);  // Code object.
-  pop(edx);  // Index and state.
-
-  // Restore the context and frame pointer.
-  pop(esi);  // Context.
-  pop(ebp);  // Frame pointer.
-
-  // If the handler is a JS frame, restore the context to the frame.
-  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
-  // ebp or esi.
-  Label skip;
-  test(esi, esi);
-  j(zero, &skip, Label::kNear);
-  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-  bind(&skip);
-
-  JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
-  // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in eax.
-  if (!value.is(eax)) {
-    mov(eax, value);
-  }
-  // Drop the stack pointer to the top of the top stack handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  mov(esp, Operand::StaticVariable(handler_address));
-
-  // Unwind the handlers until the top ENTRY handler is found.
-  Label fetch_next, check_kind;
-  jmp(&check_kind, Label::kNear);
-  bind(&fetch_next);
-  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
-
-  bind(&check_kind);
-  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
-  test(Operand(esp, StackHandlerConstants::kStateOffset),
-       Immediate(StackHandler::KindField::kMask));
-  j(not_zero, &fetch_next);
-
-  // Set the top handler address to next handler past the top ENTRY handler.
-  pop(Operand::StaticVariable(handler_address));
-
-  // Remove the code object and state, compute the handler address in edi.
-  pop(edi);  // Code object.
-  pop(edx);  // Index and state.
-
-  // Clear the context pointer and frame pointer (0 was saved in the handler).
-  pop(esi);
-  pop(ebp);
-
-  JumpToHandlerEntry();
-}
-
-
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                             Register scratch1,
                                             Register scratch2,
@@ -1205,10 +1081,7 @@
     Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
   }
   // Load the native context of the current context.
-  int offset =
-      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
-  mov(scratch1, FieldOperand(scratch1, offset));
-  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+  mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
 
   // Check the context is a native context.
   if (emit_debug_code()) {
@@ -1293,6 +1166,7 @@
   mov(scratch, r0);
   shr(scratch, 16);
   xor_(r0, scratch);
+  and_(r0, 0x3fffffff);
 }
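The added and_(r0, 0x3fffffff) caps the computed hash at 30 bits so it
always fits in a non-negative Smi (a 31-bit payload on 32-bit targets).
The two visible context lines are the tail of Thomas Wang's 32-bit integer
hash; the full function, with the new mask as its last step, looks roughly
like this (reconstructed from memory of the C++ side, so treat the middle
steps as an assumption):

    #include <cstdint>

    uint32_t ComputeIntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);   // the shr/xor pair shown above
      return hash & 0x3fffffff;     // the new and_(r0, 0x3fffffff)
    }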
 
 
@@ -1359,7 +1233,7 @@
   // Check that the value is a field property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(FIELD, 0);
+  DCHECK_EQ(DATA, 0);
   test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
   j(not_zero, miss);
@@ -1452,12 +1326,11 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
-    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+    if ((flags & PRETENURE) != 0) {
       cmp(result, Operand::StaticVariable(allocation_limit));
       j(above_equal, gc_required);
     }
@@ -1529,12 +1402,11 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
-    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+    if ((flags & PRETENURE) != 0) {
       cmp(result, Operand::StaticVariable(allocation_limit));
       j(above_equal, gc_required);
     }
@@ -1604,12 +1476,11 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
-    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+    if ((flags & PRETENURE) != 0) {
       cmp(result, Operand::StaticVariable(allocation_limit));
       j(above_equal, gc_required);
     }
@@ -1639,20 +1510,6 @@
 }
 
 
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
-  ExternalReference new_space_allocation_top =
-      ExternalReference::new_space_allocation_top_address(isolate());
-
-  // Make sure the object has no tag before resetting top.
-  and_(object, Immediate(~kHeapObjectTagMask));
-#ifdef DEBUG
-  cmp(object, Operand::StaticVariable(new_space_allocation_top));
-  Check(below, kUndoAllocationOfNonAllocatedMemory);
-#endif
-  mov(Operand::StaticVariable(new_space_allocation_top), object);
-}
-
-
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch1,
                                         Register scratch2,
@@ -1819,6 +1676,27 @@
 }
 
 
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+                                     Register value, Register scratch,
+                                     Label* gc_required) {
+  DCHECK(!result.is(constructor));
+  DCHECK(!result.is(scratch));
+  DCHECK(!result.is(value));
+
+  // Allocate JSValue in new space.
+  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+  // Initialize the JSValue.
+  LoadGlobalFunctionInitialMap(constructor, scratch);
+  mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
+  LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+  mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+  mov(FieldOperand(result, JSValue::kValueOffset), value);
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
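The stores in AllocateJSValue write the standard JSObject header followed
by the wrapped primitive, which is exactly why the trailing STATIC_ASSERT
pins JSValue at four words. As a hypothetical layout sketch (field names
assumed, not V8's declarations):

    struct JSValue {
      void* map;         // <- initial map from the constructor
      void* properties;  // <- empty_fixed_array root
      void* elements;    // <- empty_fixed_array root
      void* value;       // <- the boxed primitive
    };
    static_assert(sizeof(JSValue) == 4 * sizeof(void*),
                  "mirrors JSValue::kSize == 4 * kPointerSize");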
+
+
 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
 // long or aligned copies.  The contents of scratch and length are destroyed.
 // Source and destination are incremented by length.
@@ -1886,17 +1764,17 @@
 }
 
 
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
-                                                Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+                                                Register end_address,
                                                 Register filler) {
   Label loop, entry;
   jmp(&entry);
   bind(&loop);
-  mov(Operand(start_offset, 0), filler);
-  add(start_offset, Immediate(kPointerSize));
+  mov(Operand(current_address, 0), filler);
+  add(current_address, Immediate(kPointerSize));
   bind(&entry);
-  cmp(start_offset, end_offset);
-  j(less, &loop);
+  cmp(current_address, end_address);
+  j(below, &loop);
 }
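Besides renaming the registers to reflect that they hold addresses, the
loop condition changes from j(less, ...) to j(below, ...): addresses are
unsigned, and a signed comparison would exit early if the filled region
crossed the 0x80000000 boundary. A compilable illustration of the pitfall
(not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t current = 0x7ffffff0u;  // just below the signed boundary
      uint32_t end     = 0x80000010u;  // just above it
      std::printf("below (unsigned): %d\n", current < end);   // 1: keep looping
      std::printf("less  (signed):   %d\n",
                  (int32_t)current < (int32_t)end);           // 0: early exit
      return 0;
    }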
 
 
@@ -1940,33 +1818,22 @@
 }
 
 
-void MacroAssembler::TryGetFunctionPrototype(Register function,
-                                             Register result,
-                                             Register scratch,
-                                             Label* miss,
-                                             bool miss_on_bound_function) {
-  Label non_instance;
-  if (miss_on_bound_function) {
-    // Check that the receiver isn't a smi.
-    JumpIfSmi(function, miss);
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+                                       Register temp) {
+  Label done, loop;
+  mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
+  bind(&loop);
+  JumpIfSmi(result, &done, Label::kNear);
+  CmpObjectType(result, MAP_TYPE, temp);
+  j(not_equal, &done, Label::kNear);
+  mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
+  jmp(&loop);
+  bind(&done);
+}
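GetMapConstructor exists because the constructor slot of a map is now
overloaded (Map::kConstructorOrBackPointerOffset): for maps created by
transitions it holds a back pointer to the parent map, so the real
constructor is found by walking until the value is no longer a map. A
minimal C++ model of the walk (stand-in types; the smi sentinel checked by
JumpIfSmi above is folded into the non-map branch here):

    #include <variant>

    struct JSFunction;
    struct Map {
      // Either a back pointer to the transition parent or the constructor.
      std::variant<Map*, JSFunction*> constructor_or_back_pointer;
    };

    JSFunction* GetMapConstructor(Map* map) {
      auto slot = map->constructor_or_back_pointer;
      while (std::holds_alternative<Map*>(slot)) {
        slot = std::get<Map*>(slot)->constructor_or_back_pointer;
      }
      return std::get<JSFunction*>(slot);
    }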
 
-    // Check that the function really is a function.
-    CmpObjectType(function, JS_FUNCTION_TYPE, result);
-    j(not_equal, miss);
 
-    // If a bound function, go to miss label.
-    mov(scratch,
-        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
-                   SharedFunctionInfo::kBoundFunction);
-    j(not_zero, miss);
-
-    // Make sure that the function has an instance prototype.
-    movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
-    test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
-    j(not_zero, &non_instance);
-  }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+                                             Register scratch, Label* miss) {
   // Get the prototype or initial map from the function.
   mov(result,
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1980,20 +1847,11 @@
   // If the function does not have an initial map, we're done.
   Label done;
   CmpObjectType(result, MAP_TYPE, scratch);
-  j(not_equal, &done);
+  j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   mov(result, FieldOperand(result, Map::kPrototypeOffset));
 
-  if (miss_on_bound_function) {
-    jmp(&done);
-
-    // Non-instance prototype: Fetch prototype from constructor field
-    // in initial map.
-    bind(&non_instance);
-    mov(result, FieldOperand(result, Map::kConstructorOffset));
-  }
-
   // All done.
   bind(&done);
 }
@@ -2063,187 +1921,27 @@
 }
 
 
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
-                                               int num_arguments,
-                                               int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Move(eax, Immediate(num_arguments));
-  JumpToExternalReference(ext);
-}
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+  // ----------- S t a t e -------------
+  //  -- esp[0]                 : return address
+  //  -- esp[4]                 : argument num_arguments - 1
+  //  ...
+  //  -- esp[4 * num_arguments] : argument 0 (receiver)
+  //
+  //  For runtime functions with variable arguments:
+  //  -- eax                    : number of arguments
+  // -----------------------------------
 
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
-                                     int num_arguments,
-                                     int result_size) {
-  TailCallExternalReference(ExternalReference(fid, isolate()),
-                            num_arguments,
-                            result_size);
-}
-
-
-Operand ApiParameterOperand(int index) {
-  return Operand(esp, index * kPointerSize);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int argc) {
-  EnterApiExitFrame(argc);
-  if (emit_debug_code()) {
-    mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
+  const Runtime::Function* function = Runtime::FunctionForId(fid);
+  DCHECK_EQ(1, function->result_size);
+  if (function->nargs >= 0) {
+    // TODO(1236192): Most runtime routines don't need the number of
+    // arguments passed in because it is constant. At some point we
+    // should remove this need and make the runtime routine entry code
+    // smarter.
+    mov(eax, Immediate(function->nargs));
   }
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
-    Register function_address,
-    ExternalReference thunk_ref,
-    Operand thunk_last_arg,
-    int stack_space,
-    Operand return_value_operand,
-    Operand* context_restore_operand) {
-  ExternalReference next_address =
-      ExternalReference::handle_scope_next_address(isolate());
-  ExternalReference limit_address =
-      ExternalReference::handle_scope_limit_address(isolate());
-  ExternalReference level_address =
-      ExternalReference::handle_scope_level_address(isolate());
-
-  DCHECK(edx.is(function_address));
-  // Allocate HandleScope in callee-save registers.
-  mov(ebx, Operand::StaticVariable(next_address));
-  mov(edi, Operand::StaticVariable(limit_address));
-  add(Operand::StaticVariable(level_address), Immediate(1));
-
-  if (FLAG_log_timer_events) {
-    FrameScope frame(this, StackFrame::MANUAL);
-    PushSafepointRegisters();
-    PrepareCallCFunction(1, eax);
-    mov(Operand(esp, 0),
-        Immediate(ExternalReference::isolate_address(isolate())));
-    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
-    PopSafepointRegisters();
-  }
-
-
-  Label profiler_disabled;
-  Label end_profiler_check;
-  mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
-  cmpb(Operand(eax, 0), 0);
-  j(zero, &profiler_disabled);
-
-  // Additional parameter is the address of the actual getter function.
-  mov(thunk_last_arg, function_address);
-  // Call the api function.
-  mov(eax, Immediate(thunk_ref));
-  call(eax);
-  jmp(&end_profiler_check);
-
-  bind(&profiler_disabled);
-  // Call the api function.
-  call(function_address);
-  bind(&end_profiler_check);
-
-  if (FLAG_log_timer_events) {
-    FrameScope frame(this, StackFrame::MANUAL);
-    PushSafepointRegisters();
-    PrepareCallCFunction(1, eax);
-    mov(Operand(esp, 0),
-        Immediate(ExternalReference::isolate_address(isolate())));
-    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
-    PopSafepointRegisters();
-  }
-
-  Label prologue;
-  // Load the value from ReturnValue
-  mov(eax, return_value_operand);
-
-  Label promote_scheduled_exception;
-  Label exception_handled;
-  Label delete_allocated_handles;
-  Label leave_exit_frame;
-
-  bind(&prologue);
-  // No more valid handles (the result handle was the last one). Restore
-  // previous handle scope.
-  mov(Operand::StaticVariable(next_address), ebx);
-  sub(Operand::StaticVariable(level_address), Immediate(1));
-  Assert(above_equal, kInvalidHandleScopeLevel);
-  cmp(edi, Operand::StaticVariable(limit_address));
-  j(not_equal, &delete_allocated_handles);
-  bind(&leave_exit_frame);
-
-  // Check if the function scheduled an exception.
-  ExternalReference scheduled_exception_address =
-      ExternalReference::scheduled_exception_address(isolate());
-  cmp(Operand::StaticVariable(scheduled_exception_address),
-      Immediate(isolate()->factory()->the_hole_value()));
-  j(not_equal, &promote_scheduled_exception);
-  bind(&exception_handled);
-
-#if ENABLE_EXTRA_CHECKS
-  // Check if the function returned a valid JavaScript value.
-  Label ok;
-  Register return_value = eax;
-  Register map = ecx;
-
-  JumpIfSmi(return_value, &ok, Label::kNear);
-  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
-  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
-  j(below, &ok, Label::kNear);
-
-  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
-  j(above_equal, &ok, Label::kNear);
-
-  cmp(map, isolate()->factory()->heap_number_map());
-  j(equal, &ok, Label::kNear);
-
-  cmp(return_value, isolate()->factory()->undefined_value());
-  j(equal, &ok, Label::kNear);
-
-  cmp(return_value, isolate()->factory()->true_value());
-  j(equal, &ok, Label::kNear);
-
-  cmp(return_value, isolate()->factory()->false_value());
-  j(equal, &ok, Label::kNear);
-
-  cmp(return_value, isolate()->factory()->null_value());
-  j(equal, &ok, Label::kNear);
-
-  Abort(kAPICallReturnedInvalidObject);
-
-  bind(&ok);
-#endif
-
-  bool restore_context = context_restore_operand != NULL;
-  if (restore_context) {
-    mov(esi, *context_restore_operand);
-  }
-  LeaveApiExitFrame(!restore_context);
-  ret(stack_space * kPointerSize);
-
-  bind(&promote_scheduled_exception);
-  {
-    FrameScope frame(this, StackFrame::INTERNAL);
-    CallRuntime(Runtime::kPromoteScheduledException, 0);
-  }
-  jmp(&exception_handled);
-
-  // HandleScope limit has changed. Delete allocated extensions.
-  ExternalReference delete_extensions =
-      ExternalReference::delete_handle_scope_extensions(isolate());
-  bind(&delete_allocated_handles);
-  mov(Operand::StaticVariable(limit_address), edi);
-  mov(edi, eax);
-  mov(Operand(esp, 0),
-      Immediate(ExternalReference::isolate_address(isolate())));
-  mov(eax, Immediate(delete_extensions));
-  call(eax);
-  mov(eax, edi);
-  jmp(&leave_exit_frame);
+  JumpToExternalReference(ExternalReference(fid, isolate()));
 }
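The rewrite folds TailCallExternalReference into TailCallRuntime: rather
than every caller passing num_arguments, the arity is looked up in the
runtime function table, and eax is only materialized for fixed-arity
functions; variadic runtime functions (nargs < 0) receive the count from
the caller in eax. The decision in C++ form (a sketch of the control flow
above, with a stand-in struct rather than V8's Runtime::Function):

    struct RuntimeFunctionInfo {
      int nargs;        // < 0 means variable argument count
      int result_size;  // this path requires exactly 1
    };

    void SetUpTailCall(const RuntimeFunctionInfo& f, int& eax) {
      // DCHECK_EQ(1, f.result_size);
      if (f.nargs >= 0) eax = f.nargs;  // fixed arity: set the count here
      // otherwise the caller already put the argument count in eax
      // ... then jump to the CEntry stub for this runtime function ...
    }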
 
 
@@ -2257,8 +1955,6 @@
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
-                                    Handle<Code> code_constant,
-                                    const Operand& code_operand,
                                     Label* done,
                                     bool* definitely_mismatches,
                                     InvokeFlag flag,
@@ -2269,10 +1965,10 @@
   Label invoke;
   if (expected.is_immediate()) {
     DCHECK(actual.is_immediate());
+    mov(eax, actual.immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
     } else {
-      mov(eax, actual.immediate());
       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
       if (expected.immediate() == sentinel) {
         // Don't worry about adapting arguments for builtins that
@@ -2290,10 +1986,10 @@
       // Expected is in register, actual is immediate. This is the
       // case when we invoke function values without going through the
       // IC mechanism.
+      mov(eax, actual.immediate());
       cmp(expected.reg(), actual.immediate());
       j(equal, &invoke);
       DCHECK(expected.reg().is(ebx));
-      mov(eax, actual.immediate());
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
@@ -2301,19 +1997,14 @@
       j(equal, &invoke);
       DCHECK(actual.reg().is(eax));
       DCHECK(expected.reg().is(ebx));
+    } else {
+      Move(eax, actual.reg());
     }
   }
 
   if (!definitely_matches) {
     Handle<Code> adaptor =
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
-    if (!code_constant.is_null()) {
-      mov(edx, Immediate(code_constant));
-      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    } else if (!code_operand.is_reg(edx)) {
-      mov(edx, code_operand);
-    }
-
     if (flag == CALL_FUNCTION) {
       call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
       call(adaptor, RelocInfo::CODE_TARGET);
@@ -2329,20 +2020,76 @@
 }
 
 
-void MacroAssembler::InvokeCode(const Operand& code,
-                                const ParameterCount& expected,
-                                const ParameterCount& actual,
-                                InvokeFlag flag,
-                                const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+                                             const ParameterCount& expected,
+                                             const ParameterCount& actual) {
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(isolate());
+  cmpb(Operand::StaticVariable(step_in_enabled), 0);
+  j(equal, &skip_flooding);
+  {
+    FrameScope frame(this,
+                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+    if (expected.is_reg()) {
+      SmiTag(expected.reg());
+      Push(expected.reg());
+    }
+    if (actual.is_reg()) {
+      SmiTag(actual.reg());
+      Push(actual.reg());
+    }
+    if (new_target.is_valid()) {
+      Push(new_target);
+    }
+    Push(fun);
+    Push(fun);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    Pop(fun);
+    if (new_target.is_valid()) {
+      Pop(new_target);
+    }
+    if (actual.is_reg()) {
+      Pop(actual.reg());
+      SmiUntag(actual.reg());
+    }
+    if (expected.is_reg()) {
+      Pop(expected.reg());
+      SmiUntag(expected.reg());
+    }
+  }
+  bind(&skip_flooding);
+}
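FloodFunctionIfStepping must survive a runtime call, so any raw argument
counts held in registers are smi-tagged before being pushed: the GC walks
these stack slots, and a small integer with its low tag bit clear is a
valid smi, whereas a raw count could be misread as a heap pointer. On
32-bit targets the tagging is a plain shift (sketch):

    #include <cstdint>

    // 31-bit payload, low bit 0 marks a smi on ia32.
    int32_t SmiTag(int32_t value)   { return value << 1; }
    int32_t SmiUntag(int32_t value) { return value >> 1; }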
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+                                        const ParameterCount& expected,
+                                        const ParameterCount& actual,
+                                        InvokeFlag flag,
+                                        const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
   DCHECK(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(function.is(edi));
+  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
+
+  if (call_wrapper.NeedsDebugStepCheck()) {
+    FloodFunctionIfStepping(function, new_target, expected, actual);
+  }
+
+  // Clear the new.target register if not given.
+  if (!new_target.is_valid()) {
+    mov(edx, isolate()->factory()->undefined_value());
+  }
 
   Label done;
   bool definitely_mismatches = false;
-  InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, &definitely_mismatches, flag, Label::kNear,
-                 call_wrapper);
+  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+                 Label::kNear, call_wrapper);
   if (!definitely_mismatches) {
+    // We call indirectly through the code field in the function to
+    // allow recompilation to take effect without changing any of the
+    // call sites.
+    Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
     if (flag == CALL_FUNCTION) {
       call_wrapper.BeforeCall(CallSize(code));
       call(code);
@@ -2357,6 +2104,7 @@
 
 
 void MacroAssembler::InvokeFunction(Register fun,
+                                    Register new_target,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
@@ -2364,14 +2112,13 @@
   DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   DCHECK(fun.is(edi));
-  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
   SmiUntag(ebx);
 
   ParameterCount expected(ebx);
-  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag, call_wrapper);
+  InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
 }
 
 
@@ -2386,8 +2133,7 @@
   DCHECK(fun.is(edi));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag, call_wrapper);
+  InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
 }
 
 
@@ -2401,38 +2147,23 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
   // You can't call a builtin without a valid frame.
   DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  // Rely on the assertion to check that the number of provided
-  // arguments matches the expected number of arguments. Fake a
-  // parameter count to avoid emitting code to do the check.
+  // Fake a parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
-  GetBuiltinFunction(edi, id);
-  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, expected, flag, call_wrapper);
+  GetBuiltinFunction(edi, native_context_index);
+  InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
 }
 
 
 void MacroAssembler::GetBuiltinFunction(Register target,
-                                        Builtins::JavaScript id) {
+                                        int native_context_index) {
   // Load the JavaScript builtin function from the native context.
-  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
-  mov(target, FieldOperand(target,
-                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  DCHECK(!target.is(edi));
-  // Load the JavaScript builtin function from the builtins object.
-  GetBuiltinFunction(edi, id);
-  // Load the code entry point from the function into the target register.
-  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  mov(target, NativeContextOperand());
+  mov(target, ContextOperand(target, native_context_index));
 }
 
 
@@ -2462,41 +2193,38 @@
 }
 
 
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+  mov(dst, NativeContextOperand());
+  mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
+}
+
+
 void MacroAssembler::LoadTransitionedArrayMapConditional(
     ElementsKind expected_kind,
     ElementsKind transitioned_kind,
     Register map_in_out,
     Register scratch,
     Label* no_map_match) {
-  // Load the global or builtins object from the current context.
-  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+  DCHECK(IsFastElementsKind(expected_kind));
+  DCHECK(IsFastElementsKind(transitioned_kind));
 
   // Check that the function's map is the same as the expected cached map.
-  mov(scratch, Operand(scratch,
-                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
-  size_t offset = expected_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  cmp(map_in_out, FieldOperand(scratch, offset));
+  mov(scratch, NativeContextOperand());
+  cmp(map_in_out,
+      ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
   j(not_equal, no_map_match);
 
   // Use the transitioned cached map.
-  offset = transitioned_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  mov(map_in_out, FieldOperand(scratch, offset));
+  mov(map_in_out,
+      ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
 }
 
 
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
-  // Load the global or builtins object from the current context.
-  mov(function,
-      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  // Load the native context from the global or builtins object.
-  mov(function,
-      FieldOperand(function, GlobalObject::kNativeContextOffset));
+  // Load the native context from the current context.
+  mov(function, NativeContextOperand());
   // Load the function from the native context.
-  mov(function, Operand(function, Context::SlotOffset(index)));
+  mov(function, ContextOperand(function, index));
 }
 
 
@@ -2587,10 +2315,15 @@
 }
 
 
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
-                                   Label* miss) {
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
   mov(value, cell);
   mov(value, FieldOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  GetWeakValue(value, cell);
   JumpIfSmi(value, miss);
 }
 
@@ -2666,11 +2399,11 @@
 
 
 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
-  uint32_t lower = static_cast<uint32_t>(src);
-  uint32_t upper = static_cast<uint32_t>(src >> 32);
-  if (upper == 0) {
-    Move(dst, lower);
+  if (src == 0) {
+    pxor(dst, dst);
   } else {
+    uint32_t lower = static_cast<uint32_t>(src);
+    uint32_t upper = static_cast<uint32_t>(src >> 32);
     unsigned cnt = base::bits::CountPopulation64(src);
     unsigned nlz = base::bits::CountLeadingZeros64(src);
     unsigned ntz = base::bits::CountTrailingZeros64(src);
@@ -2703,6 +2436,80 @@
 }
 
 
+void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+  if (imm8 == 0) {
+    movd(dst, src);
+    return;
+  }
+  DCHECK_EQ(1, imm8);
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope sse_scope(this, SSE4_1);
+    pextrd(dst, src, imm8);
+    return;
+  }
+  pshufd(xmm0, src, 1);
+  movd(dst, xmm0);
+}
+
+
+void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+  DCHECK(imm8 == 0 || imm8 == 1);
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope sse_scope(this, SSE4_1);
+    pinsrd(dst, src, imm8);
+    return;
+  }
+  movd(xmm0, src);
+  if (imm8 == 1) {
+    punpckldq(dst, xmm0);
+  } else {
+    DCHECK_EQ(0, imm8);
+    psrlq(dst, 32);
+    punpckldq(xmm0, dst);
+    movaps(dst, xmm0);
+  }
+}
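Pextrd and Pinsrd prefer the single SSE4.1 instruction and otherwise fall
back to SSE2 shuffles, with xmm0 as an implicit scratch register that
callers must treat as clobbered. The lane-1 extraction path corresponds to
this intrinsics sketch (SSE2 only; illustration, not part of the patch):

    #include <emmintrin.h>
    #include <cstdint>

    // Fallback for "pextrd dst, src, 1": rotate lane 1 into lane 0, movd.
    uint32_t Pextrd1(__m128i src) {
      __m128i t = _mm_shuffle_epi32(src, 1);   // pshufd xmm0, src, 1
      return (uint32_t)_mm_cvtsi128_si32(t);   // movd dst, xmm0
    }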
+
+
+void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(LZCNT)) {
+    CpuFeatureScope scope(this, LZCNT);
+    lzcnt(dst, src);
+    return;
+  }
+  Label not_zero_src;
+  bsr(dst, src);
+  j(not_zero, &not_zero_src, Label::kNear);
+  Move(dst, Immediate(63));  // 63^31 == 32
+  bind(&not_zero_src);
+  xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
+}
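The bsr fallback relies on two facts: for nonzero x, bsr returns the index
of the highest set bit, and for any b in [0..31], 31 ^ b == 31 - b, so the
final xor converts that index into a leading-zero count; seeding dst with
63 when the input is zero makes the same xor yield 63 ^ 31 == 32, lzcnt's
defined result for 0. A quick check (uses the GCC/Clang __builtin_clz):

    #include <cstdint>
    #include <cstdio>

    int main() {
      for (uint32_t x : {1u, 0x12345678u, 0x80000000u}) {
        int bsr = 31 - __builtin_clz(x);  // index of highest set bit
        std::printf("x=%08x  31^bsr=%2d  clz=%2d\n",
                    x, 31 ^ bsr, __builtin_clz(x));
      }
      std::printf("zero case: 63^31 = %d\n", 63 ^ 31);  // 32
      return 0;
    }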
+
+
+void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(BMI1)) {
+    CpuFeatureScope scope(this, BMI1);
+    tzcnt(dst, src);
+    return;
+  }
+  Label not_zero_src;
+  bsf(dst, src);
+  j(not_zero, &not_zero_src, Label::kNear);
+  Move(dst, Immediate(32));  // The result of tzcnt is 32 if src = 0.
+  bind(&not_zero_src);
+}
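Tzcnt is simpler: bsf already returns the trailing-zero count for nonzero
inputs, so only the zero case needs patching up to tzcnt's defined result
of 32. Note that Popcnt below has no software fallback at all; it hits
UNREACHABLE(), so callers must gate on CpuFeatures::IsSupported(POPCNT).
The equivalent logic (again using a GCC/Clang builtin for illustration):

    #include <cstdint>

    uint32_t TzcntEmulated(uint32_t x) {
      if (x == 0) return 32;      // the Move(dst, Immediate(32)) path
      return __builtin_ctz(x);    // bsf gives the same answer for x != 0
    }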
+
+
+void MacroAssembler::Popcnt(Register dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(POPCNT)) {
+    CpuFeatureScope scope(this, POPCNT);
+    popcnt(dst, src);
+    return;
+  }
+  UNREACHABLE();
+}
+
+
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2855,6 +2662,18 @@
 }
 
 
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+                                  int accessor_index,
+                                  AccessorComponent accessor) {
+  mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
+  LoadInstanceDescriptors(dst, dst);
+  mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+                                           : AccessorPair::kSetterOffset;
+  mov(dst, FieldOperand(dst, offset));
+}
+
+
 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
                                   Register scratch,
                                   int power) {
@@ -2866,81 +2685,6 @@
 }
 
 
-void MacroAssembler::LookupNumberStringCache(Register object,
-                                             Register result,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* not_found) {
-  // Use of registers. Register result is used as a temporary.
-  Register number_string_cache = result;
-  Register mask = scratch1;
-  Register scratch = scratch2;
-
-  // Load the number string cache.
-  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-  // Make the hash mask from the length of the number string cache. It
-  // contains two elements (number and string) for each cache entry.
-  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
-  sub(mask, Immediate(1));  // Make mask.
-
-  // Calculate the entry in the number string cache. The hash value in the
-  // number string cache for smis is just the smi value, and the hash for
-  // doubles is the xor of the upper and lower words. See
-  // Heap::GetNumberStringCache.
-  Label smi_hash_calculated;
-  Label load_result_from_cache;
-  Label not_smi;
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfNotSmi(object, &not_smi, Label::kNear);
-  mov(scratch, object);
-  SmiUntag(scratch);
-  jmp(&smi_hash_calculated, Label::kNear);
-  bind(&not_smi);
-  cmp(FieldOperand(object, HeapObject::kMapOffset),
-      isolate()->factory()->heap_number_map());
-  j(not_equal, not_found);
-  STATIC_ASSERT(8 == kDoubleSize);
-  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
-  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
-  // Object is heap number and hash is now in scratch. Calculate cache index.
-  and_(scratch, mask);
-  Register index = scratch;
-  Register probe = mask;
-  mov(probe,
-      FieldOperand(number_string_cache,
-                   index,
-                   times_twice_pointer_size,
-                   FixedArray::kHeaderSize));
-  JumpIfSmi(probe, not_found);
-  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
-  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
-  j(parity_even, not_found);  // Bail out if NaN is involved.
-  j(not_equal, not_found);  // The cache did not contain this value.
-  jmp(&load_result_from_cache, Label::kNear);
-
-  bind(&smi_hash_calculated);
-  // Object is smi and hash is now in scratch. Calculate cache index.
-  and_(scratch, mask);
-  // Check if the entry is the smi we are looking for.
-  cmp(object,
-      FieldOperand(number_string_cache,
-                   index,
-                   times_twice_pointer_size,
-                   FixedArray::kHeaderSize));
-  j(not_equal, not_found);
-
-  // Get the result from the cache.
-  bind(&load_result_from_cache);
-  mov(result,
-      FieldOperand(number_string_cache,
-                   index,
-                   times_twice_pointer_size,
-                   FixedArray::kHeaderSize + kPointerSize));
-  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
-}
-
-
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
     Register instance_type, Register scratch, Label* failure) {
   if (!scratch.is(instance_type)) {
@@ -3104,10 +2848,10 @@
 #endif
 
 
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(NULL, address, size + Assembler::kGap) {
+      masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
@@ -3117,7 +2861,7 @@
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
-  CpuFeatures::FlushICache(address_, size_);
+  Assembler::FlushICache(masm_.isolate(), address_, size_);
 
   // Check that the code was patched as expected.
   DCHECK(masm_.pc_ == address_ + size_);
@@ -3177,10 +2921,9 @@
                                  Register scratch1,
                                  Label* on_black,
                                  Label::Distance on_black_near) {
-  HasColor(object, scratch0, scratch1,
-           on_black, on_black_near,
-           1, 0);  // kBlackBitPattern.
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
+           1);  // kBlackBitPattern.
+  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
 }
 
 
@@ -3234,110 +2977,22 @@
 }
 
 
-void MacroAssembler::EnsureNotWhite(
-    Register value,
-    Register bitmap_scratch,
-    Register mask_scratch,
-    Label* value_is_white_and_not_data,
-    Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+                                 Register mask_scratch, Label* value_is_white,
+                                 Label::Distance distance) {
   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
   GetMarkBits(value, bitmap_scratch, mask_scratch);
 
   // If the value is black or grey we don't need to do anything.
   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
-  Label done;
-
   // Since both black and grey have a 1 in the first position and white does
   // not have a 1 there, we only need to check one bit.
   test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  j(not_zero, &done, Label::kNear);
-
-  if (emit_debug_code()) {
-    // Check for impossible bit pattern.
-    Label ok;
-    push(mask_scratch);
-    // shl.  May overflow making the check conservative.
-    add(mask_scratch, mask_scratch);
-    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-    j(zero, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-    pop(mask_scratch);
-  }
-
-  // Value is white.  We check whether it is data that doesn't need scanning.
-  // Currently only checks for HeapNumber and non-cons strings.
-  Register map = ecx;  // Holds map while checking type.
-  Register length = ecx;  // Holds length of object after checking type.
-  Label not_heap_number;
-  Label is_data_object;
-
-  // Check for heap-number
-  mov(map, FieldOperand(value, HeapObject::kMapOffset));
-  cmp(map, isolate()->factory()->heap_number_map());
-  j(not_equal, &not_heap_number, Label::kNear);
-  mov(length, Immediate(HeapNumber::kSize));
-  jmp(&is_data_object, Label::kNear);
-
-  bind(&not_heap_number);
-  // Check for strings.
-  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  Register instance_type = ecx;
-  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
-  j(not_zero, value_is_white_and_not_data);
-  // It's a non-indirect (non-cons and non-slice) string.
-  // If it's external, the length is just ExternalString::kSize.
-  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
-  Label not_external;
-  // External strings are the only ones with the kExternalStringTag bit
-  // set.
-  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
-  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
-  test_b(instance_type, kExternalStringTag);
-  j(zero, &not_external, Label::kNear);
-  mov(length, Immediate(ExternalString::kSize));
-  jmp(&is_data_object, Label::kNear);
-
-  bind(&not_external);
-  // Sequential string, either Latin1 or UC16.
-  DCHECK(kOneByteStringTag == 0x04);
-  and_(length, Immediate(kStringEncodingMask));
-  xor_(length, Immediate(kStringEncodingMask));
-  add(length, Immediate(0x04));
-  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
-  // by 2. If we multiply the string length as smi by this, it still
-  // won't overflow a 32-bit value.
-  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
-  DCHECK(SeqOneByteString::kMaxSize <=
-         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
-  imul(length, FieldOperand(value, String::kLengthOffset));
-  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
-  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
-  and_(length, Immediate(~kObjectAlignmentMask));
-
-  bind(&is_data_object);
-  // Value is a data object, and it is white.  Mark it black.  Since we know
-  // that the object is white we can make it black by flipping one bit.
-  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
-  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
-  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
-      length);
-  if (emit_debug_code()) {
-    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
-    Check(less_equal, kLiveBytesCountOverflowChunkSize);
-  }
-
-  bind(&done);
+  j(zero, value_is_white, Label::kNear);
 }
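
The swap of the black and grey encodings (black is now 11, grey 10; white stays 00 and 01 remains impossible) means both live colors set the first mark bit, so whiteness is decided by the single `test` above. A standalone C++ sketch of that invariant (patterns read first-bit-first, as in the DCHECKed strings; not V8 code):

    #include <cassert>
    #include <cstdint>

    // Two consecutive mark bits per object; |mask| selects the first of them.
    // white = 00, grey = 10, black = 11 (01 can never occur).
    bool IsWhite(uint32_t bitmap_word, uint32_t mask) {
      // Black and grey both have the first bit set, white does not, so one
      // bit test suffices -- the same test(mask, bitmap) emitted above.
      return (bitmap_word & mask) == 0;
    }

    int main() {
      const uint32_t mask = 1u << 4;               // first mark bit of some cell
      assert(IsWhite(0u, mask));                   // white: 00
      assert(!IsWhite(mask, mask));                // grey:  10
      assert(!IsWhite(mask | (mask << 1), mask));  // black: 11
    }
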
 
 
@@ -3419,14 +3074,22 @@
   DCHECK(!scratch1.is(scratch0));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
-  Label loop_again;
+  Label loop_again, end;
 
   // scratch contains the elements pointer.
   mov(current, object);
+  mov(current, FieldOperand(current, HeapObject::kMapOffset));
+  mov(current, FieldOperand(current, Map::kPrototypeOffset));
+  cmp(current, Immediate(factory->null_value()));
+  j(equal, &end);
 
   // Loop based on the map going up the prototype chain.
   bind(&loop_again);
   mov(current, FieldOperand(current, HeapObject::kMapOffset));
+  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+  CmpInstanceType(current, JS_OBJECT_TYPE);
+  j(below, found);
   mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
   DecodeField<Map::ElementsKindBits>(scratch1);
   cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
@@ -3434,6 +3097,8 @@
   mov(current, FieldOperand(current, Map::kPrototypeOffset));
   cmp(current, Immediate(factory->null_value()));
   j(not_equal, &loop_again);
+
+  bind(&end);
 }
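
After this rewrite the walk skips the receiver itself, bails out to |found| for anything with an instance type below JS_OBJECT_TYPE (proxies and value wrappers, per the STATIC_ASSERTs) or with dictionary elements, and exits at |end| once the chain reaches null. A rough C++ rendering of the control flow (stand-in types and constants for illustration; the branch to |found| after the elements-kind compare sits in the elided hunk context):

    // Stand-in structures, illustration only -- not the real heap layout.
    struct Object;
    struct Map {
      int instance_type;
      int elements_kind;
      const Object* prototype;
    };
    struct Object { const Map* map; };

    const int JS_OBJECT_TYPE = 100;     // assumed: proxy/value types order below this
    const int DICTIONARY_ELEMENTS = 6;  // assumed enum value

    // true  -> the emitted code would take the |found| branch
    // false -> it would fall through to |end|
    bool HasSpecialReceiverOrDictionaryPrototype(const Object* object,
                                                 const Object* null_value) {
      const Object* current = object->map->prototype;  // receiver itself skipped
      if (current == null_value) return false;         // empty chain: straight to end
      while (true) {
        const Map* map = current->map;
        if (map->instance_type < JS_OBJECT_TYPE) return true;  // j(below, found)
        if (map->elements_kind == DICTIONARY_ELEMENTS) return true;
        current = map->prototype;
        if (current == null_value) return false;  // loop exits, bind(&end)
      }
    }
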
 
 
@@ -3454,6 +3119,7 @@
 }
 
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 83f6216..76c4890 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -13,6 +13,23 @@
 namespace v8 {
 namespace internal {
 
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {Register::kCode_eax};
+const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kJSFunctionRegister = {Register::kCode_edi};
+const Register kContextRegister = {Register::kCode_esi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
+
+// Spill slots used by interpreter dispatch calling convention.
+const int kInterpreterDispatchTableSpillSlot = -1;
+
 // Convenience for platform-independent signatures.  We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
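
These aliases let platform-independent code (builtins, interpreter glue) name ia32 registers by role rather than by name; note how eax does triple duty as return value, interpreter accumulator, and argument count. A hypothetical fragment showing the intent (Set() is declared further down in this header):

    // Hypothetical fragment, illustration only: shared code refers to "the
    // return register" instead of hard-coding eax.
    void GenerateReturnZero(MacroAssembler* masm) {
      masm->Set(kReturnRegister0, 0);  // eax on this port; uses the xor shortcut
      masm->ret(0);
    }
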
@@ -24,37 +41,34 @@
   kPointersToHereAreAlwaysInteresting
 };
 
-
-enum RegisterValueType {
-  REGISTER_VALUE_IS_SMI,
-  REGISTER_VALUE_IS_INT32
-};
-
+enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
 
 #ifdef DEBUG
-bool AreAliased(Register reg1,
-                Register reg2,
-                Register reg3 = no_reg,
-                Register reg4 = no_reg,
-                Register reg5 = no_reg,
-                Register reg6 = no_reg,
-                Register reg7 = no_reg,
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+                Register reg4 = no_reg, Register reg5 = no_reg,
+                Register reg6 = no_reg, Register reg7 = no_reg,
                 Register reg8 = no_reg);
 #endif
 
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  // The isolate parameter can be NULL if the macro assembler should
-  // not use isolate-dependent functionality. In this case, it's the
-  // responsibility of the caller to never invoke such function on the
-  // macro assembler.
-  MacroAssembler(Isolate* isolate, void* buffer, int size);
+  MacroAssembler(Isolate* isolate, void* buffer, int size,
+                 CodeObjectRequired create_code_object);
 
   void Load(Register dst, const Operand& src, Representation r);
   void Store(Register src, const Operand& dst, Representation r);
 
+  // Load a register with a long value as efficiently as possible.
+  void Set(Register dst, int32_t x) {
+    if (x == 0) {
+      xor_(dst, dst);
+    } else {
+      mov(dst, Immediate(x));
+    }
+  }
+  void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
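
Why the xor: on ia32, `xor dst, dst` zeroes a register in two bytes, while `mov dst, imm32` takes five, so the zero case gets the shorter encoding (xor also clobbers EFLAGS, unlike mov, which is acceptable here). The two encodings, for illustration:

    // 31 C0             xor eax, eax   ; 2 bytes, eax = 0, clobbers EFLAGS
    // B8 00 00 00 00    mov eax, 0     ; 5 bytes, eax = 0
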
+
   // Operations on roots in the root-array.
   void LoadRoot(Register destination, Heap::RootListIndex index);
   void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
@@ -63,80 +77,82 @@
   // and not in new space).
   void CompareRoot(Register with, Heap::RootListIndex index);
   void CompareRoot(const Operand& with, Heap::RootListIndex index);
+  void PushRoot(Heap::RootListIndex index);
+
+  // Compare the object in a register to a value and jump if they are equal.
+  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+                  Label::Distance if_equal_distance = Label::kFar) {
+    CompareRoot(with, index);
+    j(equal, if_equal, if_equal_distance);
+  }
+  void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+                  Label* if_equal,
+                  Label::Distance if_equal_distance = Label::kFar) {
+    CompareRoot(with, index);
+    j(equal, if_equal, if_equal_distance);
+  }
+
+  // Compare the object in a register to a value and jump if they are not equal.
+  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+                     Label* if_not_equal,
+                     Label::Distance if_not_equal_distance = Label::kFar) {
+    CompareRoot(with, index);
+    j(not_equal, if_not_equal, if_not_equal_distance);
+  }
+  void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+                     Label* if_not_equal,
+                     Label::Distance if_not_equal_distance = Label::kFar) {
+    CompareRoot(with, index);
+    j(not_equal, if_not_equal, if_not_equal_distance);
+  }
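
The new root helpers fold the compare-and-branch pattern into one call. An illustrative use (register, label, and distance chosen arbitrarily; kUndefinedValueRootIndex is one of the real root indices):

    // Illustrative fragment assuming a MacroAssembler* masm in scope.
    Label if_undefined;
    masm->JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &if_undefined,
                     Label::kNear);
    // ... falls through when eax is not the undefined sentinel ...
    masm->bind(&if_undefined);
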
 
   // ---------------------------------------------------------------------------
   // GC Support
-  enum RememberedSetFinalAction {
-    kReturnAtEnd,
-    kFallThroughAtEnd
-  };
+  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
 
   // Record in the remembered set the fact that we have a pointer to new space
   // at the address pointed to by the addr register.  Only works if addr is not
   // in new space.
   void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
+                           Register addr, Register scratch,
                            SaveFPRegsMode save_fp,
                            RememberedSetFinalAction and_then);
 
-  void CheckPageFlag(Register object,
-                     Register scratch,
-                     int mask,
-                     Condition cc,
+  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                      Label* condition_met,
                      Label::Distance condition_met_distance = Label::kFar);
 
   void CheckPageFlagForMap(
-      Handle<Map> map,
-      int mask,
-      Condition cc,
-      Label* condition_met,
+      Handle<Map> map, int mask, Condition cc, Label* condition_met,
       Label::Distance condition_met_distance = Label::kFar);
 
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but scratch will be clobbered.
-  void JumpIfNotInNewSpace(Register object,
-                           Register scratch,
-                           Label* branch,
+  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
                            Label::Distance distance = Label::kFar) {
     InNewSpace(object, scratch, zero, branch, distance);
   }
 
   // Check if object is in new space.  Jumps if the object is in new space.
   // The register scratch can be object itself, but it will be clobbered.
-  void JumpIfInNewSpace(Register object,
-                        Register scratch,
-                        Label* branch,
+  void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
                         Label::Distance distance = Label::kFar) {
     InNewSpace(object, scratch, not_zero, branch, distance);
   }
 
   // Check if an object has a given incremental marking color.  Also uses ecx!
-  void HasColor(Register object,
-                Register scratch0,
-                Register scratch1,
-                Label* has_color,
-                Label::Distance has_color_distance,
-                int first_bit,
-                int second_bit);
+  void HasColor(Register object, Register scratch0, Register scratch1,
+                Label* has_color, Label::Distance has_color_distance,
+                int first_bit, int second_bit);
 
-  void JumpIfBlack(Register object,
-                   Register scratch0,
-                   Register scratch1,
+  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                    Label* on_black,
                    Label::Distance on_black_distance = Label::kFar);
 
-  // Checks the color of an object.  If the object is already grey or black
-  // then we just fall through, since it is already live.  If it is white and
-  // we can determine that it doesn't need to be scanned, then we just mark it
-  // black and fall through.  For the rest we jump to the label so the
-  // incremental marker can fix its assumptions.
-  void EnsureNotWhite(Register object,
-                      Register scratch1,
-                      Register scratch2,
-                      Label* object_is_white_and_not_data,
-                      Label::Distance distance);
+  // Checks the color of an object.  If the object is white we jump to the
+  // incremental marker.
+  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+                   Label* value_is_white, Label::Distance distance);
 
   // Notify the garbage collector that we wrote a pointer into an object.
   // |object| is the object being stored into, |value| is the object being
@@ -144,10 +160,7 @@
   // The offset is the offset from the start of the object, not the offset from
   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
   void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
+      Register object, int offset, Register value, Register scratch,
       SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
@@ -157,22 +170,14 @@
   // As above, but the offset has the tag presubtracted.  For use with
   // Operand(reg, off).
   void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
+      Register context, int offset, Register value, Register scratch,
       SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     save_fp,
-                     remembered_set_action,
-                     smi_check,
+    RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
+                     remembered_set_action, smi_check,
                      pointers_to_here_check_for_value);
   }
 
@@ -183,10 +188,7 @@
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
   void RecordWriteArray(
-      Register array,
-      Register value,
-      Register index,
-      SaveFPRegsMode save_fp,
+      Register array, Register value, Register index, SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -198,10 +200,7 @@
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
   void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
-      SaveFPRegsMode save_fp,
+      Register object, Register address, Register value, SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -210,12 +209,8 @@
   // For page containing |object| mark the region covering the object's map
   // dirty. |object| is the object being stored into, |map| is the Map object
   // that was stored.
-  void RecordWriteForMap(
-      Register object,
-      Handle<Map> map,
-      Register scratch1,
-      Register scratch2,
-      SaveFPRegsMode save_fp);
+  void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
+                         Register scratch2, SaveFPRegsMode save_fp);
 
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -236,8 +231,8 @@
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
-  // argument in register esi.
-  void LeaveExitFrame(bool save_doubles);
+  // argument in register esi (if pop_arguments == true).
+  void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
 
   // Leave the current exit frame. Expects the return value in
   // register eax (untouched).
@@ -246,16 +241,18 @@
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
+  // Load the global proxy from the current context.
+  void LoadGlobalProxy(Register dst);
+
   // Conditionally load the cached Array transitioned map of type
   // transitioned_kind from the native context if the map in register
   // map_in_out is the cached Array map in the native context of
   // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
+  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+                                           ElementsKind transitioned_kind,
+                                           Register map_in_out,
+                                           Register scratch,
+                                           Label* no_map_match);
 
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
@@ -298,6 +295,8 @@
   // Compare the given value and the value of weak cell.
   void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
 
+  void GetWeakValue(Register value, Handle<WeakCell> cell);
+
   // Load the value of the weak cell in the value register. Branch to the given
   // miss label if the weak cell was cleared.
   void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
@@ -306,50 +305,37 @@
   // JavaScript invokes
 
   // Invoke the JavaScript function code by either calling or jumping.
-  void InvokeCode(Register code,
-                  const ParameterCount& expected,
-                  const ParameterCount& actual,
-                  InvokeFlag flag,
-                  const CallWrapper& call_wrapper) {
-    InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
-  }
 
-  void InvokeCode(const Operand& code,
-                  const ParameterCount& expected,
-                  const ParameterCount& actual,
-                  InvokeFlag flag,
-                  const CallWrapper& call_wrapper);
+  void InvokeFunctionCode(Register function, Register new_target,
+                          const ParameterCount& expected,
+                          const ParameterCount& actual, InvokeFlag flag,
+                          const CallWrapper& call_wrapper);
+
+  void FloodFunctionIfStepping(Register fun, Register new_target,
+                               const ParameterCount& expected,
+                               const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
-  void InvokeFunction(Register function,
-                      const ParameterCount& actual,
-                      InvokeFlag flag,
+  void InvokeFunction(Register function, Register new_target,
+                      const ParameterCount& actual, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  void InvokeFunction(Register function,
-                      const ParameterCount& expected,
-                      const ParameterCount& actual,
-                      InvokeFlag flag,
+  void InvokeFunction(Register function, const ParameterCount& expected,
+                      const ParameterCount& actual, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
   void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& expected,
-                      const ParameterCount& actual,
-                      InvokeFlag flag,
+                      const ParameterCount& actual, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  // Invoke specified builtin JavaScript function. Adds an entry to
-  // the unresolved list if the name does not resolve.
-  void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeFlag flag,
+  // Invoke specified builtin JavaScript function.
+  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                      const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the function for the given builtin in the target register.
-  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
-  // Store the code object for the given builtin in the target register.
-  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+  void GetBuiltinFunction(Register target, int native_context_index);
 
   // Expression support
   // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -372,31 +358,25 @@
 
   // Check if a map for a JSObject indicates that the object has fast elements.
   // Jump to the specified label if it does not.
-  void CheckFastElements(Register map,
-                         Label* fail,
+  void CheckFastElements(Register map, Label* fail,
                          Label::Distance distance = Label::kFar);
 
   // Check if a map for a JSObject indicates that the object can have both smi
   // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Label* fail,
+  void CheckFastObjectElements(Register map, Label* fail,
                                Label::Distance distance = Label::kFar);
 
   // Check if a map for a JSObject indicates that the object has fast smi only
   // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Label* fail,
+  void CheckFastSmiElements(Register map, Label* fail,
                             Label::Distance distance = Label::kFar);
 
   // Check to see if maybe_number can be stored as a double in
   // FastDoubleElements. If it can, store it at the index specified by key in
   // the FastDoubleElements array elements, otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register maybe_number,
-                                   Register elements,
-                                   Register key,
-                                   Register scratch1,
-                                   XMMRegister scratch2,
-                                   Label* fail,
+  void StoreNumberToDoubleElements(Register maybe_number, Register elements,
+                                   Register key, Register scratch1,
+                                   XMMRegister scratch2, Label* fail,
                                    int offset = 0);
 
   // Compare an object's map with the specified map.
@@ -406,9 +386,7 @@
   // label if not. Skip the smi check if not required (object is known to be a
   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
   // against maps that are ElementsKind transition maps of the specified map.
-  void CheckMap(Register obj,
-                Handle<Map> map,
-                Label* fail,
+  void CheckMap(Register obj, Handle<Map> map, Label* fail,
                 SmiCheckType smi_check_type);
 
   // Check if the map of an object is equal to a specified weak map and branch
@@ -423,8 +401,7 @@
   // contains the instance_type. The registers map and instance_type can be the
   // same in which case it contains the instance type afterwards. Either of the
   // registers map and instance_type can be the same as heap_object.
-  Condition IsObjectStringType(Register heap_object,
-                               Register map,
+  Condition IsObjectStringType(Register heap_object, Register map,
                                Register instance_type);
 
   // Check if the object in register heap_object is a name. Afterwards the
@@ -432,30 +409,16 @@
   // contains the instance_type. The registers map and instance_type can be the
   // same in which case it contains the instance type afterwards. Either of the
   // registers map and instance_type can be the same as heap_object.
-  Condition IsObjectNameType(Register heap_object,
-                             Register map,
+  Condition IsObjectNameType(Register heap_object, Register map,
                              Register instance_type);
 
-  // Check if a heap object's type is in the JSObject range, not including
-  // JSFunction.  The object's map will be loaded in the map register.
-  // Any or all of the three registers may be the same.
-  // The contents of the scratch register will always be overwritten.
-  void IsObjectJSObjectType(Register heap_object,
-                            Register map,
-                            Register scratch,
-                            Label* fail);
-
-  // The contents of the scratch register will be overwritten.
-  void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
   // FCmp is similar to integer cmp, but requires unsigned
   // jcc instructions (je, ja, jae, jb, jbe, and jz).
   void FCmp();
 
   void ClampUint8(Register reg);
 
-  void ClampDoubleToUint8(XMMRegister input_reg,
-                          XMMRegister scratch_reg,
+  void ClampDoubleToUint8(XMMRegister input_reg, XMMRegister scratch_reg,
                           Register result_reg);
 
   void SlowTruncateToI(Register result_reg, Register input_reg,
@@ -493,22 +456,19 @@
   void LoadUint32(XMMRegister dst, const Operand& src);
 
   // Jump if the register contains a smi.
-  inline void JumpIfSmi(Register value,
-                        Label* smi_label,
+  inline void JumpIfSmi(Register value, Label* smi_label,
                         Label::Distance distance = Label::kFar) {
     test(value, Immediate(kSmiTagMask));
     j(zero, smi_label, distance);
   }
   // Jump if the operand is a smi.
-  inline void JumpIfSmi(Operand value,
-                        Label* smi_label,
+  inline void JumpIfSmi(Operand value, Label* smi_label,
                         Label::Distance distance = Label::kFar) {
     test(value, Immediate(kSmiTagMask));
     j(zero, smi_label, distance);
   }
   // Jump if the register contains a non-smi.
-  inline void JumpIfNotSmi(Register value,
-                           Label* not_smi_label,
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
                            Label::Distance distance = Label::kFar) {
     test(value, Immediate(kSmiTagMask));
     j(not_zero, not_smi_label, distance);
@@ -517,6 +477,8 @@
   void LoadInstanceDescriptors(Register map, Register descriptors);
   void EnumLength(Register dst, Register map);
   void NumberOfOwnDescriptors(Register dst, Register map);
+  void LoadAccessor(Register dst, Register holder, int accessor_index,
+                    AccessorComponent accessor);
 
   template<typename Field>
   void DecodeField(Register reg) {
@@ -559,6 +521,13 @@
   // Abort execution if argument is not a name, enabled via --debug-code.
   void AssertName(Register object);
 
+  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+  void AssertFunction(Register object);
+
+  // Abort execution if argument is not a JSBoundFunction,
+  // enabled via --debug-code.
+  void AssertBoundFunction(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object);
@@ -566,17 +535,11 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link it into try handler chain.
-  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+  // Push a new stack handler and link it into stack handler chain.
+  void PushStackHandler();
 
-  // Unlink the stack handler on top of the stack from the try handler chain.
-  void PopTryHandler();
-
-  // Throw to the top handler in the try hander chain.
-  void Throw(Register value);
-
-  // Throw past all JS frames to the top JS entry frame.
-  void ThrowUncatchable(Register value);
+  // Unlink the stack handler on top of the stack from the stack handler chain.
+  void PopStackHandler();
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -584,26 +547,19 @@
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
   // is left untouched, but the scratch register is clobbered.
-  void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch1,
-                              Register scratch2,
-                              Label* miss);
+  void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
+                              Register scratch2, Label* miss);
 
   void GetNumberHash(Register r0, Register scratch);
 
-  void LoadFromNumberDictionary(Label* miss,
-                                Register elements,
-                                Register key,
-                                Register r0,
-                                Register r1,
-                                Register r2,
+  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+                                Register r0, Register r1, Register r2,
                                 Register result);
 
-
   // ---------------------------------------------------------------------------
   // Allocation support
 
-  // Allocate an object in new space or old pointer space. If the given space
+  // Allocate an object in new space or old space. If the given space
   // is exhausted control continues at the gc_required label. The allocated
   // object is returned in result and end of the new object is returned in
   // result_end. The register scratch can be passed as no_reg in which case
@@ -613,54 +569,29 @@
   // result is known to be the allocation top on entry (could be result_end
   // from a previous call). If result_contains_top_on_entry is true scratch
   // should be no_reg as it is never used.
-  void Allocate(int object_size,
-                Register result,
-                Register result_end,
-                Register scratch,
-                Label* gc_required,
-                AllocationFlags flags);
+  void Allocate(int object_size, Register result, Register result_end,
+                Register scratch, Label* gc_required, AllocationFlags flags);
 
-  void Allocate(int header_size,
-                ScaleFactor element_size,
-                Register element_count,
-                RegisterValueType element_count_type,
-                Register result,
-                Register result_end,
-                Register scratch,
-                Label* gc_required,
-                AllocationFlags flags);
+  void Allocate(int header_size, ScaleFactor element_size,
+                Register element_count, RegisterValueType element_count_type,
+                Register result, Register result_end, Register scratch,
+                Label* gc_required, AllocationFlags flags);
 
-  void Allocate(Register object_size,
-                Register result,
-                Register result_end,
-                Register scratch,
-                Label* gc_required,
-                AllocationFlags flags);
-
-  // Undo allocation in new space. The object passed and objects allocated after
-  // it will no longer be allocated. Make sure that no pointers are left to the
-  // object(s) no longer allocated as they would be invalid when allocation is
-  // un-done.
-  void UndoAllocationInNewSpace(Register object);
+  void Allocate(Register object_size, Register result, Register result_end,
+                Register scratch, Label* gc_required, AllocationFlags flags);
 
   // Allocate a heap number in new space with undefined value. The
   // register scratch2 can be passed as no_reg; the others must be
   // valid registers. Returns tagged pointer in result register, or
   // jumps to gc_required if new space is full.
-  void AllocateHeapNumber(Register result,
-                          Register scratch1,
-                          Register scratch2,
-                          Label* gc_required,
-                          MutableMode mode = IMMUTABLE);
+  void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+                          Label* gc_required, MutableMode mode = IMMUTABLE);
 
   // Allocate a sequential string. All the header fields of the string object
   // are initialized.
-  void AllocateTwoByteString(Register result,
-                             Register length,
-                             Register scratch1,
-                             Register scratch2,
-                             Register scratch3,
-                             Label* gc_required);
+  void AllocateTwoByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
   void AllocateOneByteString(Register result, Register length,
                              Register scratch1, Register scratch2,
                              Register scratch3, Label* gc_required);
@@ -669,36 +600,34 @@
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
-  void AllocateTwoByteConsString(Register result,
-                          Register scratch1,
-                          Register scratch2,
-                          Label* gc_required);
+  void AllocateTwoByteConsString(Register result, Register scratch1,
+                                 Register scratch2, Label* gc_required);
   void AllocateOneByteConsString(Register result, Register scratch1,
                                  Register scratch2, Label* gc_required);
 
   // Allocate a raw sliced string object. Only the map field of the result is
   // initialized.
-  void AllocateTwoByteSlicedString(Register result,
-                            Register scratch1,
-                            Register scratch2,
-                            Label* gc_required);
+  void AllocateTwoByteSlicedString(Register result, Register scratch1,
+                                   Register scratch2, Label* gc_required);
   void AllocateOneByteSlicedString(Register result, Register scratch1,
                                    Register scratch2, Label* gc_required);
 
+  // Allocate and initialize a JSValue wrapper with the specified {constructor}
+  // and {value}.
+  void AllocateJSValue(Register result, Register constructor, Register value,
+                       Register scratch, Label* gc_required);
+
   // Copy memory, byte-by-byte, from source to destination.  Not optimized for
   // long or aligned copies.
   // The contents of index and scratch are destroyed.
-  void CopyBytes(Register source,
-                 Register destination,
-                 Register length,
+  void CopyBytes(Register source, Register destination, Register length,
                  Register scratch);
 
-  // Initialize fields with filler values.  Fields starting at |start_offset|
-  // not including end_offset are overwritten with the value in |filler|.  At
-  // the end the loop, |start_offset| takes the value of |end_offset|.
-  void InitializeFieldsWithFiller(Register start_offset,
-                                  Register end_offset,
-                                  Register filler);
+  // Initialize fields with filler values.  Fields starting at |current_address|
+  // not including |end_address| are overwritten with the value in |filler|.  At
+  // the end of the loop, |current_address| takes the value of |end_address|.
+  void InitializeFieldsWithFiller(Register current_address,
+                                  Register end_address, Register filler);
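
The renamed parameters make the contract explicit; in plain C++ the loop amounts to the following (a sketch of the semantics, not the emitted code):

    #include <cstdint>

    // Fills every pointer-sized slot in [current_address, end_address) with
    // |filler| and leaves current_address == end_address, mirroring how the
    // register ends up equal to |end_address| after the emitted loop.
    void FillFields(uintptr_t* current_address, uintptr_t* end_address,
                    uintptr_t filler) {
      while (current_address < end_address) *current_address++ = filler;
    }
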
 
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -714,16 +643,17 @@
   void NegativeZeroTest(Register result, Register op1, Register op2,
                         Register scratch, Label* then_label);
 
+  // Machine code version of Map::GetConstructor().
+  // |temp| holds |result|'s map when done.
+  void GetMapConstructor(Register result, Register map, Register temp);
+
   // Try to get the function prototype of a function and put the value in
   // the result register. Checks that the function really is a
   // function and jumps to the miss label if the fast checks fail. The
   // function register will be untouched; the other registers may be
   // clobbered.
-  void TryGetFunctionPrototype(Register function,
-                               Register result,
-                               Register scratch,
-                               Label* miss,
-                               bool miss_on_bound_function = false);
+  void TryGetFunctionPrototype(Register function, Register result,
+                               Register scratch, Label* miss);
 
   // Picks out an array index from the hash field.
   // Register use:
@@ -744,35 +674,31 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  void CallRuntime(const Runtime::Function* f,
-                   int num_arguments,
+  void CallRuntime(const Runtime::Function* f, int num_arguments,
                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
-    const Runtime::Function* function = Runtime::FunctionForId(id);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+    const Runtime::Function* function = Runtime::FunctionForId(fid);
     CallRuntime(function, function->nargs, kSaveFPRegs);
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id,
-                   int num_arguments,
+  void CallRuntime(Runtime::FunctionId fid,
                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+    const Runtime::Function* function = Runtime::FunctionForId(fid);
+    CallRuntime(function, function->nargs, save_doubles);
+  }
+
+  // Convenience function: Same as above, but takes an explicit argument count.
+  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
   }
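
With the new single-argument overload the call site no longer passes the argument count; it is looked up from the runtime function table. An illustrative call site (Runtime::kStackGuard is a real zero-argument runtime function):

    // Old: __ CallRuntime(Runtime::kStackGuard, 0);
    // New: nargs comes from Runtime::FunctionForId(fid)->nargs.
    __ CallRuntime(Runtime::kStackGuard);
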
 
   // Convenience function: call an external reference.
   void CallExternalReference(ExternalReference ref, int num_arguments);
 
-  // Tail call of a runtime routine (jump).
-  // Like JumpToExternalReference, but also takes care of passing the number
-  // of parameters.
-  void TailCallExternalReference(const ExternalReference& ext,
-                                 int num_arguments,
-                                 int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
-  void TailCallRuntime(Runtime::FunctionId fid,
-                       int num_arguments,
-                       int result_size);
+  void TailCallRuntime(Runtime::FunctionId fid);
 
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -791,24 +717,6 @@
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, int num_arguments);
 
-  // Prepares stack to put arguments (aligns and so on). Reserves
-  // space for return value if needed (assumes the return value is a handle).
-  // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
-  // etc. Saves context (esi). If space was reserved for return value then
-  // stores the pointer to the reserved slot into esi.
-  void PrepareCallApiFunction(int argc);
-
-  // Calls an API function.  Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions.  Clobbers ebx, edi and
-  // caller-save registers.  Restores context.  On return removes
-  // stack_space * kPointerSize (GCed).
-  void CallApiFunctionAndReturn(Register function_address,
-                                ExternalReference thunk_ref,
-                                Operand thunk_last_arg,
-                                int stack_space,
-                                Operand return_value_operand,
-                                Operand* context_restore_operand);
-
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext);
 
@@ -826,8 +734,31 @@
   void Drop(int element_count);
 
   void Call(Label* target) { call(target); }
+  void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+  void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
   void Push(Register src) { push(src); }
+  void Push(const Operand& src) { push(src); }
+  void Push(Immediate value) { push(value); }
   void Pop(Register dst) { pop(dst); }
+  void Pop(const Operand& dst) { pop(dst); }
+  void PushReturnAddressFrom(Register src) { push(src); }
+  void PopReturnAddressTo(Register dst) { pop(dst); }
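
PushReturnAddressFrom/PopReturnAddressTo wrap the ia32-specific chore of moving the return address aside so the argument area can be rewritten, a common step before tail calls. An illustrative fragment (register choices arbitrary):

    // Illustrative fragment assuming a MacroAssembler* masm: swap the top
    // stack argument while keeping the return address on top of the stack.
    masm->PopReturnAddressTo(ecx);     // return address -> scratch
    masm->Pop(eax);                    // drop the old argument
    masm->Push(ebx);                   // push the replacement
    masm->PushReturnAddressFrom(ecx);  // return address back on top
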
+
+  // Non-SSE2 instructions.
+  void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+  void Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+    Pinsrd(dst, Operand(src), imm8);
+  }
+  void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+
+  void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
+  void Lzcnt(Register dst, const Operand& src);
+
+  void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
+  void Tzcnt(Register dst, const Operand& src);
+
+  void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
+  void Popcnt(Register dst, const Operand& src);
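
These helpers emit the real instruction when the CPU supports it; Lzcnt, for instance, otherwise falls back to a BSR-based sequence. The identity behind that fallback, as a plain-C++ sketch (not the emitted code):

    #include <cstdint>

    // lzcnt(x) == 31 - bsr(x) for x != 0, where bsr yields the index of the
    // highest set bit; lzcnt(0) is defined as 32, while bsr(0) is undefined.
    uint32_t Lzcnt32(uint32_t x) {
      if (x == 0) return 32;
      uint32_t leading_zeros = 0;
      while (!(x & 0x80000000u)) {  // scan down from bit 31
        x <<= 1;
        ++leading_zeros;
      }
      return leading_zeros;
    }
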
 
   // Emit call to the code we are currently generating.
   void CallSelf() {
@@ -847,9 +778,11 @@
   void Move(XMMRegister dst, uint64_t src);
   void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
 
+  void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+
   // Push a handle value.
   void Push(Handle<Object> handle) { push(Immediate(handle)); }
-  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+  void Push(Smi* smi) { Push(Immediate(smi)); }
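
A 32-bit smi is its 31-bit payload shifted left by one with tag bit 0 clear, which is why a Smi* can now be pushed as a raw immediate instead of going through a Handle. The encoding, as a standalone sketch:

    #include <cassert>
    #include <cstdint>

    // 32-bit smi encoding (sketch): 31-bit payload, shifted left by one,
    // tag bit 0 clear.
    const int kSmiTagSize = 1;
    const intptr_t kSmiTag = 0;

    intptr_t ToSmi(int32_t value) {  // value must fit in 31 bits
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }
    int32_t SmiValue(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiTagSize);
    }

    int main() {
      assert((ToSmi(42) & 1) == kSmiTag);  // tagged smis are even
      assert(SmiValue(ToSmi(42)) == 42);   // round-trips
    }
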
 
   Handle<Object> CodeObject() {
     DCHECK(!code_object_.is_null());
@@ -869,7 +802,6 @@
   void IncrementCounter(Condition cc, StatsCounter* counter, int value);
   void DecrementCounter(Condition cc, StatsCounter* counter, int value);
 
-
   // ---------------------------------------------------------------------------
   // Debugging
 
@@ -898,17 +830,6 @@
   // ---------------------------------------------------------------------------
   // String utilities.
 
-  // Generate code to do a lookup in the number string cache. If the number in
-  // the register object is found in the cache the generated code falls through
-  // with the result in the result register. The object and the result register
-  // can be the same. If the number is not found in the cache the code jumps to
-  // the label not_found with only the content of register object unchanged.
-  void LookupNumberStringCache(Register object,
-                               Register result,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* not_found);
-
   // Check whether the instance type represents a flat one-byte string. Jump to
   // the label if not. If the instance type can be scratched specify same
   // register for both instance type and scratch.
@@ -931,15 +852,16 @@
   void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
                                        Label::Distance distance = Label::kFar);
 
-  void EmitSeqStringSetCharCheck(Register string,
-                                 Register index,
-                                 Register value,
-                                 uint32_t encoding_mask);
+  void EmitSeqStringSetCharCheck(Register string, Register index,
+                                 Register value, uint32_t encoding_mask);
 
   static int SafepointRegisterStackIndex(Register reg) {
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Load the type feedback vector from a JavaScript frame.
+  void EmitLoadTypeFeedbackVector(Register vector);
+
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
@@ -981,14 +903,10 @@
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
-                      const ParameterCount& actual,
-                      Handle<Code> code_constant,
-                      const Operand& code_operand,
-                      Label* done,
-                      bool* definitely_mismatches,
-                      InvokeFlag flag,
+                      const ParameterCount& actual, Label* done,
+                      bool* definitely_mismatches, InvokeFlag flag,
                       Label::Distance done_distance,
-                      const CallWrapper& call_wrapper = NullCallWrapper());
+                      const CallWrapper& call_wrapper);
 
   void EnterExitFramePrologue();
   void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -996,18 +914,14 @@
   void LeaveExitFrameEpilogue(bool restore_context);
 
   // Allocation support helpers.
-  void LoadAllocationTopHelper(Register result,
-                               Register scratch,
+  void LoadAllocationTopHelper(Register result, Register scratch,
                                AllocationFlags flags);
 
-  void UpdateAllocationTopHelper(Register result_end,
-                                 Register scratch,
+  void UpdateAllocationTopHelper(Register result_end, Register scratch,
                                  AllocationFlags flags);
 
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,
+  void InNewSpace(Register object, Register scratch, Condition cc,
                   Label* condition_met,
                   Label::Distance condition_met_distance = Label::kFar);
 
@@ -1015,14 +929,9 @@
   // bitmap register points at the word with the mark bits and the mask
   // the position of the first bit.  Uses ecx as scratch and leaves addr_reg
   // unchanged.
-  inline void GetMarkBits(Register addr_reg,
-                          Register bitmap_reg,
+  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                           Register mask_reg);
 
-  // Helper for throwing exceptions.  Compute a handler address and jump to
-  // it.  See the implementation for register usage.
-  void JumpToHandlerEntry();
-
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
   static int SafepointRegisterStackIndex(int reg_code);
@@ -1032,7 +941,6 @@
   friend class StandardFrame;
 };
 
-
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1040,19 +948,18 @@
 // an assertion.
 class CodePatcher {
  public:
-  CodePatcher(byte* address, int size);
-  virtual ~CodePatcher();
+  CodePatcher(Isolate* isolate, byte* address, int size);
+  ~CodePatcher();
 
   // Macro assembler to emit code.
   MacroAssembler* masm() { return &masm_; }
 
  private:
-  byte* address_;  // The address of the code being patched.
-  int size_;  // Number of bytes of the expected patch size.
+  byte* address_;        // The address of the code being patched.
+  int size_;             // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
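
Typical usage, for illustration (the address and byte count are made up): emit exactly |size| bytes through masm(); the destructor verifies the count and, with the new constructor, uses |isolate| for the instruction-cache flush.

    // Illustrative helper: patch a one-byte breakpoint at |address|.
    void PatchWithBreakpoint(Isolate* isolate, byte* address) {
      CodePatcher patcher(isolate, address, 1);  // exactly one byte expected
      patcher.masm()->int3();                    // 0xCC breakpoint opcode
    }
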
 
-
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
@@ -1061,37 +968,29 @@
   return Operand(object, offset - kHeapObjectTag);
 }
 
-
 // Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
-                            Register index,
-                            ScaleFactor scale,
+inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
                             int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
-
-inline Operand FixedArrayElementOperand(Register array,
-                                        Register index_as_smi,
+inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
                                         int additional_offset = 0) {
   int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
   return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
 }
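
FieldOperand folds the heap-object tag into the displacement: a tagged pointer is the real address plus kHeapObjectTag (1), so a field access at |offset| becomes Operand(object, offset - 1). The arithmetic, as a standalone sketch:

    #include <cassert>
    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;  // tagged heap pointers have bit 0 set

    // Address arithmetic behind FieldOperand (sketch): untag and add the
    // offset in a single displacement.
    intptr_t FieldAddress(intptr_t tagged_object, int offset) {
      return tagged_object + offset - kHeapObjectTag;
    }

    int main() {
      intptr_t raw = 0x1000;  // hypothetical object start address
      intptr_t tagged = raw + kHeapObjectTag;
      assert(FieldAddress(tagged, 8) == raw + 8);
    }
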
 
-
 inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
-
-inline Operand GlobalObjectOperand() {
-  return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand ContextOperand(Register context, Register index) {
+  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
 }
 
-
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
-
+inline Operand NativeContextOperand() {
+  return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
+}
 
 #ifdef GENERATED_CODE_COVERAGE
 extern void LogGeneratedCodeCoverage(const char* file_line);
@@ -1114,7 +1013,7 @@
 #define ACCESS_MASM(masm) masm->
 #endif
 
-
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
deleted file mode 100644
index 4118db8..0000000
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ /dev/null
@@ -1,1310 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/cpu-profiler.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
-#include "src/unicode.h"
-
-#include "src/ia32/regexp-macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - edx : Current character.  Must be loaded using LoadCurrentCharacter
- *         before using any of the dispatch methods.  Temporarily stores the
- *         index of capture start after a matching pass for a global regexp.
- * - edi : Current position in input, as negative offset from end of string.
- *         Please notice that this is the byte offset, not the character offset!
- * - esi : end of input (points to byte after last character in input).
- * - ebp : Frame pointer.  Used to access arguments, local variables and
- *         RegExp registers.
- * - esp : Points to tip of C stack.
- * - ecx : Points to tip of backtrack stack
- *
- * The registers eax and ebx are free to use for computations.
- *
- * Each call to a public method should retain this convention.
- * The stack will have the following structure:
- *       - Isolate* isolate     (address of the current isolate)
- *       - direct_call          (if 1, direct call from JavaScript code, if 0
- *                               call through the runtime system)
- *       - stack_area_base      (high end of the memory area to use as
- *                               backtracking stack)
- *       - capture array size   (may fit multiple sets of matches)
- *       - int* capture_array   (int[num_saved_registers_], for output).
- *       - end of input         (address of end of string)
- *       - start of input       (address of first character in string)
- *       - start index          (character index of start)
- *       - String* input_string (location of a handle containing the string)
- *       --- frame alignment (if applicable) ---
- *       - return address
- * ebp-> - old ebp
- *       - backup of caller esi
- *       - backup of caller edi
- *       - backup of caller ebx
- *       - success counter      (only for global regexps to count matches).
- *       - Offset of location before start of input (effectively character
- *         position -1). Used to initialize capture registers to a non-position.
- *       - register 0  ebp[-4]  (only positions must be stored in the first
- *       - register 1  ebp[-8]   num_saved_registers_ registers)
- *       - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- *              int start_index,
- *              Address start,
- *              Address end,
- *              int* capture_output_array,
- *              bool at_start,
- *              byte* stack_area_base,
- *              bool direct_call)
- */
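
The calling convention described above amounts to casting the generated Code entry to a function pointer along these lines (a sketch; String, Address, and byte are V8-internal types):

    typedef int (*RegExpCodeEntry)(String* input_string, int start_index,
                                   Address start, Address end,
                                   int* capture_output_array, bool at_start,
                                   byte* stack_area_base, bool direct_call);
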
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
-    Mode mode,
-    int registers_to_save,
-    Zone* zone)
-    : NativeRegExpMacroAssembler(zone),
-      masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
-      mode_(mode),
-      num_registers_(registers_to_save),
-      num_saved_registers_(registers_to_save),
-      entry_label_(),
-      start_label_(),
-      success_label_(),
-      backtrack_label_(),
-      exit_label_() {
-  DCHECK_EQ(0, registers_to_save % 2);
-  __ jmp(&entry_label_);   // We'll write the entry code later.
-  __ bind(&start_label_);  // And then continue from here.
-}
-
-
-RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
-  delete masm_;
-  // Unuse labels in case we throw away the assembler without calling GetCode.
-  entry_label_.Unuse();
-  start_label_.Unuse();
-  success_label_.Unuse();
-  backtrack_label_.Unuse();
-  exit_label_.Unuse();
-  check_preempt_label_.Unuse();
-  stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerIA32::stack_limit_slack()  {
-  return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
-  if (by != 0) {
-    __ add(edi, Immediate(by * char_size()));
-  }
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
-  DCHECK(reg >= 0);
-  DCHECK(reg < num_registers_);
-  if (by != 0) {
-    __ add(register_location(reg), Immediate(by));
-  }
-}
-
-
-void RegExpMacroAssemblerIA32::Backtrack() {
-  CheckPreemption();
-  // Pop Code* offset from backtrack stack, add Code* and jump to location.
-  Pop(ebx);
-  __ add(ebx, Immediate(masm_->CodeObject()));
-  __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerIA32::Bind(Label* label) {
-  __ bind(label);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
-  __ cmp(current_character(), c);
-  BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
-  __ cmp(current_character(), limit);
-  BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
-  Label not_at_start;
-  // Did we start the match at the start of the string at all?
-  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
-  BranchOrBacktrack(not_equal, &not_at_start);
-  // If we did, are we still at the start of the input?
-  __ lea(eax, Operand(esi, edi, times_1, 0));
-  __ cmp(eax, Operand(ebp, kInputStart));
-  BranchOrBacktrack(equal, on_at_start);
-  __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
-  // Did we start the match at the start of the string at all?
-  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
-  BranchOrBacktrack(not_equal, on_not_at_start);
-  // If we did, are we still at the start of the input?
-  __ lea(eax, Operand(esi, edi, times_1, 0));
-  __ cmp(eax, Operand(ebp, kInputStart));
-  BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
-  __ cmp(current_character(), limit);
-  BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
-  Label fallthrough;
-  __ cmp(edi, Operand(backtrack_stackpointer(), 0));
-  __ j(not_equal, &fallthrough);
-  __ add(backtrack_stackpointer(), Immediate(kPointerSize));  // Pop.
-  BranchOrBacktrack(no_condition, on_equal);
-  __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
-    int start_reg,
-    Label* on_no_match) {
-  Label fallthrough;
-  __ mov(edx, register_location(start_reg));  // Index of start of capture
-  __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
-  __ sub(ebx, edx);  // Length of capture.
-
-  // The length of a capture should not be negative. This can only happen
-  // if the end of the capture is unrecorded, or at a point earlier than
-  // the start of the capture.
-  BranchOrBacktrack(less, on_no_match);
-
-  // If length is zero, either the capture is empty or it is completely
-  // uncaptured. In either case succeed immediately.
-  __ j(equal, &fallthrough);
-
-  // Check that there are sufficient characters left in the input.
-  __ mov(eax, edi);
-  __ add(eax, ebx);
-  BranchOrBacktrack(greater, on_no_match);
-
-  if (mode_ == LATIN1) {
-    Label success;
-    Label fail;
-    Label loop_increment;
-    // Save register contents to make the registers available below.
-    __ push(edi);
-    __ push(backtrack_stackpointer());
-    // After this, the eax, ecx, and edi registers are available.
-
-    __ add(edx, esi);  // Start of capture
-    __ add(edi, esi);  // Start of text to match against capture.
-    __ add(ebx, edi);  // End of text to match against capture.
-
-    Label loop;
-    __ bind(&loop);
-    __ movzx_b(eax, Operand(edi, 0));
-    __ cmpb_al(Operand(edx, 0));
-    __ j(equal, &loop_increment);
-
-    // Mismatch, try case-insensitive match (converting letters to lower-case).
-    __ or_(eax, 0x20);  // Convert match character to lower-case.
-    __ lea(ecx, Operand(eax, -'a'));
-    __ cmp(ecx, static_cast<int32_t>('z' - 'a'));  // Is eax a lowercase letter?
-    Label convert_capture;
-    __ j(below_equal, &convert_capture);  // In range 'a'-'z'.
-    // Latin-1: Check for values in range [224,254] but not 247.
-    __ sub(ecx, Immediate(224 - 'a'));
-    __ cmp(ecx, Immediate(254 - 224));
-    __ j(above, &fail);  // Weren't Latin-1 letters.
-    __ cmp(ecx, Immediate(247 - 224));  // Check for 247.
-    __ j(equal, &fail);
-    __ bind(&convert_capture);
-    // Also convert capture character.
-    __ movzx_b(ecx, Operand(edx, 0));
-    __ or_(ecx, 0x20);
-
-    __ cmp(eax, ecx);
-    __ j(not_equal, &fail);
-
-    __ bind(&loop_increment);
-    // Increment pointers into match and capture strings.
-    __ add(edx, Immediate(1));
-    __ add(edi, Immediate(1));
-    // Compare to end of match, and loop if not done.
-    __ cmp(edi, ebx);
-    __ j(below, &loop);
-    __ jmp(&success);
-
-    __ bind(&fail);
-    // Restore original values before failing.
-    __ pop(backtrack_stackpointer());
-    __ pop(edi);
-    BranchOrBacktrack(no_condition, on_no_match);
-
-    __ bind(&success);
-    // Restore original value before continuing.
-    __ pop(backtrack_stackpointer());
-    // Drop original value of character position.
-    __ add(esp, Immediate(kPointerSize));
-    // Compute new value of character position after the matched part.
-    __ sub(edi, esi);
-  } else {
-    DCHECK(mode_ == UC16);
-    // Save registers before calling C function.
-    __ push(esi);
-    __ push(edi);
-    __ push(backtrack_stackpointer());
-    __ push(ebx);
-
-    static const int argument_count = 4;
-    __ PrepareCallCFunction(argument_count, ecx);
-    // Put arguments into allocated stack area, last argument highest on stack.
-    // Parameters are
-    //   Address byte_offset1 - Address captured substring's start.
-    //   Address byte_offset2 - Address of current character position.
-    //   size_t byte_length - length of capture in bytes(!)
-    //   Isolate* isolate
-
-    // Set isolate.
-    __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(ExternalReference::isolate_address(isolate())));
-    // Set byte_length.
-    __ mov(Operand(esp, 2 * kPointerSize), ebx);
-    // Set byte_offset2.
-    // Found by adding the negative string-end offset of the current
-    // position (edi) to the end of the string.
-    __ add(edi, esi);
-    __ mov(Operand(esp, 1 * kPointerSize), edi);
-    // Set byte_offset1.
-    // Start of capture, where edx already holds string-end negative offset.
-    __ add(edx, esi);
-    __ mov(Operand(esp, 0 * kPointerSize), edx);
-
-    {
-      AllowExternalCallThatCantCauseGC scope(masm_);
-      ExternalReference compare =
-          ExternalReference::re_case_insensitive_compare_uc16(isolate());
-      __ CallCFunction(compare, argument_count);
-    }
-    // Pop original values before reacting on result value.
-    __ pop(ebx);
-    __ pop(backtrack_stackpointer());
-    __ pop(edi);
-    __ pop(esi);
-
-    // Check if function returned non-zero for success or zero for failure.
-    __ or_(eax, eax);
-    BranchOrBacktrack(zero, on_no_match);
-    // On success, increment position by length of capture.
-    __ add(edi, ebx);
-  }
-  __ bind(&fallthrough);
-}
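
// A minimal standalone sketch of the Latin-1 comparison the loop above
// performs; the helper name is hypothetical, for illustration only. Folding
// is done by OR-ing in 0x20, which maps 'A'..'Z' to 'a'..'z' and 0xC0..0xDE
// to 0xE0..0xFE; 247 (the division sign) is skipped because it is not a
// letter, and 255 is skipped because its upper-case partner (0x178) falls
// outside Latin-1.
static bool Latin1EqualIgnoreCase(uint8_t match_char, uint8_t capture_char) {
  if (match_char == capture_char) return true;
  uint8_t folded = match_char | 0x20;  // Fold the match character.
  bool ascii_letter = static_cast<uint8_t>(folded - 'a') <= 'z' - 'a';
  bool latin1_letter = folded >= 224 && folded <= 254 && folded != 247;
  if (!ascii_letter && !latin1_letter) return false;
  return folded == (capture_char | 0x20);  // Fold the capture character too.
}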
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReference(
-    int start_reg,
-    Label* on_no_match) {
-  Label fallthrough;
-  Label success;
-  Label fail;
-
-  // Find length of back-referenced capture.
-  __ mov(edx, register_location(start_reg));
-  __ mov(eax, register_location(start_reg + 1));
-  __ sub(eax, edx);  // Length to check.
-  // Fail on partial or illegal capture (start of capture after end of capture).
-  BranchOrBacktrack(less, on_no_match);
-  // Succeed on empty capture (including no capture)
-  __ j(equal, &fallthrough);
-
-  // Check that there are sufficient characters left in the input.
-  __ mov(ebx, edi);
-  __ add(ebx, eax);
-  BranchOrBacktrack(greater, on_no_match);
-
-  // Save register to make it available below.
-  __ push(backtrack_stackpointer());
-
-  // Compute pointers to match string and capture string
-  __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
-  __ add(edx, esi);  // Start of capture.
-  __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
-
-  Label loop;
-  __ bind(&loop);
-  if (mode_ == LATIN1) {
-    __ movzx_b(eax, Operand(edx, 0));
-    __ cmpb_al(Operand(ebx, 0));
-  } else {
-    DCHECK(mode_ == UC16);
-    __ movzx_w(eax, Operand(edx, 0));
-    __ cmpw_ax(Operand(ebx, 0));
-  }
-  __ j(not_equal, &fail);
-  // Increment pointers into capture and match string.
-  __ add(edx, Immediate(char_size()));
-  __ add(ebx, Immediate(char_size()));
-  // Check if we have reached end of match area.
-  __ cmp(ebx, ecx);
-  __ j(below, &loop);
-  __ jmp(&success);
-
-  __ bind(&fail);
-  // Restore backtrack stackpointer.
-  __ pop(backtrack_stackpointer());
-  BranchOrBacktrack(no_condition, on_no_match);
-
-  __ bind(&success);
-  // Move current character position to position after match.
-  __ mov(edi, ecx);
-  __ sub(edi, esi);
-  // Restore backtrack stackpointer.
-  __ pop(backtrack_stackpointer());
-
-  __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
-                                                 Label* on_not_equal) {
-  __ cmp(current_character(), c);
-  BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
-                                                      uint32_t mask,
-                                                      Label* on_equal) {
-  if (c == 0) {
-    __ test(current_character(), Immediate(mask));
-  } else {
-    __ mov(eax, mask);
-    __ and_(eax, current_character());
-    __ cmp(eax, c);
-  }
-  BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
-                                                         uint32_t mask,
-                                                         Label* on_not_equal) {
-  if (c == 0) {
-    __ test(current_character(), Immediate(mask));
-  } else {
-    __ mov(eax, mask);
-    __ and_(eax, current_character());
-    __ cmp(eax, c);
-  }
-  BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
-    uc16 c,
-    uc16 minus,
-    uc16 mask,
-    Label* on_not_equal) {
-  DCHECK(minus < String::kMaxUtf16CodeUnit);
-  __ lea(eax, Operand(current_character(), -minus));
-  if (c == 0) {
-    __ test(eax, Immediate(mask));
-  } else {
-    __ and_(eax, mask);
-    __ cmp(eax, c);
-  }
-  BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterInRange(
-    uc16 from,
-    uc16 to,
-    Label* on_in_range) {
-  __ lea(eax, Operand(current_character(), -from));
-  __ cmp(eax, to - from);
-  BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
-    uc16 from,
-    uc16 to,
-    Label* on_not_in_range) {
-  __ lea(eax, Operand(current_character(), -from));
-  __ cmp(eax, to - from);
-  BranchOrBacktrack(above, on_not_in_range);
-}
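
// Both range checks above use the classic unsigned-compare trick: for
// from <= to, "from <= c && c <= to" collapses into the single comparison
// "unsigned(c - from) <= unsigned(to - from)", because any c below `from`
// wraps around to a large unsigned value. A minimal sketch (hypothetical
// helper, for illustration):
static inline bool CharInRange(uint16_t c, uint16_t from, uint16_t to) {
  return static_cast<uint32_t>(c - from) <= static_cast<uint32_t>(to - from);
}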
-
-
-void RegExpMacroAssemblerIA32::CheckBitInTable(
-    Handle<ByteArray> table,
-    Label* on_bit_set) {
-  __ mov(eax, Immediate(table));
-  Register index = current_character();
-  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
-    __ mov(ebx, kTableSize - 1);
-    __ and_(ebx, current_character());
-    index = ebx;
-  }
-  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
-  BranchOrBacktrack(not_equal, on_bit_set);
-}
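
// The ByteArray acts as a 256-entry membership table: a nonzero byte at
// index c means character c is in the class. In UC16 mode the character is
// first masked with kTableSize - 1; in LATIN1 mode the mask is a no-op since
// all characters already fit in the table. A minimal sketch (hypothetical
// helper, for illustration):
static inline bool CharBitInTable(const uint8_t* table, uint32_t c,
                                  uint32_t table_mask) {
  return table[c & table_mask] != 0;
}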
-
-
-bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
-                                                          Label* on_no_match) {
-  // Range checks (c in min..max) are generally implemented by an unsigned
-  // (c - min) <= (max - min) check
-  switch (type) {
-  case 's':
-    // Match space-characters
-    if (mode_ == LATIN1) {
-      // One byte space characters are '\t'..'\r', ' ' and \u00a0.
-      Label success;
-      __ cmp(current_character(), ' ');
-      __ j(equal, &success, Label::kNear);
-      // Check range 0x09..0x0d
-      __ lea(eax, Operand(current_character(), -'\t'));
-      __ cmp(eax, '\r' - '\t');
-      __ j(below_equal, &success, Label::kNear);
-      // \u00a0 (NBSP).
-      __ cmp(eax, 0x00a0 - '\t');
-      BranchOrBacktrack(not_equal, on_no_match);
-      __ bind(&success);
-      return true;
-    }
-    return false;
-  case 'S':
-    // The emitted code for generic character classes is good enough.
-    return false;
-  case 'd':
-    // Match ASCII digits ('0'..'9')
-    __ lea(eax, Operand(current_character(), -'0'));
-    __ cmp(eax, '9' - '0');
-    BranchOrBacktrack(above, on_no_match);
-    return true;
-  case 'D':
-  // Match non-digits (anything but ASCII '0'..'9')
-    __ lea(eax, Operand(current_character(), -'0'));
-    __ cmp(eax, '9' - '0');
-    BranchOrBacktrack(below_equal, on_no_match);
-    return true;
-  case '.': {
-    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
-    __ mov(eax, current_character());
-    __ xor_(eax, Immediate(0x01));
-    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(eax, Immediate(0x0b));
-    __ cmp(eax, 0x0c - 0x0b);
-    BranchOrBacktrack(below_equal, on_no_match);
-    if (mode_ == UC16) {
-      // Compare original value to 0x2028 and 0x2029, using the already
-      // computed value ((current_char ^ 0x01) - 0x0b). I.e., check for
-      // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(eax, Immediate(0x2028 - 0x0b));
-      __ cmp(eax, 0x2029 - 0x2028);
-      BranchOrBacktrack(below_equal, on_no_match);
-    }
-    return true;
-  }
-  case 'w': {
-    if (mode_ != LATIN1) {
-      // Table is 256 entries, so all Latin1 characters can be tested.
-      __ cmp(current_character(), Immediate('z'));
-      BranchOrBacktrack(above, on_no_match);
-    }
-    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
-    ExternalReference word_map = ExternalReference::re_word_character_map();
-    __ test_b(current_character(),
-              Operand::StaticArray(current_character(), times_1, word_map));
-    BranchOrBacktrack(zero, on_no_match);
-    return true;
-  }
-  case 'W': {
-    Label done;
-    if (mode_ != LATIN1) {
-      // Table is 256 entries, so all Latin1 characters can be tested.
-      __ cmp(current_character(), Immediate('z'));
-      __ j(above, &done);
-    }
-    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
-    ExternalReference word_map = ExternalReference::re_word_character_map();
-    __ test_b(current_character(),
-              Operand::StaticArray(current_character(), times_1, word_map));
-    BranchOrBacktrack(not_zero, on_no_match);
-    if (mode_ != LATIN1) {
-      __ bind(&done);
-    }
-    return true;
-  }
-  // Non-standard classes (with no syntactic shorthand) used internally.
-  case '*':
-    // Match any character.
-    return true;
-  case 'n': {
-    // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
-    // The opposite of '.'.
-    __ mov(eax, current_character());
-    __ xor_(eax, Immediate(0x01));
-    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(eax, Immediate(0x0b));
-    __ cmp(eax, 0x0c - 0x0b);
-    if (mode_ == LATIN1) {
-      BranchOrBacktrack(above, on_no_match);
-    } else {
-      Label done;
-      BranchOrBacktrack(below_equal, &done);
-      DCHECK_EQ(UC16, mode_);
-      // Compare original value to 0x2028 and 0x2029, using the already
-      // computed value ((current_char ^ 0x01) - 0x0b). I.e., check for
-      // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(eax, Immediate(0x2028 - 0x0b));
-      __ cmp(eax, 1);
-      BranchOrBacktrack(above, on_no_match);
-      __ bind(&done);
-    }
-    return true;
-  }
-  // No custom implementation (yet): s(UC16), S(UC16).
-  default:
-    return false;
-  }
-}
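
// The '.' and 'n' cases above share an XOR trick: c ^ 0x01 maps '\n' (0x0a)
// to 0x0b and '\r' (0x0d) to 0x0c, two adjacent values, so both line
// terminators fall to a single unsigned range check; the UC16 path then
// reuses the shifted value to test U+2028/U+2029. A minimal sketch
// (hypothetical helper, for illustration):
static inline bool IsLineTerminator(uint32_t c, bool uc16_mode) {
  uint32_t x = (c ^ 0x01) - 0x0b;  // 0 for '\n', 1 for '\r'.
  if (x <= 0x0c - 0x0b) return true;
  // U+2028 and U+2029 map to 0x201d and 0x201e under the same transform.
  return uc16_mode && x - (0x2028 - 0x0b) <= 1;
}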
-
-
-void RegExpMacroAssemblerIA32::Fail() {
-  STATIC_ASSERT(FAILURE == 0);  // Return value for failure is zero.
-  if (!global()) {
-    __ Move(eax, Immediate(FAILURE));
-  }
-  __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
-  Label return_eax;
-  // Finalize code - write the entry point code now we know how many
-  // registers we need.
-
-  // Entry code:
-  __ bind(&entry_label_);
-
-  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
-  // code is generated.
-  FrameScope scope(masm_, StackFrame::MANUAL);
-
-  // Actually emit code to start a new stack frame.
-  __ push(ebp);
-  __ mov(ebp, esp);
-  // Save callee-save registers. Order here should correspond to order of
-  // kBackup_ebx etc.
-  __ push(esi);
-  __ push(edi);
-  __ push(ebx);  // Callee-save on MacOS.
-  __ push(Immediate(0));  // Number of successful matches in a global regexp.
-  __ push(Immediate(0));  // Make room for "input start - 1" constant.
-
-  // Check if we have space on the stack for registers.
-  Label stack_limit_hit;
-  Label stack_ok;
-
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
-  __ mov(ecx, esp);
-  __ sub(ecx, Operand::StaticVariable(stack_limit));
-  // Handle it if the stack pointer is already below the stack limit.
-  __ j(below_equal, &stack_limit_hit);
-  // Check if there is room for the variable number of registers above
-  // the stack limit.
-  __ cmp(ecx, num_registers_ * kPointerSize);
-  __ j(above_equal, &stack_ok);
-  // Exit with OutOfMemory exception. There is not enough space on the stack
-  // for our working registers.
-  __ mov(eax, EXCEPTION);
-  __ jmp(&return_eax);
-
-  __ bind(&stack_limit_hit);
-  CallCheckStackGuardState(ebx);
-  __ or_(eax, eax);
-  // If returned value is non-zero, we exit with the returned value as result.
-  __ j(not_zero, &return_eax);
-
-  __ bind(&stack_ok);
-  // Load start index for later use.
-  __ mov(ebx, Operand(ebp, kStartIndex));
-
-  // Allocate space on stack for registers.
-  __ sub(esp, Immediate(num_registers_ * kPointerSize));
-  // Load string length.
-  __ mov(esi, Operand(ebp, kInputEnd));
-  // Load input position.
-  __ mov(edi, Operand(ebp, kInputStart));
-  // Set up edi to be negative offset from string end.
-  __ sub(edi, esi);
-
-  // Set eax to address of char before start of the string.
-  // (effectively string position -1).
-  __ neg(ebx);
-  if (mode_ == UC16) {
-    __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
-  } else {
-    __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
-  }
-  // Store this value in a local variable, for use when clearing
-  // position registers.
-  __ mov(Operand(ebp, kInputStartMinusOne), eax);
-
-#if V8_OS_WIN
-  // Ensure that we write to each stack page, in order. Skipping a page
-  // on Windows can cause segmentation faults. Assuming page size is 4k.
-  const int kPageSize = 4096;
-  const int kRegistersPerPage = kPageSize / kPointerSize;
-  for (int i = num_saved_registers_ + kRegistersPerPage - 1;
-      i < num_registers_;
-      i += kRegistersPerPage) {
-    __ mov(register_location(i), eax);  // One write every page.
-  }
-#endif  // V8_OS_WIN
-
-  Label load_char_start_regexp, start_regexp;
-  // Load newline if index is at start, previous character otherwise.
-  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
-  __ j(not_equal, &load_char_start_regexp, Label::kNear);
-  __ mov(current_character(), '\n');
-  __ jmp(&start_regexp, Label::kNear);
-
-  // Global regexp restarts matching here.
-  __ bind(&load_char_start_regexp);
-  // Load previous char as initial value of current character register.
-  LoadCurrentCharacterUnchecked(-1, 1);
-  __ bind(&start_regexp);
-
-  // Initialize on-stack registers.
-  if (num_saved_registers_ > 0) {  // Always true if generated from a regexp.
-    // Fill saved registers with initial value = start offset - 1
-    // Fill in stack push order, to avoid accessing across an unwritten
-    // page (a problem on Windows).
-    if (num_saved_registers_ > 8) {
-      __ mov(ecx, kRegisterZero);
-      Label init_loop;
-      __ bind(&init_loop);
-      __ mov(Operand(ebp, ecx, times_1, 0), eax);
-      __ sub(ecx, Immediate(kPointerSize));
-      __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
-      __ j(greater, &init_loop);
-    } else {  // Unroll the loop.
-      for (int i = 0; i < num_saved_registers_; i++) {
-        __ mov(register_location(i), eax);
-      }
-    }
-  }
-
-  // Initialize backtrack stack pointer.
-  __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
-  __ jmp(&start_label_);
-
-  // Exit code:
-  if (success_label_.is_linked()) {
-    // Save captures when successful.
-    __ bind(&success_label_);
-    if (num_saved_registers_ > 0) {
-      // Copy captures to the output array.
-      __ mov(ebx, Operand(ebp, kRegisterOutput));
-      __ mov(ecx, Operand(ebp, kInputEnd));
-      __ mov(edx, Operand(ebp, kStartIndex));
-      __ sub(ecx, Operand(ebp, kInputStart));
-      if (mode_ == UC16) {
-        __ lea(ecx, Operand(ecx, edx, times_2, 0));
-      } else {
-        __ add(ecx, edx);
-      }
-      for (int i = 0; i < num_saved_registers_; i++) {
-        __ mov(eax, register_location(i));
-        if (i == 0 && global_with_zero_length_check()) {
-          // Keep capture start in edx for the zero-length check later.
-          __ mov(edx, eax);
-        }
-        // Convert to index from start of string, not end.
-        __ add(eax, ecx);
-        if (mode_ == UC16) {
-          __ sar(eax, 1);  // Convert byte index to character index.
-        }
-        __ mov(Operand(ebx, i * kPointerSize), eax);
-      }
-    }
-
-    if (global()) {
-      // Restart matching if the regular expression is flagged as global.
-      // Increment success counter.
-      __ inc(Operand(ebp, kSuccessfulCaptures));
-      // Capture results have been stored, so the number of remaining global
-      // output registers is reduced by the number of stored captures.
-      __ mov(ecx, Operand(ebp, kNumOutputRegisters));
-      __ sub(ecx, Immediate(num_saved_registers_));
-      // Check whether we have enough room for another set of capture results.
-      __ cmp(ecx, Immediate(num_saved_registers_));
-      __ j(less, &exit_label_);
-
-      __ mov(Operand(ebp, kNumOutputRegisters), ecx);
-      // Advance the location for output.
-      __ add(Operand(ebp, kRegisterOutput),
-             Immediate(num_saved_registers_ * kPointerSize));
-
-      // Prepare eax to initialize registers with its value in the next run.
-      __ mov(eax, Operand(ebp, kInputStartMinusOne));
-
-      if (global_with_zero_length_check()) {
-        // Special case for zero-length matches.
-        // edx: capture start index
-        __ cmp(edi, edx);
-        // Not a zero-length match, restart.
-        __ j(not_equal, &load_char_start_regexp);
-        // edi (offset from the end) is zero if we already reached the end.
-        __ test(edi, edi);
-        __ j(zero, &exit_label_, Label::kNear);
-        // Advance current position after a zero-length match.
-        if (mode_ == UC16) {
-          __ add(edi, Immediate(2));
-        } else {
-          __ inc(edi);
-        }
-      }
-
-      __ jmp(&load_char_start_regexp);
-    } else {
-      __ mov(eax, Immediate(SUCCESS));
-    }
-  }
-
-  __ bind(&exit_label_);
-  if (global()) {
-    // Return the number of successful captures.
-    __ mov(eax, Operand(ebp, kSuccessfulCaptures));
-  }
-
-  __ bind(&return_eax);
-  // Skip esp past regexp registers.
-  __ lea(esp, Operand(ebp, kBackup_ebx));
-  // Restore callee-save registers.
-  __ pop(ebx);
-  __ pop(edi);
-  __ pop(esi);
-  // Exit function frame, restore previous one.
-  __ pop(ebp);
-  __ ret(0);
-
-  // Backtrack code (branch target for conditional backtracks).
-  if (backtrack_label_.is_linked()) {
-    __ bind(&backtrack_label_);
-    Backtrack();
-  }
-
-  Label exit_with_exception;
-
-  // Preempt-code
-  if (check_preempt_label_.is_linked()) {
-    SafeCallTarget(&check_preempt_label_);
-
-    __ push(backtrack_stackpointer());
-    __ push(edi);
-
-    CallCheckStackGuardState(ebx);
-    __ or_(eax, eax);
-    // If returning non-zero, we should end execution with the given
-    // result as return value.
-    __ j(not_zero, &return_eax);
-
-    __ pop(edi);
-    __ pop(backtrack_stackpointer());
-    // String might have moved: Reload esi from frame.
-    __ mov(esi, Operand(ebp, kInputEnd));
-    SafeReturn();
-  }
-
-  // Backtrack stack overflow code.
-  if (stack_overflow_label_.is_linked()) {
-    SafeCallTarget(&stack_overflow_label_);
-    // Reached if the backtrack-stack limit has been hit.
-
-    Label grow_failed;
-    // Save registers before calling C function
-    __ push(esi);
-    __ push(edi);
-
-    // Call GrowStack(backtrack_stackpointer())
-    static const int num_arguments = 3;
-    __ PrepareCallCFunction(num_arguments, ebx);
-    __ mov(Operand(esp, 2 * kPointerSize),
-           Immediate(ExternalReference::isolate_address(isolate())));
-    __ lea(eax, Operand(ebp, kStackHighEnd));
-    __ mov(Operand(esp, 1 * kPointerSize), eax);
-    __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
-    ExternalReference grow_stack =
-        ExternalReference::re_grow_stack(isolate());
-    __ CallCFunction(grow_stack, num_arguments);
-    // If the return value is NULL, we have failed to grow the stack, and
-    // must exit with a stack-overflow exception.
-    __ or_(eax, eax);
-    __ j(equal, &exit_with_exception);
-    // Otherwise use return value as new stack pointer.
-    __ mov(backtrack_stackpointer(), eax);
-    // Restore saved registers and continue.
-    __ pop(edi);
-    __ pop(esi);
-    SafeReturn();
-  }
-
-  if (exit_with_exception.is_linked()) {
-    // If any of the code above needed to exit with an exception.
-    __ bind(&exit_with_exception);
-    // Exit with Result EXCEPTION(-1) to signal thrown exception.
-    __ mov(eax, EXCEPTION);
-    __ jmp(&return_eax);
-  }
-
-  CodeDesc code_desc;
-  masm_->GetCode(&code_desc);
-  Handle<Code> code =
-      isolate()->factory()->NewCode(code_desc,
-                                    Code::ComputeFlags(Code::REGEXP),
-                                    masm_->CodeObject());
-  PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
-  return Handle<HeapObject>::cast(code);
-}
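
// In the success path above, capture registers hold byte offsets relative to
// the end of the input (the same convention as edi). Converting one to a
// character index from the start of the string adds the input length in
// bytes plus the start index (scaled to bytes), then divides by the
// character size. A minimal sketch with hypothetical names:
static int RegisterToCharIndex(int reg_bytes_from_end, int input_length_bytes,
                               int start_index, bool uc16_mode) {
  int bias = input_length_bytes + (uc16_mode ? start_index * 2 : start_index);
  int byte_index = reg_bytes_from_end + bias;
  return uc16_mode ? byte_index >> 1 : byte_index;  // sar eax, 1 above.
}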
-
-
-void RegExpMacroAssemblerIA32::GoTo(Label* to) {
-  BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterGE(int reg,
-                                            int comparand,
-                                            Label* if_ge) {
-  __ cmp(register_location(reg), Immediate(comparand));
-  BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterLT(int reg,
-                                            int comparand,
-                                            Label* if_lt) {
-  __ cmp(register_location(reg), Immediate(comparand));
-  BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterEqPos(int reg,
-                                               Label* if_eq) {
-  __ cmp(edi, register_location(reg));
-  BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
-    RegExpMacroAssemblerIA32::Implementation() {
-  return kIA32Implementation;
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
-                                                    Label* on_end_of_input,
-                                                    bool check_bounds,
-                                                    int characters) {
-  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
-  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
-  if (check_bounds) {
-    CheckPosition(cp_offset + characters - 1, on_end_of_input);
-  }
-  LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerIA32::PopCurrentPosition() {
-  Pop(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PopRegister(int register_index) {
-  Pop(eax);
-  __ mov(register_location(register_index), eax);
-}
-
-
-void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
-  Push(Immediate::CodeRelativeOffset(label));
-  CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::PushCurrentPosition() {
-  Push(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PushRegister(int register_index,
-                                            StackCheckFlag check_stack_limit) {
-  __ mov(eax, register_location(register_index));
-  Push(eax);
-  if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
-  __ mov(edi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
-  __ mov(backtrack_stackpointer(), register_location(reg));
-  __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-}
-
-void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by)  {
-  Label after_position;
-  __ cmp(edi, -by * char_size());
-  __ j(greater_equal, &after_position, Label::kNear);
-  __ mov(edi, -by * char_size());
-  // On RegExp code entry (where this operation is used), the character before
-  // the current position is expected to be already loaded.
-  // We have advanced the position, so it's safe to read backwards.
-  LoadCurrentCharacterUnchecked(-1, 1);
-  __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
-  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
-  __ mov(register_location(register_index), Immediate(to));
-}
-
-
-bool RegExpMacroAssemblerIA32::Succeed() {
-  __ jmp(&success_label_);
-  return global();
-}
-
-
-void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
-                                                              int cp_offset) {
-  if (cp_offset == 0) {
-    __ mov(register_location(reg), edi);
-  } else {
-    __ lea(eax, Operand(edi, cp_offset * char_size()));
-    __ mov(register_location(reg), eax);
-  }
-}
-
-
-void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
-  DCHECK(reg_from <= reg_to);
-  __ mov(eax, Operand(ebp, kInputStartMinusOne));
-  for (int reg = reg_from; reg <= reg_to; reg++) {
-    __ mov(register_location(reg), eax);
-  }
-}
-
-
-void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
-  __ mov(eax, backtrack_stackpointer());
-  __ sub(eax, Operand(ebp, kStackHighEnd));
-  __ mov(register_location(reg), eax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
-  static const int num_arguments = 3;
-  __ PrepareCallCFunction(num_arguments, scratch);
-  // RegExp code frame pointer.
-  __ mov(Operand(esp, 2 * kPointerSize), ebp);
-  // Code* of self.
-  __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
-  // Next address on the stack (will be address of return address).
-  __ lea(eax, Operand(esp, -kPointerSize));
-  __ mov(Operand(esp, 0 * kPointerSize), eax);
-  ExternalReference check_stack_guard =
-      ExternalReference::re_check_stack_guard_state(isolate());
-  __ CallCFunction(check_stack_guard, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
-  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
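
// Because frame_entry returns a T&, callers below can both read parameters
// out of the saved frame and patch them in place, e.g.:
//
//   int start_index = frame_entry<int>(re_frame, kStartIndex);
//   frame_entry<const byte*>(re_frame, kInputStart) = new_address;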
-
-
-int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
-                                                   Code* re_code,
-                                                   Address re_frame) {
-  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
-  StackLimitCheck check(isolate);
-  if (check.JsHasOverflowed()) {
-    isolate->StackOverflow();
-    return EXCEPTION;
-  }
-
-  // If it is not a real stack overflow, the stack guard was used to
-  // interrupt execution for another purpose.
-
-  // If this is a direct call from JavaScript retry the RegExp forcing the call
-  // through the runtime system. Currently the direct call cannot handle a GC.
-  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
-    return RETRY;
-  }
-
-  // Prepare for possible GC.
-  HandleScope handles(isolate);
-  Handle<Code> code_handle(re_code);
-
-  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
-  // Current string.
-  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
-
-  DCHECK(re_code->instruction_start() <= *return_address);
-  DCHECK(*return_address <=
-      re_code->instruction_start() + re_code->instruction_size());
-
-  Object* result = isolate->stack_guard()->HandleInterrupts();
-
-  if (*code_handle != re_code) {  // Return address no longer valid
-    int delta = code_handle->address() - re_code->address();
-    // Overwrite the return address on the stack.
-    *return_address += delta;
-  }
-
-  if (result->IsException()) {
-    return EXCEPTION;
-  }
-
-  Handle<String> subject_tmp = subject;
-  int slice_offset = 0;
-
-  // Extract the underlying string and the slice offset.
-  if (StringShape(*subject_tmp).IsCons()) {
-    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
-  } else if (StringShape(*subject_tmp).IsSliced()) {
-    SlicedString* slice = SlicedString::cast(*subject_tmp);
-    subject_tmp = Handle<String>(slice->parent());
-    slice_offset = slice->offset();
-  }
-
-  // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
-    // If we changed between a LATIN1 and a UC16 string, the specialized
-    // code cannot be used, and we need to restart regexp matching from
-    // scratch (including, potentially, compiling a new version of the code).
-    return RETRY;
-  }
-
-  // Otherwise, the content of the string might have moved. It must still
-  // be a sequential or external string with the same content.
-  // Update the start and end pointers in the stack frame to the current
-  // location (whether it has actually moved or not).
-  DCHECK(StringShape(*subject_tmp).IsSequential() ||
-      StringShape(*subject_tmp).IsExternal());
-
-  // The original start address of the characters to match.
-  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
-  // Find the current start address of the same character at the current string
-  // position.
-  int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject_tmp,
-                                                    start_index + slice_offset);
-
-  if (start_address != new_address) {
-    // If there is a difference, update the object pointer and start and end
-    // addresses in the RegExp stack frame to match the new value.
-    const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = static_cast<int>(end_address - start_address);
-    frame_entry<const String*>(re_frame, kInputString) = *subject;
-    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
-    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
-  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
-    // Subject string might have been a ConsString that underwent
-    // short-circuiting during GC. That will not change start_address but
-    // will change the pointer inside the subject handle.
-    frame_entry<const String*>(re_frame, kInputString) = *subject;
-  }
-
-  return 0;
-}
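
// The return-address fixup above works because a moving GC relocates the
// whole code object without changing its internal layout: the saved return
// address keeps the same offset from the object base, so adding the
// relocation delta (new base minus old base) re-targets it, sketched as:
//
//   intptr_t delta = code_handle->address() - re_code->address();
//   *return_address += delta;  // Same offset, new object base.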
-
-
-Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
-  DCHECK(register_index < (1<<30));
-  if (num_registers_ <= register_index) {
-    num_registers_ = register_index + 1;
-  }
-  return Operand(ebp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
-                                             Label* on_outside_input) {
-  __ cmp(edi, -cp_offset * char_size());
-  BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
-                                                 Label* to) {
-  if (condition < 0) {  // No condition
-    if (to == NULL) {
-      Backtrack();
-      return;
-    }
-    __ jmp(to);
-    return;
-  }
-  if (to == NULL) {
-    __ j(condition, &backtrack_label_);
-    return;
-  }
-  __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
-  Label return_to;
-  __ push(Immediate::CodeRelativeOffset(&return_to));
-  __ jmp(to);
-  __ bind(&return_to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeReturn() {
-  __ pop(ebx);
-  __ add(ebx, Immediate(masm_->CodeObject()));
-  __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
-  __ bind(name);
-}
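
// SafeCall/SafeReturn avoid leaving absolute code addresses on the stack,
// which a moving GC could silently invalidate: the call pushes the return
// label's offset relative to the code object, and the return adds the code
// object's current address back before jumping. Conceptually (hypothetical
// pseudo-helpers mirroring the push/add/jmp sequence above):
//
//   push(offset_of(return_label));        // SafeCall: code-relative offset.
//   jmp(target);
//   ...
//   jmp(code_object_address() + pop());   // SafeReturn: rebase, then jump.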
-
-
-void RegExpMacroAssemblerIA32::Push(Register source) {
-  DCHECK(!source.is(backtrack_stackpointer()));
-  // Notice: This updates flags, unlike normal Push.
-  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
-  __ mov(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Immediate value) {
-  // Notice: This updates flags, unlike normal Push.
-  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
-  __ mov(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerIA32::Pop(Register target) {
-  DCHECK(!target.is(backtrack_stackpointer()));
-  __ mov(target, Operand(backtrack_stackpointer(), 0));
-  // Notice: This updates flags, unlike normal Pop.
-  __ add(backtrack_stackpointer(), Immediate(kPointerSize));
-}
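
// The backtrack stack is a separate heap-allocated stack addressed through
// ecx. It grows downwards like the machine stack, but Push/Pop move ecx with
// sub/add, which clobbers EFLAGS (unlike the hardware push/pop) -- hence the
// "updates flags" notes above. A minimal model with hypothetical names:
static inline void BacktrackPush(uint32_t** sp, uint32_t value) {
  *sp -= 1;      // Decrement first (mirrors the sub)...
  **sp = value;  // ...then store (mirrors the mov).
}
static inline uint32_t BacktrackPop(uint32_t** sp) {
  uint32_t value = **sp;  // Read the top of the stack...
  *sp += 1;               // ...then increment past it.
  return value;
}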
-
-
-void RegExpMacroAssemblerIA32::CheckPreemption() {
-  // Check for preemption.
-  Label no_preempt;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above, &no_preempt);
-
-  SafeCall(&check_preempt_label_);
-
-  __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckStackLimit() {
-  Label no_stack_overflow;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(isolate());
-  __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
-  __ j(above, &no_stack_overflow);
-
-  SafeCall(&stack_overflow_label_);
-
-  __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
-                                                             int characters) {
-  if (mode_ == LATIN1) {
-    if (characters == 4) {
-      __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
-    } else if (characters == 2) {
-      __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
-    } else {
-      DCHECK(characters == 1);
-      __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
-    }
-  } else {
-    DCHECK(mode_ == UC16);
-    if (characters == 2) {
-      __ mov(current_character(),
-             Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
-    } else {
-      DCHECK(characters == 1);
-      __ movzx_w(current_character(),
-                 Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
-    }
-  }
-}
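
// esi holds the address just past the input and edi a negative byte offset
// from it, so Operand(esi, edi, times_1, disp) addresses the character at
// cp_offset; multi-character loads pull up to 4 Latin-1 or 2 UC16 characters
// into edx in one instruction. A minimal little-endian model (hypothetical
// helper, for illustration):
static inline uint32_t LoadChars(const uint8_t* input_end, int byte_offset,
                                 int cp_offset, int char_size,
                                 int characters) {
  const uint8_t* p = input_end + byte_offset + cp_offset * char_size;
  uint32_t result = 0;
  for (int i = characters * char_size - 1; i >= 0; i--) {
    result = (result << 8) | p[i];  // First character lands in the low bits.
  }
  return result;
}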
-
-
-#undef __
-
-#endif  // V8_INTERPRETED_REGEXP
-
-}}  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
deleted file mode 100644
index 8f6499c..0000000
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-
-#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/assembler-ia32-inl.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
- public:
-  RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
-  virtual ~RegExpMacroAssemblerIA32();
-  virtual int stack_limit_slack();
-  virtual void AdvanceCurrentPosition(int by);
-  virtual void AdvanceRegister(int reg, int by);
-  virtual void Backtrack();
-  virtual void Bind(Label* label);
-  virtual void CheckAtStart(Label* on_at_start);
-  virtual void CheckCharacter(uint32_t c, Label* on_equal);
-  virtual void CheckCharacterAfterAnd(uint32_t c,
-                                      uint32_t mask,
-                                      Label* on_equal);
-  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
-  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
-  // A "greedy loop" is a loop that is both greedy and has a simple
-  // body. It has a particularly simple implementation.
-  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
-  virtual void CheckNotAtStart(Label* on_not_at_start);
-  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
-  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               Label* on_no_match);
-  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
-  virtual void CheckNotCharacterAfterAnd(uint32_t c,
-                                         uint32_t mask,
-                                         Label* on_not_equal);
-  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
-                                              uc16 minus,
-                                              uc16 mask,
-                                              Label* on_not_equal);
-  virtual void CheckCharacterInRange(uc16 from,
-                                     uc16 to,
-                                     Label* on_in_range);
-  virtual void CheckCharacterNotInRange(uc16 from,
-                                        uc16 to,
-                                        Label* on_not_in_range);
-  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
-  // Checks whether the given offset from the current position is before
-  // the end of the string.
-  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
-  virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
-  virtual void Fail();
-  virtual Handle<HeapObject> GetCode(Handle<String> source);
-  virtual void GoTo(Label* label);
-  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
-  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
-  virtual void IfRegisterEqPos(int reg, Label* if_eq);
-  virtual IrregexpImplementation Implementation();
-  virtual void LoadCurrentCharacter(int cp_offset,
-                                    Label* on_end_of_input,
-                                    bool check_bounds = true,
-                                    int characters = 1);
-  virtual void PopCurrentPosition();
-  virtual void PopRegister(int register_index);
-  virtual void PushBacktrack(Label* label);
-  virtual void PushCurrentPosition();
-  virtual void PushRegister(int register_index,
-                            StackCheckFlag check_stack_limit);
-  virtual void ReadCurrentPositionFromRegister(int reg);
-  virtual void ReadStackPointerFromRegister(int reg);
-  virtual void SetCurrentPositionFromEnd(int by);
-  virtual void SetRegister(int register_index, int to);
-  virtual bool Succeed();
-  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
-  virtual void ClearRegisters(int reg_from, int reg_to);
-  virtual void WriteStackPointerToRegister(int reg);
-
-  // Called from RegExp if the stack-guard is triggered.
-  // If the code object is relocated, the return address is fixed before
-  // returning.
-  static int CheckStackGuardState(Address* return_address,
-                                  Code* re_code,
-                                  Address re_frame);
-
- private:
-  // Offsets from ebp of function parameters and stored registers.
-  static const int kFramePointer = 0;
-  // Above the frame pointer - function parameters and return address.
-  static const int kReturn_eip = kFramePointer + kPointerSize;
-  static const int kFrameAlign = kReturn_eip + kPointerSize;
-  // Parameters.
-  static const int kInputString = kFrameAlign;
-  static const int kStartIndex = kInputString + kPointerSize;
-  static const int kInputStart = kStartIndex + kPointerSize;
-  static const int kInputEnd = kInputStart + kPointerSize;
-  static const int kRegisterOutput = kInputEnd + kPointerSize;
-  // For the case of a global regular expression, we have room to store at
-  // least one set of capture results.  For the case of a non-global regexp,
-  // we ignore this value.
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
-  static const int kDirectCall = kStackHighEnd + kPointerSize;
-  static const int kIsolate = kDirectCall + kPointerSize;
-  // Below the frame pointer - local stack variables.
-  // When adding local variables remember to push space for them in
-  // the frame in GetCode.
-  static const int kBackup_esi = kFramePointer - kPointerSize;
-  static const int kBackup_edi = kBackup_esi - kPointerSize;
-  static const int kBackup_ebx = kBackup_edi - kPointerSize;
-  static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
-  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
-  // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
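
// With kPointerSize == 4 these constants work out to the following
// ebp-relative offsets (parameters above the frame pointer, locals below):
//
//   kReturn_eip         = +4    kInputString        = +8
//   kStartIndex         = +12   kInputStart         = +16
//   kInputEnd           = +20   kRegisterOutput     = +24
//   kNumOutputRegisters = +28   kStackHighEnd       = +32
//   kDirectCall         = +36   kIsolate            = +40
//   kBackup_esi         = -4    kBackup_edi         = -8
//   kBackup_ebx         = -12   kSuccessfulCaptures = -16
//   kInputStartMinusOne = -20   kRegisterZero       = -24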
-
-  // Initial size of code buffer.
-  static const size_t kRegExpCodeSize = 1024;
-
-  // Load a number of characters at the given offset from the
-  // current position, into the current-character register.
-  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
-  // Check whether preemption has been requested.
-  void CheckPreemption();
-
-  // Check whether we are exceeding the stack limit on the backtrack stack.
-  void CheckStackLimit();
-
-  // Generate a call to CheckStackGuardState.
-  void CallCheckStackGuardState(Register scratch);
-
-  // The ebp-relative location of a regexp register.
-  Operand register_location(int register_index);
-
-  // The register containing the current character after LoadCurrentCharacter.
-  inline Register current_character() { return edx; }
-
-  // The register containing the backtrack stack top. Provides a meaningful
-  // name to the register.
-  inline Register backtrack_stackpointer() { return ecx; }
-
-  // Byte size of chars in the string to match (decided by the Mode argument)
-  inline int char_size() { return static_cast<int>(mode_); }
-
-  // Equivalent to a conditional branch to the label, unless the label
-  // is NULL, in which case it is a conditional Backtrack.
-  void BranchOrBacktrack(Condition condition, Label* to);
-
-  // Call and return internally in the generated code in a way that
-  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
-  inline void SafeCall(Label* to);
-  inline void SafeReturn();
-  inline void SafeCallTarget(Label* name);
-
-  // Pushes the value of a register on the backtrack stack. Decrements the
-  // stack pointer (ecx) by a word size and stores the register's value there.
-  inline void Push(Register source);
-
-  // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
-  // by a word size and stores the value there.
-  inline void Push(Immediate value);
-
-  // Pops a value from the backtrack stack. Reads the word at the stack pointer
-  // (ecx) and increments it by a word size.
-  inline void Pop(Register target);
-
-  Isolate* isolate() const { return masm_->isolate(); }
-
-  MacroAssembler* masm_;
-
-  // Which mode to generate code for (LATIN1 or UC16).
-  Mode mode_;
-
-  // One greater than maximal register index actually used.
-  int num_registers_;
-
-  // Number of registers to output at the end (the saved registers
-  // are always 0..num_saved_registers_-1)
-  int num_saved_registers_;
-
-  // Labels used internally.
-  Label entry_label_;
-  Label start_label_;
-  Label success_label_;
-  Label backtrack_label_;
-  Label exit_label_;
-  Label check_preempt_label_;
-  Label stack_overflow_label_;
-};
-#endif  // V8_INTERPRETED_REGEXP
-
-}}  // namespace v8::internal
-
-#endif  // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/ia32/simulator-ia32.cc b/src/ia32/simulator-ia32.cc
index 20edae8..d696e4b 100644
--- a/src/ia32/simulator-ia32.cc
+++ b/src/ia32/simulator-ia32.cc
@@ -2,5 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/ia32/simulator-ia32.h"
 
 // Since there is no simulator for the ia32 architecture this file is empty.
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 02a8e9c..076bde8 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -12,7 +12,7 @@
 
 // Since there is no simulator for the ia32 architecture the only thing we can
 // do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
   (entry(p0, p1, p2, p3, p4))
 
 
@@ -21,7 +21,8 @@
 
 // Call the generated regexp code directly. The code at the entry address should
 // expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+                                   p7, p8)                                     \
   (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
 
 
@@ -36,13 +37,18 @@
     return c_limit;
   }
 
-  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+  static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+                                            uintptr_t try_catch_address) {
+    USE(isolate);
     return try_catch_address;
   }
 
-  static inline void UnregisterCTryCatch() { }
+  static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+    USE(isolate);
+  }
 };
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_IA32_SIMULATOR_IA32_H_