Update V8 to r6101 as required by WebKit r74534

Change-Id: I7f84af8dd732f11898fd644b2c2b1538914cb78d
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index ecbdfdc..54cfb5c 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -120,6 +120,30 @@
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
+
+
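
The three accessors above store a GLOBAL_PROPERTY_CELL target as the address of the cell's value slot rather than as a tagged pointer: set_target_cell() writes cell->address() + kValueOffset, and target_cell() undoes both the offset and the heap-object tagging via HeapObject::FromAddress. A minimal sketch of the resulting round-trip invariant, using only the functions above (CheckCellRoundTrip is hypothetical):

    void CheckCellRoundTrip(RelocInfo* info, JSGlobalPropertyCell* cell) {
      info->set_target_cell(cell);          // slot <- cell address + kValueOffset
      ASSERT(info->target_cell() == cell);  // FromAddress restores the tagged cell
    }
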
 Address RelocInfo::call_address() {
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -167,6 +191,8 @@
     visitor->VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -190,6 +216,8 @@
     StaticVisitor::VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    StaticVisitor::VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -246,6 +274,12 @@
 }
 
 
+Immediate::Immediate(Address addr) {
+  x_ = reinterpret_cast<int32_t>(addr);
+  rmode_ = RelocInfo::NONE;
+}
+
+
 void Assembler::emit(uint32_t x) {
   *reinterpret_cast<uint32_t*>(pc_) = x;
   pc_ += sizeof(uint32_t);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 125f503..c173a3d 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license has been modified
 // significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -56,10 +56,10 @@
 
 // The Probe method needs executable memory, so it uses Heap::CreateCode.
 // Allocation failure is silent and leads to a safe default.
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool portable) {
   ASSERT(Heap::HasBeenSetup());
   ASSERT(supported_ == 0);
-  if (Serializer::enabled()) {
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -137,7 +137,7 @@
   found_by_runtime_probing_ = supported_;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= ~os_guarantees;
+  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
 }
 
 
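
Probe() now takes a portable flag. Probe(true) keeps the old snapshot-safe behavior, while Probe(false) records everything the current CPU supports and clears found_by_runtime_probing_, so the probed features may be relied on unconditionally. A hedged usage sketch, mirroring what Generate_OnStackReplacement does below:

    // Code that will never be serialized can re-probe the real CPU and
    // then depend on the result directly.
    CpuFeatures::Probe(false);
    if (!CpuFeatures::IsSupported(SSE2)) {
      // Bail out; the optimized code being entered assumes SSE2.
    }
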
@@ -435,6 +435,13 @@
 }
 
 
+void Assembler::push_imm32(int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x68);
+  emit(imm32);
+}
+
+
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1542,7 +1549,9 @@
   L->bind_to(pc_offset());
 }
 
+
 void Assembler::call(Label* L) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   if (L->is_bound()) {
@@ -1561,6 +1570,7 @@
 
 
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
@@ -1570,6 +1580,7 @@
 
 
 void Assembler::call(const Operand& adr) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xFF);
@@ -1772,6 +1783,14 @@
 }
 
 
+void Assembler::fldln2() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xED);
+}
+
+
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1902,6 +1921,14 @@
 }
 
 
+void Assembler::fyl2x() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF1);
+}
+
+
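
fldln2 pushes the constant ln(2), and fyl2x computes ST(1) * log2(ST(0)) with a pop, so the pair yields a natural logarithm (presumably for the transcendental-function code this update brings in). A sketch of the emission sequence, using the existing fld_d; EmitLn is hypothetical:

    void EmitLn(Assembler* masm, const Operand& x) {
      masm->fldln2();  // ST(0) = ln(2)
      masm->fld_d(x);  // ST(0) = x, ST(1) = ln(2)
      masm->fyl2x();   // pops; ST(0) = ln(2) * log2(x) == ln(x)
    }
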
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2382,6 +2409,7 @@
   emit_sse_operand(dst, src);
 }
 
+
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2404,6 +2432,28 @@
 }
 
 
+void Assembler::movd(const Operand& dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x7E);
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::pand(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xDB);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2427,7 +2477,7 @@
 }
 
 
-void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+void Assembler::psllq(XMMRegister reg, int8_t shift) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2435,7 +2485,32 @@
   EMIT(0x0F);
   EMIT(0x73);
   emit_sse_operand(esi, reg);  // esi == 6
-  EMIT(imm8);
+  EMIT(shift);
+}
+
+
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x70);
+  emit_sse_operand(dst, src);
+  EMIT(shuffle);
+}
+
+
+void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x16);
+  emit_sse_operand(src, dst);
+  EMIT(offset);
 }
 
 
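
pshufd fills each destination lane from the source dword selected by the corresponding two-bit field of the shuffle byte, and pextrd (SSE4.1 only, hence the 0x3A 0x16 escape) stores one selected dword straight to a register or memory operand. A common use is reading the upper half of a double; a hedged sketch assuming SSE2 is available on the fallback path (EmitStoreHighWord is hypothetical):

    void EmitStoreHighWord(Assembler* masm, const Operand& dst,
                           XMMRegister src) {
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatures::Scope scope(SSE4_1);
        masm->pextrd(dst, src, 1);   // dst = src[63:32]
      } else {
        CpuFeatures::Scope scope(SSE2);
        masm->pshufd(xmm0, src, 1);  // lane 0 of xmm0 = lane 1 of src
        masm->movd(dst, xmm0);       // clobbers xmm0
      }
    }
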
@@ -2475,7 +2550,7 @@
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
@@ -2607,9 +2682,15 @@
 }
 
 
-void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
+void Assembler::db(uint8_t data) {
   EnsureSpace ensure_space(this);
-  emit(data, reloc_info);
+  EMIT(data);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  EnsureSpace ensure_space(this);
+  emit(data);
 }
 
 
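
db and dd now emit raw bytes and words with no relocation entry, so anything that needs relocating must be recorded separately by the caller. A small sketch of the intended inline-table use (EmitLengthPrefixedTable is hypothetical):

    void EmitLengthPrefixedTable(Assembler* masm,
                                 const uint32_t* entries, int count) {
      masm->db(static_cast<uint8_t>(count));  // one-byte length prefix
      for (int i = 0; i < count; i++) {
        masm->dd(entries[i]);                 // raw 32-bit entries
      }
    }
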
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 79637a1..11acb56 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
@@ -64,7 +64,36 @@
 // and best performance in optimized code.
 //
 struct Register {
-  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  static const int kNumAllocatableRegisters = 5;
+  static const int kNumRegisters = 8;
+
+  static int ToAllocationIndex(Register reg) {
+    ASSERT(reg.code() < 4 || reg.code() == 7);
+    return (reg.code() == 7) ? 4 : reg.code();
+  }
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return (index == 4) ? from_code(7) : from_code(index);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "eax",
+      "ecx",
+      "edx",
+      "ebx",
+      "edi"
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   // eax, ebx, ecx and edx are byte registers, the rest are not.
   bool is_byte_register() const { return code_ <= 3; }
@@ -93,7 +122,40 @@
 
 
 struct XMMRegister {
-  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  static const int kNumAllocatableRegisters = 7;
+  static const int kNumRegisters = 8;
+
+  static int ToAllocationIndex(XMMRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
+
+  static XMMRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index + 1);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "xmm1",
+      "xmm2",
+      "xmm3",
+      "xmm4",
+      "xmm5",
+      "xmm6",
+      "xmm7"
+    };
+    return names[index];
+  }
+
+  static XMMRegister from_code(int code) {
+    XMMRegister r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -102,6 +164,7 @@
   int code_;
 };
 
+
 const XMMRegister xmm0 = { 0 };
 const XMMRegister xmm1 = { 1 };
 const XMMRegister xmm2 = { 2 };
@@ -111,6 +174,17 @@
 const XMMRegister xmm6 = { 6 };
 const XMMRegister xmm7 = { 7 };
 
+
+typedef XMMRegister DoubleRegister;
+
+
+// Index of register used in pusha/popa.
+// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI
+inline int EspIndexForPushAll(Register reg) {
+  return Register::kNumRegisters - 1 - reg.code();
+}
+
+
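
The allocation index maps the five allocatable registers (eax, ecx, edx, ebx, edi) onto 0..4, skipping esp, ebp and the context register esi, while EspIndexForPushAll inverts the pushad order: EAX is pushed first and therefore lands farthest from esp. A few spot checks as a sketch (CheckRegisterIndices is hypothetical):

    void CheckRegisterIndices() {
      ASSERT(Register::ToAllocationIndex(ebx) == 3);
      ASSERT(Register::ToAllocationIndex(edi) == 4);      // edi takes slot 4
      ASSERT(Register::FromAllocationIndex(4).is(edi));
      ASSERT(EspIndexForPushAll(eax) == 7);  // pushed first, farthest from esp
      ASSERT(EspIndexForPushAll(edi) == 0);  // pushed last, at esp
      ASSERT(XMMRegister::ToAllocationIndex(xmm1) == 0);  // xmm0 is reserved
    }
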
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -202,6 +276,7 @@
   inline explicit Immediate(const ExternalReference& ext);
   inline explicit Immediate(Handle<Object> handle);
   inline explicit Immediate(Smi* value);
+  inline explicit Immediate(Address addr);
 
   static Immediate CodeRelativeOffset(Label* label) {
     return Immediate(label);
@@ -281,6 +356,11 @@
                    RelocInfo::EXTERNAL_REFERENCE);
   }
 
+  static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
+    return Operand(reinterpret_cast<int32_t>(cell.location()),
+                   RelocInfo::GLOBAL_PROPERTY_CELL);
+  }
+
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
@@ -369,9 +449,12 @@
 //   }
 class CpuFeatures : public AllStatic {
  public:
-  // Detect features of the target CPU. Set safe defaults if the serializer
-  // is enabled (snapshots must be portable).
-  static void Probe();
+  // Detect features of the target CPU. If the portable flag is set,
+  // the method sets safe defaults if the serializer is enabled
+  // (snapshots must be portable).
+  static void Probe(bool portable);
+  static void Clear() { supported_ = 0; }
+
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
@@ -484,6 +567,20 @@
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
 
+  // One byte opcode for test eax, 0xXXXXXXXX.
+  static const byte kTestEaxByte = 0xA9;
+  // One byte opcode for test al, 0xXX.
+  static const byte kTestAlByte = 0xA8;
+  // One byte opcode for nop.
+  static const byte kNopByte = 0x90;
+
+  // One byte opcode for a short unconditional jump.
+  static const byte kJmpShortOpcode = 0xEB;
+  // One byte prefix for a short conditional jump.
+  static const byte kJccShortPrefix = 0x70;
+  static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+  static const byte kJcShortOpcode = kJccShortPrefix | carry;
+
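
The short-jump constants compose as 0x70 | condition code; since carry aliases below (code 2) and not_carry aliases above_equal (code 3) in the Condition enum, the derived constants are the standard one-byte opcodes. As a sketch (CheckShortJumpOpcodes is hypothetical):

    void CheckShortJumpOpcodes() {
      ASSERT(Assembler::kJcShortOpcode == 0x72);   // jc rel8
      ASSERT(Assembler::kJncShortOpcode == 0x73);  // jnc rel8
    }
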
   // ---------------------------------------------------------------------------
   // Code generation
   //
@@ -519,6 +616,7 @@
   void popfd();
 
   void push(const Immediate& x);
+  void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
 
@@ -720,6 +818,7 @@
   void fld1();
   void fldz();
   void fldpi();
+  void fldln2();
 
   void fld_s(const Operand& adr);
   void fld_d(const Operand& adr);
@@ -744,6 +843,7 @@
   void fchs();
   void fcos();
   void fsin();
+  void fyl2x();
 
   void fadd(int i);
   void fsub(int i);
@@ -814,12 +914,16 @@
   void movdbl(const Operand& dst, XMMRegister src);
 
   void movd(XMMRegister dst, const Operand& src);
+  void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);
 
+  void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
-  void psllq(XMMRegister reg, int8_t imm8);
+  void psllq(XMMRegister reg, int8_t shift);
+  void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
 
   // Parallel XMM operations.
   void movntdqa(XMMRegister src, const Operand& dst);
@@ -843,12 +947,13 @@
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
-  // Writes a single word of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  void dd(uint32_t data, RelocInfo::Mode reloc_info);
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
 
   int pc_offset() const { return pc_ - buffer_; }
 
@@ -876,8 +981,8 @@
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(Register dst, XMMRegister src);
 
- private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
+ private:
   byte byte_at(int pos)  { return buffer_[pos]; }
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 0ad3e6d..918f346 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,9 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "code-stubs.h"
 #include "codegen-inl.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -480,6 +481,85 @@
 }
 
 
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+
+  __ push(edi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+
+  // Restore function and tear down temporary frame.
+  __ pop(edi);
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(ecx));
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Pass the deoptimization type to the runtime system.
+  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Get the full codegen state from the stack and untag it.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ SmiUntag(ecx);
+
+  // Switch on the state.
+  NearLabel not_no_registers, not_tos_eax;
+  __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+  __ j(not_equal, &not_no_registers);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ cmp(ecx, FullCodeGenerator::TOS_REG);
+  __ j(not_equal, &not_tos_eax);
+  __ ret(2 * kPointerSize);  // Remove state, eax.
+
+  __ bind(&not_tos_eax);
+  __ Abort("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  // TODO(kasperl): Do we need to save/restore the XMM registers too?
+
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection, which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  __ pushad();
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
+  __ popad();
+  __ ret(0);
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   { Label done;
@@ -1418,6 +1498,76 @@
 }
 
 
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // We shouldn't be performing on-stack replacement in the first
+  // place if the CPU features we need for the optimized Crankshaft
+  // code aren't supported.
+  CpuFeatures::Probe(false);
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
+    return;
+  }
+
+  // Get the loop depth of the stack guard check. This is recorded in
+  // a test(eax, depth) instruction right after the call, which the
+  // assembler encodes as a one-byte test al, depth (see kTestAlByte).
+  Label stack_check;
+  __ mov(ebx, Operand(esp, 0));  // return address
+  if (FLAG_debug_code) {
+    __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
+    __ Assert(equal, "test eax instruction not found after loop stack check");
+  }
+  __ movzx_b(ebx, Operand(ebx, 1));  // depth
+
+  // Get the loop nesting level at which we allow OSR from the
+  // unoptimized code and check if we want to do OSR yet. If not we
+  // should perform a stack guard check so we can get interrupts while
+  // waiting for on-stack replacement.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
+  __ j(greater, &stack_check);
+
+  // Pass the function to optimize as the argument to the on-stack
+  // replacement runtime function.
+  __ EnterInternalFrame();
+  __ push(eax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  NearLabel skip;
+  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ j(not_equal, &skip);
+  __ ret(0);
+
+  // If we decide not to perform on-stack replacement we perform a
+  // stack guard check to enable interrupts.
+  __ bind(&stack_check);
+  NearLabel ok;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, taken);
+  StackCheckStub stub;
+  __ TailCallStub(&stub);
+  __ Abort("Unreachable code: returned from tail call.");
+  __ bind(&ok);
+  __ ret(0);
+
+  __ bind(&skip);
+  // Untag the AST id and push it on the stack.
+  __ SmiUntag(eax);
+  __ push(eax);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
+}
+
+
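
For reference, the unoptimized call site that Generate_OnStackReplacement pairs with looks roughly like this (assumed shape; the depth byte is emitted by the full codegen's stack guard check):

    // call <OnStackReplacement builtin>  ; return address read into ebx above
    // test al, <loop nesting depth>      ; 0xA8 imm8: kTestAlByte, then depth
    //
    // On a normal return the test executes as a harmless flag-setting no-op;
    // its immediate is only there so the byte at return-address + 1 holds
    // the loop depth.
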
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 5975ad2..a371c96 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,8 @@
   __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+         Immediate(Factory::undefined_value()));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -446,6 +448,11 @@
                                  Label* non_float,
                                  Register scratch);
 
+  // Checks that the two floating point numbers on top of the FPU stack
+  // have int32 values.
+  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
+                                         Label* non_int32);
+
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
   static void LoadAsIntegers(MacroAssembler* masm,
@@ -460,8 +467,16 @@
                                      bool use_sse3,
                                      Label* operand_conversion_failure);
 
-  // Test if operands are smis or heap numbers and load them
-  // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
+  // operands are pushed on the stack, and that their conversions to int32
+  // are in eax and ecx.  Checks that the original numbers were in the int32
+  // range.
+  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
+                                           bool use_sse3,
+                                           Label* not_int32);
+
+  // Assumes that operands are smis or heap numbers and loads them
+  // into xmm0 and xmm1. Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSSE2Operands(MacroAssembler* masm);
 
@@ -474,6 +489,12 @@
   // Similar to LoadSSE2Operands but assumes that both operands are smis.
   // Expects operands in edx, eax.
   static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+
+  // Checks that the two floating point numbers loaded into xmm0 and xmm1
+  // have int32 values.
+  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
+                                        Label* non_int32,
+                                        Register scratch);
 };
 
 
@@ -709,22 +730,27 @@
     case Token::SHL: {
       Comment perform_float(masm, "-- Perform float operation on smis");
       __ bind(&use_fp_on_smis);
-      // Result we want is in left == edx, so we can put the allocated heap
-      // number in eax.
-      __ AllocateHeapNumber(eax, ecx, ebx, slow);
-      // Store the result in the HeapNumber and return.
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        __ cvtsi2sd(xmm0, Operand(left));
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
+        // Result we want is in left == edx, so we can put the allocated heap
+        // number in eax.
+        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(left));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          // It's OK to overwrite the right argument on the stack because we
+          // are about to return.
+          __ mov(Operand(esp, 1 * kPointerSize), left);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        GenerateReturn(masm);
       } else {
-        // It's OK to overwrite the right argument on the stack because we
-        // are about to return.
-        __ mov(Operand(esp, 1 * kPointerSize), left);
-        __ fild_s(Operand(esp, 1 * kPointerSize));
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
+        __ jmp(slow);
       }
-      GenerateReturn(masm);
       break;
     }
 
@@ -757,31 +783,36 @@
         default: UNREACHABLE();
           break;
       }
-      __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Smis(masm, ebx);
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
+      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
+        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+        } else {  // SSE2 not available, use FPU.
+          FloatingPointHelper::LoadFloatSmis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
         }
-        __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
-      } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::LoadFloatSmis(masm, ebx);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
-        }
-        __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+        __ mov(eax, ecx);
+        GenerateReturn(masm);
+      } else {
+        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
+        __ jmp(slow);
       }
-      __ mov(eax, ecx);
-      GenerateReturn(masm);
       break;
     }
 
@@ -821,6 +852,13 @@
 
   __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
 
+  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
+    Label slow;
+    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
+    __ bind(&slow);
+    GenerateTypeTransition(masm);
+  }
+
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
@@ -1215,42 +1253,1284 @@
 }
 
 
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  __ push(edx);
+  __ push(eax);
+  // Left and right arguments are now on top.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operands_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
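
GenerateTypeTransition leaves the following five arguments for kTypeRecordingBinaryOp_Patch (the saved-args variant below builds the same layout from arguments already on the stack):

    // [esp +  0]  return address
    // [esp +  4]  Smi(operands_type_)
    // [esp +  8]  Smi(op_)
    // [esp + 12]  Smi(MinorKey())
    // [esp + 16]  right operand (eax)
    // [esp + 20]  left operand (edx)
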
+// Prepare for a type transition runtime call when the args are already on
+// the stack, under the return address.
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  // Left and right arguments are already on top of the stack.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operands_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case TRBinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case TRBinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case TRBinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case TRBinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case TRBinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case TRBinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               TRBinaryOpIC::GetName(operands_type_));
+  return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+    Label* slow,
+    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+  // dividend in eax and edx free for the division.  Use eax, ebx for those.
+  Comment load_comment(masm, "-- Load arguments");
+  Register left = edx;
+  Register right = eax;
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    left = eax;
+    right = ebx;
+    __ mov(ebx, eax);
+    __ mov(eax, edx);
+  }
+
+  // 2. Prepare the smi check of both operands by oring them together.
+  Comment smi_check_comment(masm, "-- Smi check arguments");
+  Label not_smis;
+  Register combined = ecx;
+  ASSERT(!left.is(combined) && !right.is(combined));
+  switch (op_) {
+    case Token::BIT_OR:
+      // Perform the operation into eax and smi check the result.  Preserve
+      // eax in case the result is not a smi.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      combined = right;
+      break;
+
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      __ mov(combined, right);
+      __ or_(combined, Operand(left));
+      break;
+
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Move the right operand into ecx for the shift operation, use eax
+      // for the smi check register.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));
+      combined = right;
+      break;
+
+    default:
+      break;
+  }
+
+  // 3. Perform the smi check of the operands.
+  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
+  __ test(combined, Immediate(kSmiTagMask));
+  __ j(not_zero, &not_smis, not_taken);
+
+  // 4. Operands are both smis, perform the operation leaving the result in
+  // eax and check the result if necessary.
+  Comment perform_smi(masm, "-- Perform smi operation");
+  Label use_fp_on_smis;
+  switch (op_) {
+    case Token::BIT_OR:
+      // Nothing to do.
+      break;
+
+    case Token::BIT_XOR:
+      ASSERT(right.is(eax));
+      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      break;
+
+    case Token::BIT_AND:
+      ASSERT(right.is(eax));
+      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      break;
+
+    case Token::SHL:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shl_cl(left);
+      // Check that the *signed* result fits in a smi.
+      __ cmp(left, 0xc0000000);
+      __ j(sign, &use_fp_on_smis, not_taken);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SAR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ sar_cl(left);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SHR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shr_cl(left);
+      // Check that the *unsigned* result fits in a smi.
+      // Neither of the two high-order bits can be set:
+      // - 0x80000000: high bit would be lost when smi tagging.
+      // - 0x40000000: this number would convert to negative when
+      // smi tagging. These two cases can only happen with shifts
+      // by 0 or 1 when handed a valid smi.
+      __ test(left, Immediate(0xc0000000));
+      __ j(not_zero, slow, not_taken);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::ADD:
+      ASSERT(right.is(eax));
+      __ add(right, Operand(left));  // Addition is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(left, Operand(right));
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ mov(eax, left);
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      // We can't revert the multiplication if the result is not a smi
+      // so save the right operand.
+      __ mov(ebx, right);
+      // Remove tag from one of the operands (but keep sign).
+      __ SmiUntag(right);
+      // Do multiplication.
+      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+      break;
+
+    case Token::DIV:
+      // We can't revert the division if the result is not a smi so
+      // save the left operand.
+      __ mov(edi, left);
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &use_fp_on_smis, not_taken);
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by idiv
+      // instruction.
+      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      __ j(equal, &use_fp_on_smis);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      __ j(not_zero, &use_fp_on_smis);
+      // Tag the result and store it in register eax.
+      __ SmiTag(eax);
+      break;
+
+    case Token::MOD:
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &not_smis, not_taken);
+
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(edx, combined, slow);
+      // Move remainder to register eax.
+      __ mov(eax, edx);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // 5. Emit return of result in eax.  Some operations have registers pushed.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      __ ret(0);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      __ ret(2 * kPointerSize);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // 6. For some operations emit inline code to perform floating point
+  // operations on known smis (e.g., if the result of the operation
+  // overflowed the smi range).
+  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+    __ bind(&use_fp_on_smis);
+    switch (op_) {
+      // Undo the effects of some operations, and some register moves.
+      case Token::SHL:
+        // The arguments are saved on the stack, and only used from there.
+        break;
+      case Token::ADD:
+        // Revert right = right + left.
+        __ sub(right, Operand(left));
+        break;
+      case Token::SUB:
+        // Revert left = left - right.
+        __ add(left, Operand(right));
+        break;
+      case Token::MUL:
+        // Right was clobbered but a copy is in ebx.
+        __ mov(right, ebx);
+        break;
+      case Token::DIV:
+        // Left was clobbered but a copy is in edi.  Right is in ebx for
+        // division.  They should be in eax, ebx for the jump to not_smis.
+        __ mov(eax, edi);
+        break;
+      default:
+        // No other operators jump to use_fp_on_smis.
+        break;
+    }
+    __ jmp(&not_smis);
+  } else {
+    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
+    switch (op_) {
+      case Token::SHL: {
+        Comment perform_float(masm, "-- Perform float operation on smis");
+        __ bind(&use_fp_on_smis);
+        // Result we want is in left == edx, so we can put the allocated heap
+        // number in eax.
+        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(left));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          // It's OK to overwrite the right argument on the stack because we
+          // are about to return.
+          __ mov(Operand(esp, 1 * kPointerSize), left);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);
+        break;
+      }
+
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        Comment perform_float(masm, "-- Perform float operation on smis");
+        __ bind(&use_fp_on_smis);
+        // Restore arguments to edx, eax.
+        switch (op_) {
+          case Token::ADD:
+            // Revert right = right + left.
+            __ sub(right, Operand(left));
+            break;
+          case Token::SUB:
+            // Revert left = left - right.
+            __ add(left, Operand(right));
+            break;
+          case Token::MUL:
+            // Right was clobbered but a copy is in ebx.
+            __ mov(right, ebx);
+            break;
+          case Token::DIV:
+            // Left was clobbered but a copy is in edi.  Right is in ebx for
+            // division.
+            __ mov(edx, edi);
+            __ mov(eax, right);
+            break;
+          default: UNREACHABLE();
+            break;
+        }
+        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+        } else {  // SSE2 not available, use FPU.
+          FloatingPointHelper::LoadFloatSmis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+        }
+        __ mov(eax, ecx);
+        __ ret(0);
+        break;
+      }
+
+      default:
+        break;
+    }
+  }
+
+  // 7. Non-smi operands, fall out to the non-smi code with the operands in
+  // edx and eax.
+  Comment done_comment(masm, "-- Enter non-smi code");
+  __ bind(&not_smis);
+  switch (op_) {
+    case Token::BIT_OR:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Right operand is saved in ecx and eax was destroyed by the smi
+      // check.
+      __ mov(eax, ecx);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Operands are in eax, ebx at this point.
+      __ mov(edx, eax);
+      __ mov(eax, ebx);
+      break;
+
+    default:
+      break;
+  }
+}
+
+
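
The 0xc0000000 tests in GenerateSmiCode are smi-range checks: a 32-bit result fits in a smi iff it lies in [-0x40000000, 0x3fffffff], and subtracting 0xc0000000 leaves the sign flag clear for exactly that range (SHR instead requires both top bits clear, since its result is unsigned). Worked boundary cases:

    // 0x3fffffff - 0xc0000000 == 0x7fffffff  -> sign clear, fits
    // 0x40000000 - 0xc0000000 == 0x80000000  -> sign set, does not fit
    // 0xc0000000 - 0xc0000000 == 0x00000000  -> sign clear, -2^30 fits
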
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateRegisterArgsPush(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+  } else {
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      GenerateTypeTransition(masm);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+  ASSERT(op_ == Token::ADD);
+  // If one of the arguments is a string, call the string add stub.
+  // Otherwise, transition to the generic TRBinaryOpIC type.
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  NearLabel left_not_string;
+  __ test(left, Immediate(kSmiTagMask));
+  __ j(zero, &left_not_string);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &left_not_string);
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ test(right, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // Neither argument is a string.
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      Label not_int32;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // Check result type if it is currently Int32.
+        if (result_type_ <= TRBinaryOpIC::INT32) {
+          __ cvttsd2si(ecx, Operand(xmm0));
+          __ cvtsi2sd(xmm2, Operand(ecx));
+          __ ucomisd(xmm0, xmm2);
+          __ j(not_zero, &not_int32);
+          __ j(carry, &not_int32);
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      __ bind(&not_int32);
+      GenerateTypeTransition(masm);
+      break;
+    }
+
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      GenerateRegisterArgsPush(masm);
+      Label not_floats;
+      Label not_int32;
+      Label non_smi_result;
+      /*  {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
+        }*/
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &not_floats);
+      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+                                                        &not_int32);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we store in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+      }
+
+      __ bind(&not_floats);
+      __ bind(&not_int32);
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If an allocation fails, or SHR or MOD hit a hard case,
+  // use the runtime system to get the correct result.
+  __ bind(&call_runtime);
+
+  switch (op_) {
+    case Token::ADD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
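
The int32-result check in GenerateInt32Stub is a truncate-and-reconvert round trip: cvttsd2si yields 0x80000000 for out-of-range inputs, and ucomisd sets ZF only for an ordered equal compare while NaN sets CF (and PF), which is why both the not_zero and the carry branch are needed:

    // cvttsd2si ecx, xmm0   // truncate; out-of-range inputs become 0x80000000
    // cvtsi2sd  xmm2, ecx   // widen the int32 back to a double
    // ucomisd   xmm0, xmm2  // ZF=1 only if xmm0 held exactly that int32;
    //                       // CF=1 on less-than or unordered (NaN)
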
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
+         operands_type_ == TRBinaryOpIC::INT32);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      GenerateTypeTransition(masm);
+      break;
+    }
+
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      GenerateRegisterArgsPush(masm);
+      Label not_floats;
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &not_floats);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we store in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+      }
+
+      __ bind(&not_floats);
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If an allocation fails, or SHR or MOD hit a hard case,
+  // use the runtime system to get the correct result.
+  __ bind(&call_runtime);
+
+  switch (op_) {
+    case Token::ADD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime;
+
+  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateRegisterArgsPush(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+      __ bind(&not_floats);
+      break;
+    }
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &call_runtime);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop the arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we store in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);
+      }
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If all else fails, use the runtime system to get the correct
+  // result.
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD: {
+      GenerateRegisterArgsPush(masm);
+      // Test for string arguments before calling runtime.
+      // Registers containing left and right operands respectively.
+      Register lhs, rhs;
+      lhs = edx;
+      rhs = eax;
+
+      // Test if left operand is a string.
+      NearLabel lhs_not_string;
+      __ test(lhs, Immediate(kSmiTagMask));
+      __ j(zero, &lhs_not_string);
+      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+      __ j(above_equal, &lhs_not_string);
+
+      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+      __ TailCallStub(&string_add_left_stub);
+
+      NearLabel call_add_runtime;
+      // Left operand is not a string, test right.
+      __ bind(&lhs_not_string);
+      __ test(rhs, Immediate(kSmiTagMask));
+      __ j(zero, &call_add_runtime);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+      __ j(above_equal, &call_add_runtime);
+
+      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+      __ TailCallStub(&string_add_right_stub);
+
+      // Neither argument is a string.
+      __ bind(&call_add_runtime);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    }
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+    MacroAssembler* masm,
+    Label* alloc_failure) {
+  Label skip_allocation;
+  OverwriteMode mode = mode_;
+  switch (mode) {
+    case OVERWRITE_LEFT: {
+      // If the argument in edx is already an object, we skip the
+      // allocation of a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now edx can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ mov(edx, Operand(ebx));
+      __ bind(&skip_allocation);
+      // Use object in edx as a result holder
+      __ mov(eax, Operand(edx));
+      break;
+    }
+    case OVERWRITE_RIGHT:
+      // If the argument in eax is already an object, we skip the
+      // allocation of a heap number.
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Fall through!
+    case NO_OVERWRITE:
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now eax can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ mov(eax, ebx);
+      __ bind(&skip_allocation);
+      break;
+    default: UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
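+  // Tuck the register arguments under the return address:
+  // {ret} -> {ret, eax, edx}, i.e. esp[4] == eax and esp[8] == edx on exit.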
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+  __ push(ecx);
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  // Input on stack:
-  // esp[4]: argument (should be number).
-  // esp[0]: return address.
-  // Test that eax is a number.
+  // TAGGED case:
+  //   Input:
+  //     esp[4]: tagged number input argument (should be number).
+  //     esp[0]: return address.
+  //   Output:
+  //     eax: tagged double result.
+  // UNTAGGED case:
+  //   Input:
+  //     esp[0]: return address.
+  //     xmm1: untagged double input argument.
+  //   Output:
+  //     xmm1: untagged double result.
+
   Label runtime_call;
   Label runtime_call_clear_stack;
-  NearLabel input_not_smi;
-  NearLabel loaded;
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &input_not_smi);
-  // Input is a smi. Untag and load it onto the FPU stack.
-  // Then load the low and high words of the double into ebx, edx.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  __ sar(eax, 1);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
-  __ mov(Operand(esp, 0), eax);
-  __ fild_s(Operand(esp, 0));
-  __ fst_d(Operand(esp, 0));
-  __ pop(edx);
-  __ pop(ebx);
-  __ jmp(&loaded);
-  __ bind(&input_not_smi);
-  // Check if input is a HeapNumber.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
-  __ j(not_equal, &runtime_call);
-  // Input is a HeapNumber. Push it on the FPU stack and load its
-  // low and high words into ebx, edx.
-  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
-  __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+  Label skip_cache;
+  const bool tagged = (argument_type_ == TAGGED);
+  if (tagged) {
+    // Test that eax is a number.
+    NearLabel input_not_smi;
+    NearLabel loaded;
+    __ mov(eax, Operand(esp, kPointerSize));
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(not_zero, &input_not_smi);
+    // Input is a smi. Untag and load it onto the FPU stack.
+    // Then load the low and high words of the double into ebx, edx.
+    STATIC_ASSERT(kSmiTagSize == 1);
+    __ sar(eax, 1);
+    __ sub(Operand(esp), Immediate(2 * kPointerSize));
+    __ mov(Operand(esp, 0), eax);
+    __ fild_s(Operand(esp, 0));
+    __ fst_d(Operand(esp, 0));
+    __ pop(edx);
+    __ pop(ebx);
+    __ jmp(&loaded);
+    __ bind(&input_not_smi);
+    // Check if input is a HeapNumber.
+    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+    __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+    __ j(not_equal, &runtime_call);
+    // Input is a HeapNumber. Push it on the FPU stack and load its
+    // low and high words into ebx, edx.
+    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+    __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
 
-  __ bind(&loaded);
-  // ST[0] == double value
+    __ bind(&loaded);
+  } else {  // UNTAGGED.
+    if (CpuFeatures::IsSupported(SSE4_1)) {
+      CpuFeatures::Scope sse4_scope(SSE4_1);
+      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
+    } else {
+      __ pshufd(xmm0, xmm1, 0x1);
+      __ movd(Operand(edx), xmm0);
+    }
+    __ movd(Operand(ebx), xmm1);
+  }
+
+  // ST[0] or xmm1 == double value
   // ebx = low 32 bits of double value
   // edx = high 32 bits of double value
   // Compute hash (the shifts are arithmetic):
@@ -1266,7 +2546,7 @@
   ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
   __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
 
-  // ST[0] == double value.
+  // ST[0] or xmm1 == double value.
   // ebx = low 32 bits of double value.
   // edx = high 32 bits of double value.
   // ecx = TranscendentalCache::hash(double value).
@@ -1303,33 +2583,83 @@
   __ j(not_equal, &cache_miss);
   // Cache hit!
   __ mov(eax, Operand(ecx, 2 * kIntSize));
-  __ fstp(0);
-  __ ret(kPointerSize);
+  if (tagged) {
+    __ fstp(0);
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ Ret();
+  }
 
   __ bind(&cache_miss);
   // Update cache with new value.
   // We are short on registers, so use no_reg as scratch.
   // This gives slightly larger code.
-  __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+  if (tagged) {
+    __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+  } else {  // UNTAGGED.
+    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
+    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ movdbl(Operand(esp, 0), xmm1);
+    __ fld_d(Operand(esp, 0));
+    __ add(Operand(esp), Immediate(kDoubleSize));
+  }
   GenerateOperation(masm);
   __ mov(Operand(ecx, 0), ebx);
   __ mov(Operand(ecx, kIntSize), edx);
   __ mov(Operand(ecx, 2 * kIntSize), eax);
   __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ ret(kPointerSize);
+  if (tagged) {
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ Ret();
 
-  __ bind(&runtime_call_clear_stack);
-  __ fstp(0);
-  __ bind(&runtime_call);
-  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+    // Skip cache and return answer directly, only in untagged case.
+    __ bind(&skip_cache);
+    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ movdbl(Operand(esp, 0), xmm1);
+    __ fld_d(Operand(esp, 0));
+    GenerateOperation(masm);
+    __ fstp_d(Operand(esp, 0));
+    __ movdbl(xmm1, Operand(esp, 0));
+    __ add(Operand(esp), Immediate(kDoubleSize));
+    // We return the value in xmm1 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+    // Allocate an unused object bigger than a HeapNumber.
+    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }
+
+  // Call runtime, doing whatever allocation and cleanup is necessary.
+  if (tagged) {
+    __ bind(&runtime_call_clear_stack);
+    __ fstp(0);
+    __ bind(&runtime_call);
+    __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+  } else {  // UNTAGGED.
+    __ bind(&runtime_call_clear_stack);
+    __ bind(&runtime_call);
+    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+    __ EnterInternalFrame();
+    __ push(eax);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ Ret();
+  }
 }
 
 
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   switch (type_) {
-    // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
       return Runtime::kAbort;
@@ -1339,85 +2669,90 @@
 
 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
   // Only free register is edi.
-  NearLabel done;
-  ASSERT(type_ == TranscendentalCache::SIN ||
-         type_ == TranscendentalCache::COS);
-  // More transcendental types can be added later.
+  // Input value is on FP stack, and also in ebx/edx.
+  // Input value is possibly in xmm1.
+  // Address of result (a newly allocated HeapNumber) may be in eax.
+  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+    // Both fsin and fcos require arguments in the range +/-2^63 and
+    // return NaN for infinities and NaN. They can share all code except
+    // the actual fsin/fcos operation.
+    NearLabel in_range, done;
+    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+    // work. We must reduce it to the appropriate range.
+    __ mov(edi, edx);
+    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+    int supported_exponent_limit =
+        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
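+    // (1086 << 20 == 0x43E00000: doubles whose biased exponent is at least
+    // 63 + 1023 have magnitude >= 2^63, beyond what fsin/fcos handle.)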
+    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+    __ j(below, &in_range, taken);
+    // Check for infinity and NaN. Both return NaN for sin.
+    __ cmp(Operand(edi), Immediate(0x7ff00000));
+    NearLabel non_nan_result;
+    __ j(not_equal, &non_nan_result, taken);
+    // Input is +/-Infinity or NaN. Result is NaN.
+    __ fstp(0);
+    // NaN is represented by 0x7ff8000000000000.
+    __ push(Immediate(0x7ff80000));
+    __ push(Immediate(0));
+    __ fld_d(Operand(esp, 0));
+    __ add(Operand(esp), Immediate(2 * kPointerSize));
+    __ jmp(&done);
 
-  // Both fsin and fcos require arguments in the range +/-2^63 and
-  // return NaN for infinities and NaN. They can share all code except
-  // the actual fsin/fcos operation.
-  NearLabel in_range;
-  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
-  // work. We must reduce it to the appropriate range.
-  __ mov(edi, edx);
-  __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
-  int supported_exponent_limit =
-      (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-  __ cmp(Operand(edi), Immediate(supported_exponent_limit));
-  __ j(below, &in_range, taken);
-  // Check for infinity and NaN. Both return NaN for sin.
-  __ cmp(Operand(edi), Immediate(0x7ff00000));
-  NearLabel non_nan_result;
-  __ j(not_equal, &non_nan_result, taken);
-  // Input is +/-Infinity or NaN. Result is NaN.
-  __ fstp(0);
-  // NaN is represented by 0x7ff8000000000000.
-  __ push(Immediate(0x7ff80000));
-  __ push(Immediate(0));
-  __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
-  __ jmp(&done);
+    __ bind(&non_nan_result);
 
-  __ bind(&non_nan_result);
+    // Use fpmod to restrict argument to the range +/-2*PI.
+    __ mov(edi, eax);  // Save eax before using fnstsw_ax.
+    __ fldpi();
+    __ fadd(0);
+    __ fld(1);
+    // FPU Stack: input, 2*pi, input.
+    {
+      NearLabel no_exceptions;
+      __ fwait();
+      __ fnstsw_ax();
+      // Clear if Illegal Operand or Zero Division exceptions are set.
+      __ test(Operand(eax), Immediate(5));
+      __ j(zero, &no_exceptions);
+      __ fnclex();
+      __ bind(&no_exceptions);
+    }
 
-  // Use fpmod to restrict argument to the range +/-2*PI.
-  __ mov(edi, eax);  // Save eax before using fnstsw_ax.
-  __ fldpi();
-  __ fadd(0);
-  __ fld(1);
-  // FPU Stack: input, 2*pi, input.
-  {
-    NearLabel no_exceptions;
-    __ fwait();
-    __ fnstsw_ax();
-    // Clear if Illegal Operand or Zero Division exceptions are set.
-    __ test(Operand(eax), Immediate(5));
-    __ j(zero, &no_exceptions);
-    __ fnclex();
-    __ bind(&no_exceptions);
+    // Compute st(0) % st(1)
+    {
+      NearLabel partial_remainder_loop;
+      __ bind(&partial_remainder_loop);
+      __ fprem1();
+      __ fwait();
+      __ fnstsw_ax();
+      __ test(Operand(eax), Immediate(0x400 /* C2 */));
+      // If C2 is set, computation only has partial result. Loop to
+      // continue computation.
+      __ j(not_zero, &partial_remainder_loop);
+    }
+    // FPU Stack: input, 2*pi, input % 2*pi
+    __ fstp(2);
+    __ fstp(0);
+    __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
+
+    // FPU Stack: input % 2*pi
+    __ bind(&in_range);
+    switch (type_) {
+      case TranscendentalCache::SIN:
+        __ fsin();
+        break;
+      case TranscendentalCache::COS:
+        __ fcos();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    __ bind(&done);
+  } else {
+    ASSERT(type_ == TranscendentalCache::LOG);
+    __ fldln2();
+    __ fxch();
+    __ fyl2x();
   }
-
-  // Compute st(0) % st(1)
-  {
-    NearLabel partial_remainder_loop;
-    __ bind(&partial_remainder_loop);
-    __ fprem1();
-    __ fwait();
-    __ fnstsw_ax();
-    __ test(Operand(eax), Immediate(0x400 /* C2 */));
-    // If C2 is set, computation only has partial result. Loop to
-    // continue computation.
-    __ j(not_zero, &partial_remainder_loop);
-  }
-  // FPU Stack: input, 2*pi, input % 2*pi
-  __ fstp(2);
-  __ fstp(0);
-  __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
-
-  // FPU Stack: input % 2*pi
-  __ bind(&in_range);
-  switch (type_) {
-    case TranscendentalCache::SIN:
-      __ fsin();
-      break;
-    case TranscendentalCache::COS:
-      __ fcos();
-      break;
-    default:
-      UNREACHABLE();
-  }
-  __ bind(&done);
 }
 
 
@@ -1701,6 +3036,13 @@
 }
 
 
+void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
+                                                       bool use_sse3,
+                                                       Label* not_int32) {
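+  // No checks are emitted here; the previously loaded integers are simply
+  // treated as having been int32 values.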
+  return;
+}
+
+
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   NearLabel load_smi, done;
@@ -1796,6 +3138,22 @@
 }
 
 
+void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
+                                                    Label* non_int32,
+                                                    Register scratch) {
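+  // Round-trip each operand through int32: the conversion there and back is
+  // exact only for integral, in-range values. After ucomisd, not_zero
+  // (ZF == 0) signals inequality and carry (CF == 1) signals an unordered
+  // (NaN) comparison; either one rejects the operand.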
+  __ cvttsd2si(scratch, Operand(xmm0));
+  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ ucomisd(xmm0, xmm2);
+  __ j(not_zero, non_int32);
+  __ j(carry, non_int32);
+  __ cvttsd2si(scratch, Operand(xmm1));
+  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ ucomisd(xmm1, xmm2);
+  __ j(not_zero, non_int32);
+  __ j(carry, non_int32);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch,
                                             ArgLocation arg_location) {
@@ -1879,6 +3237,12 @@
 }
 
 
+void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
+                                                     Label* non_int32) {
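+  // No checks are emitted here; the float operands are simply treated as
+  // having been int32 values.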
+  return;
+}
+
+
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done, undo;
 
@@ -2013,6 +3377,160 @@
 }
 
 
+void MathPowStub::Generate(MacroAssembler* masm) {
+  // Registers are used as follows:
+  // edx = base
+  // eax = exponent
+  // ecx = temporary, result
+
+  CpuFeatures::Scope use_sse2(SSE2);
+  Label allocate_return, call_runtime;
+
+  // Load input parameters.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // Save 1 in xmm3 - we need this several times later on.
+  __ mov(ecx, Immediate(1));
+  __ cvtsi2sd(xmm3, Operand(ecx));
+
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number go to that specific case.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &exponent_nonsmi);
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &base_nonsmi);
+
+  // Optimized version for the case when both exponent and base are smis.
+  Label powi;
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&powi);
+  // The exponent is a smi and the base is a heap number.
+  __ bind(&base_nonsmi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // Optimized version of pow if exponent is a smi.
+  // xmm0 contains the base.
+  __ bind(&powi);
+  __ SmiUntag(eax);
+
+  // Save the exponent in edx; we need to check its sign later. This is safe
+  // because base and exponent are in different registers and the base value
+  // is already in xmm0.
+  __ mov(edx, eax);
+
+  // Get absolute value of exponent.
+  NearLabel no_neg;
+  __ cmp(eax, 0);
+  __ j(greater_equal, &no_neg);
+  __ neg(eax);
+  __ bind(&no_neg);
+
+  // Load xmm1 with 1.
+  __ movsd(xmm1, xmm3);
+  NearLabel while_true;
+  NearLabel no_multiply;
+
+  __ bind(&while_true);
+  __ shr(eax, 1);
+  __ j(not_carry, &no_multiply);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ test(eax, Operand(eax));
+  __ mulsd(xmm0, xmm0);
+  __ j(not_zero, &while_true);
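+  // (Square-and-multiply over the exponent bits, least significant first.
+  // For |exponent| = 5 = 0b101, say, xmm1 becomes x, is left untouched, and
+  // finally becomes x * x^4 = x^5.)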
+
+  // edx holds the original value of the exponent - if the exponent is
+  // negative, return 1/result.
+  __ test(edx, Operand(edx));
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ mov(ecx, Immediate(0x7FB00000));
+  __ movd(xmm0, Operand(ecx));
+  __ cvtss2sd(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  __ j(equal, &call_runtime);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // The exponent (or both operands) is a heap number, so from here on we
+  // work on doubles.
+  __ bind(&exponent_nonsmi);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  // Test if exponent is NaN.
+  __ ucomisd(xmm1, xmm1);
+  __ j(parity_even, &call_runtime);
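+  // (ucomisd of a value against itself is unordered, and thus sets the
+  // parity flag, exactly when the value is NaN.)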
+
+  NearLabel base_not_smi;
+  NearLabel handle_special_cases;
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &base_not_smi);
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&handle_special_cases);
+
+  __ bind(&base_not_smi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+  __ and_(ecx, HeapNumber::kExponentMask);
+  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  // base is NaN or +/-Infinity
+  __ j(greater_equal, &call_runtime);
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  NearLabel not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ mov(ecx, Immediate(0xBF000000));
+  __ movd(xmm2, Operand(ecx));
+  __ cvtss2sd(xmm2, xmm2);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half);
+
+  // Calculates reciprocal of square root.
+  // Note that 1/sqrt(x) = sqrt(1/x).
+  __ divsd(xmm3, xmm0);
+  __ movsd(xmm1, xmm3);
+  __ sqrtsd(xmm1, xmm1);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &call_runtime);
+  // Calculates square root.
+  __ movsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+
+  __ bind(&allocate_return);
+  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
+  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
+  __ mov(eax, ecx);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in edx and the parameter count is in eax.
 
@@ -2507,6 +4025,87 @@
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  NearLabel done;
+  __ mov(ebx, Operand(esp, kPointerSize * 3));
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slowcase);
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ j(above, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
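+  // A smi length n is stored as n << 1, so scaling it by
+  // times_half_pointer_size yields n * kPointerSize bytes of elements.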
+  // Allocate RegExpResult followed by FixedArray with size in ebx.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,
+                        ebx,  // In: Number of elements (times 2, being a smi)
+                        eax,  // Out: Start of allocation (tagged).
+                        ecx,  // Out: End of allocation.
+                        edx,  // Scratch register
+                        &slowcase,
+                        TAG_OBJECT);
+  // eax: Start of allocated area, object-tagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(ecx, Immediate(Factory::empty_fixed_array()));
+  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
+  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
+
+  // Set input, index and length fields from arguments.
+  __ mov(ecx, Operand(esp, kPointerSize * 1));
+  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
+  __ mov(ecx, Operand(esp, kPointerSize * 2));
+  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
+  __ mov(ecx, Operand(esp, kPointerSize * 3));
+  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
+
+  // Fill out the elements FixedArray.
+  // eax: JSArray.
+  // ebx: FixedArray.
+  // ecx: Number of elements in array, as smi.
+
+  // Set map.
+  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  // Set length.
+  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
+  // Fill contents of fixed-array with the-hole.
+  __ SmiUntag(ecx);
+  __ mov(edx, Immediate(Factory::the_hole_value()));
+  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  // Fill fixed array elements with hole.
+  // eax: JSArray.
+  // ecx: Number of elements to fill.
+  // ebx: Start of elements in FixedArray.
+  // edx: the hole.
+  Label loop;
+  __ test(ecx, Operand(ecx));
+  __ bind(&loop);
+  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ sub(Operand(ecx), Immediate(1));
+  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register object,
                                                          Register result,
@@ -3125,7 +4724,7 @@
   __ j(zero, &failure_returned, not_taken);
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles_);
   __ ret(0);
 
   // Handling of failure.
@@ -3225,7 +4824,7 @@
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles_);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -3375,76 +4974,125 @@
 
 
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // Get the object - go slow case if it's a smi.
-  Label slow;
-  __ mov(eax, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, function
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  // Fixed register usage throughout the stub.
+  Register object = eax;  // Object (lhs).
+  Register map = ebx;  // Map of the object.
+  Register function = edx;  // Function (rhs).
+  Register prototype = edi;  // Prototype of the function.
+  Register scratch = ecx;
+
+  // Get the object and function - they are always both needed.
+  Label slow, not_js_object;
+  if (!args_in_registers()) {
+    __ mov(object, Operand(esp, 2 * kPointerSize));
+    __ mov(function, Operand(esp, 1 * kPointerSize));
+  }
 
   // Check that the left hand is a JS object.
-  __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
-  // Get the prototype of the function.
-  __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
-  // edx is function, eax is map.
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, &not_js_object, not_taken);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
 
   // Look up the function and the map in the instanceof cache.
   NearLabel miss;
   ExternalReference roots_address = ExternalReference::roots_address();
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+  __ cmp(function,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
   __ j(not_equal, &miss);
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+  __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
   __ j(not_equal, &miss);
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
-  __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
-  __ ret(2 * kPointerSize);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+  __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
+  __ IncrementCounter(&Counters::instance_of_cache, 1);
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
 
   __ bind(&miss);
-  __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
-  __ test(ebx, Immediate(kSmiTagMask));
+  __ test(prototype, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);
-  __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
 
-  // Register mapping:
-  //   eax is object map.
-  //   edx is function.
-  //   ebx is function prototype.
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+  // Update the global instanceof cache with the current map and function. The
+  // cached answer will be set when it is known.
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
+         function);
 
-  __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
-
-  // Loop through the prototype chain looking for the function prototype.
+  // Loop through the prototype chain of the object looking for the function
+  // prototype.
+  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
   NearLabel loop, is_instance, is_not_instance;
   __ bind(&loop);
-  __ cmp(ecx, Operand(ebx));
+  __ cmp(scratch, Operand(prototype));
   __ j(equal, &is_instance);
-  __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+  __ cmp(Operand(scratch), Immediate(Factory::null_value()));
   __ j(equal, &is_not_instance);
-  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
 
   __ bind(&is_instance);
+  __ IncrementCounter(&Counters::instance_of_stub_true, 1);
   __ Set(eax, Immediate(0));
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
-  __ ret(2 * kPointerSize);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
 
   __ bind(&is_not_instance);
+  __ IncrementCounter(&Counters::instance_of_stub_false, 1);
   __ Set(eax, Immediate(Smi::FromInt(1)));
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
-  __ ret(2 * kPointerSize);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before the null, smi and string value checks, check that the rhs is a
+  // function, as a non-function rhs requires an exception to be thrown.
+  __ test(function, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+  __ j(not_equal, &slow, not_taken);
+
+  // Null is not an instance of anything.
+  __ cmp(object, Factory::null_value());
+  __ j(not_equal, &object_not_null);
+  __ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(not_zero, &object_not_null_or_smi, not_taken);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
+  __ j(NegateCondition(is_string), &slow);
+  __ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
   __ bind(&slow);
+  if (args_in_registers()) {
+    // Push arguments below return address.
+    __ pop(scratch);
+    __ push(object);
+    __ push(function);
+    __ push(scratch);
+  }
+  __ IncrementCounter(&Counters::instance_of_slow, 1);
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
 }
 
@@ -4573,6 +6221,192 @@
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
+
+void StringCharAtStub::Generate(MacroAssembler* masm) {
+  // Expects two arguments (object, index) on the stack:
+
+  // Stack frame on entry.
+  //  esp[0]: return address
+  //  esp[4]: index
+  //  esp[8]: object
+
+  Register object = ebx;
+  Register index = eax;
+  Register scratch1 = ecx;
+  Register scratch2 = edx;
+  Register result = eax;
+
+  __ pop(scratch1);  // Return address.
+  __ pop(index);
+  __ pop(object);
+  __ push(scratch1);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ Set(result, Immediate(Factory::empty_string()));
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ Set(result, Immediate(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&done);
+  __ ret(0);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SMIS);
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ or_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  if (GetCondition() == equal) {
+    // For equality we do not care about the sign of the result.
+    __ sub(eax, Operand(edx));
+  } else {
+    NearLabel done;
+    __ sub(edx, Operand(eax));
+    __ j(no_overflow, &done);
+    // Correct sign of result in case of overflow.
+    __ not_(edx);
+    __ bind(&done);
+    __ mov(eax, edx);
+  }
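+  // (Callers only inspect the sign of eax. If edx - eax overflows, the sign
+  // bit is inverted; the not_ above flips it back: a large negative edx
+  // minus a large positive eax, for example, wraps to a positive value
+  // whose complement is negative again.)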
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  NearLabel generic_stub;
+  NearLabel unordered;
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &generic_stub, not_taken);
+
+  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Inline the double comparison and fall back to the general compare
+  // stub if NaN is involved or SSE2 or CMOV is unsupported.
+  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
+    CpuFeatures::Scope scope1(SSE2);
+    CpuFeatures::Scope scope2(CMOV);
+
+    // Load left and right operand
+    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+
+    // Compare operands
+    __ ucomisd(xmm0, xmm1);
+
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, not_taken);
+
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    // Use mov, not xor, because xor would destroy the flag register.
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, Operand(ecx));
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, Operand(ecx));
+    __ ret(0);
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+  __ bind(&generic_stub);
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::OBJECTS);
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+
+  ASSERT(GetCondition() == equal);
+  __ sub(eax, Operand(edx));
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  // Stash the operands on the stack, below the return address, so they can
+  // be restored after the call.
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+  __ push(ecx);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+  __ EnterInternalFrame();
+  __ push(edx);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+
+  // Compute the entry point of the rewritten stub.
+  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+
+  // Restore registers.
+  __ pop(ecx);
+  __ pop(eax);
+  __ pop(edx);
+  __ push(ecx);
+
+  // Do a tail call to the rewritten stub.
+  __ jmp(Operand(edi));
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 351636f..f66a8c7 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -40,13 +40,21 @@
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public CodeStub {
  public:
-  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
-      : type_(type) {}
+  enum ArgumentType {
+    TAGGED = 0,
+    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+  };
+
+  explicit TranscendentalCacheStub(TranscendentalCache::Type type,
+                                   ArgumentType argument_type)
+      : type_(type), argument_type_(argument_type) {}
   void Generate(MacroAssembler* masm);
  private:
   TranscendentalCache::Type type_;
+  ArgumentType argument_type_;
+
   Major MajorKey() { return TranscendentalCache; }
-  int MinorKey() { return type_; }
+  int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
   void GenerateOperation(MacroAssembler* masm);
 };
@@ -83,7 +91,7 @@
         args_in_registers_(false),
         args_reversed_(false),
         static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
         name_(NULL) {
     if (static_operands_type_.IsSmi()) {
       mode_ = NO_OVERWRITE;
@@ -117,6 +125,11 @@
         || op_ == Token::MUL || op_ == Token::DIV;
   }
 
+  void SetArgsInRegisters() {
+    ASSERT(ArgsInRegistersSupported());
+    args_in_registers_ = true;
+  }
+
  private:
   Token::Value op_;
   OverwriteMode mode_;
@@ -157,7 +170,7 @@
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
   class StaticTypeInfoBits: public BitField<int, 13, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -185,7 +198,6 @@
     return (op_ == Token::ADD) || (op_ == Token::MUL);
   }
 
-  void SetArgsInRegisters() { args_in_registers_ = true; }
   void SetArgsReversed() { args_reversed_ = true; }
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
   bool HasArgsInRegisters() { return args_in_registers_; }
@@ -207,6 +219,123 @@
     return BinaryOpIC::ToState(runtime_operands_type_);
   }
 
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
+  friend class CodeGenerator;
+};
+
+
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  TypeRecordingBinaryOpStub(
+      int key,
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        operands_type_(operands_type),
+        result_type_(result_type),
+        name_(NULL) { }
+
+  // Generate code to call the stub with the supplied arguments. This adds
+  // code at the call site that prepares the arguments, either in registers or
+  // on the stack, together with the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ private:
+  enum SmiCodeGenerateHeapNumberResults {
+    ALLOW_HEAPNUMBER_RESULTS,
+    NO_HEAPNUMBER_RESULTS
+  };
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  bool use_sse3_;
+
+  // Operand type information determined at runtime.
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           TRBinaryOpIC::GetName(operands_type_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class SSE3Bits: public BitField<bool, 9, 1> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+  Major MajorKey() { return TypeRecordingBinaryOp; }
+  int MinorKey() {
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | SSE3Bits::encode(use_sse3_)
+           | OperandTypeInfoBits::encode(operands_type_)
+           | ResultTypeInfoBits::encode(result_type_);
+  }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm,
+                       Label* slow,
+                       SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+  void GenerateUninitializedStub(MacroAssembler* masm);
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateInt32Stub(MacroAssembler* masm);
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateStringStub(MacroAssembler* masm);
+  void GenerateGenericStub(MacroAssembler* masm);
+
+  void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
+  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return TRBinaryOpIC::ToState(operands_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
+  }
+
   friend class CodeGenerator;
 };
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 04ff200..2f14e82 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -104,12 +104,12 @@
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -7398,6 +7398,7 @@
   Load(args->at(1));
   Load(args->at(2));
   Load(args->at(3));
+
   RegExpExecStub stub;
   Result result = frame_->CallStub(&stub, 4);
   frame_->Push(&result);
@@ -7405,91 +7406,15 @@
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
+
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope;
 
-    Label slowcase;
-    Label done;
-    __ mov(ebx, Operand(esp, kPointerSize * 2));
-    __ test(ebx, Immediate(kSmiTagMask));
-    __ j(not_zero, &slowcase);
-    __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
-    __ j(above, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                          times_half_pointer_size,
-                          ebx,  // In: Number of elements (times 2, being a smi)
-                          eax,  // Out: Start of allocation (tagged).
-                          ecx,  // Out: End of allocation.
-                          edx,  // Scratch register
-                          &slowcase,
-                          TAG_OBJECT);
-    // eax: Start of allocated area, object-tagged.
-
-    // Set JSArray map to global.regexp_result_map().
-    // Set empty properties FixedArray.
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    // Interleave operations for better latency.
-    __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
-    __ mov(ecx, Immediate(Factory::empty_fixed_array()));
-    __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
-    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
-    __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
-    __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
-    __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
-    __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
-    // Set input, index and length fields from arguments.
-    __ pop(FieldOperand(eax, JSRegExpResult::kInputOffset));
-    __ pop(FieldOperand(eax, JSRegExpResult::kIndexOffset));
-    __ pop(ecx);
-    __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
-    // Fill out the elements FixedArray.
-    // eax: JSArray.
-    // ebx: FixedArray.
-    // ecx: Number of elements in array, as smi.
-
-    // Set map.
-    __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
-           Immediate(Factory::fixed_array_map()));
-    // Set length.
-    __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
-    // Fill contents of fixed-array with the-hole.
-    __ SmiUntag(ecx);
-    __ mov(edx, Immediate(Factory::the_hole_value()));
-    __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
-    // Fill fixed array elements with hole.
-    // eax: JSArray.
-    // ecx: Number of elements to fill.
-    // ebx: Start of elements in FixedArray.
-    // edx: the hole.
-    Label loop;
-    __ test(ecx, Operand(ecx));
-    __ bind(&loop);
-    __ j(less_equal, &done);  // Jump if ecx is negative or zero.
-    __ sub(Operand(ecx), Immediate(1));
-    __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
-    __ bind(&done);
-  }
-  frame_->Forget(3);
-  frame_->Push(eax);
+  RegExpConstructResultStub stub;
+  Result result = frame_->CallStub(&stub, 3);
+  frame_->Push(&result);
 }
 
 
@@ -7987,7 +7912,8 @@
 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
   Result result = frame_->CallStub(&stub, 1);
   frame_->Push(&result);
 }
@@ -7996,7 +7922,18 @@
 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
   Result result = frame_->CallStub(&stub, 1);
   frame_->Push(&result);
 }
@@ -8266,7 +8203,6 @@
       switch (op) {
         case Token::SUB: {
           __ neg(value.reg());
-          frame_->Push(&value);
           if (node->no_negative_zero()) {
             // -MIN_INT is MIN_INT with the overflow flag set.
             unsafe_bailout_->Branch(overflow);
@@ -8279,18 +8215,17 @@
         }
         case Token::BIT_NOT: {
           __ not_(value.reg());
-          frame_->Push(&value);
           break;
         }
         case Token::ADD: {
           // Unary plus has no effect on int32 values.
-          frame_->Push(&value);
           break;
         }
         default:
           UNREACHABLE();
           break;
       }
+      frame_->Push(&value);
     } else {
       Load(node->expression());
       bool can_overwrite = node->expression()->ResultOverwriteAllowed();
@@ -9208,7 +9143,7 @@
     case Token::INSTANCEOF: {
       if (!left_already_loaded) Load(left);
       Load(right);
-      InstanceofStub stub;
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       Result answer = frame_->CallStub(&stub, 2);
       answer.ToRegister();
       __ test(answer.reg(), Operand(answer.reg()));
@@ -10082,14 +10017,15 @@
 
 #define __ masm.
 
+
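+// Fallback returned when no executable chunk can be allocated for the
+// specialized copy routine generated below.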
+static void MemCopyWrapper(void* dest, const void* src, size_t size) {
+  memcpy(dest, src, size);
+}
+
+
 MemCopyFunction CreateMemCopyFunction() {
-  size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
-  CHECK(buffer);
-  HandleScope handles;
-  MacroAssembler masm(buffer, static_cast<int>(actual_size));
+  HandleScope scope;
+  MacroAssembler masm(NULL, 1 * KB);
 
   // Generated code is put into a fixed, unmovable, buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10183,6 +10119,7 @@
       __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
       __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
 
+      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
       __ pop(esi);
       __ pop(edi);
       __ ret(0);
@@ -10229,6 +10166,7 @@
       __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
       __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
 
+      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
       __ pop(esi);
       __ pop(edi);
       __ ret(0);
@@ -10272,6 +10210,7 @@
     __ mov(eax, Operand(src, count, times_1, -4));
     __ mov(Operand(dst, count, times_1, -4), eax);
 
+    __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
     __ pop(esi);
     __ pop(edi);
     __ ret(0);
@@ -10279,8 +10218,15 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  // Call the function from C++.
-  return FUNCTION_CAST<MemCopyFunction>(buffer);
+  ASSERT(desc.reloc_size == 0);
+
+  // Copy the generated code into an executable chunk and return a pointer
+  // to the first instruction in it as a C++ function pointer.
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  if (chunk == NULL) return &MemCopyWrapper;
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
 }
 
 #undef __
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index d1a2036..46b12cb 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -43,9 +43,6 @@
 class RegisterFile;
 class RuntimeCallHelper;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
 
 // -------------------------------------------------------------------------
 // Reference support
@@ -310,6 +307,9 @@
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -398,8 +398,9 @@
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
+  virtual void VisitSlot(Slot* node);
 #define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
@@ -705,8 +706,9 @@
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
   void GenerateMathSqrt(ZoneList<Expression*>* args);
+  void GenerateMathLog(ZoneList<Expression*>* args);
 
-  // Check whether two RegExps are equivalent
+  // Check whether two RegExps are equivalent.
   void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
 
   void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
@@ -782,6 +784,7 @@
   friend class FastCodeGenerator;
   friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
+  friend class LCodeGen;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index b15140f..d64257f 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -42,7 +42,11 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Clear();
+  CpuFeatures::Probe(true);
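+  // Crankshaft requires SSE2 and cannot be used while serializing.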
+  if (!CpuFeatures::IsSupported(SSE2) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
 
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
new file mode 100644
index 0000000..d95df3e
--- /dev/null
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -0,0 +1,615 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+
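+// Size in bytes of a deoptimization table entry: a five-byte push of the
+// entry index followed by a five-byte jump to the common entry code
+// (checked by the ASSERT in TableEntryGenerator::GeneratePrologue below).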
+int Deoptimizer::table_entry_size_ = 10;
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it will be made invalid by
+  // the code patching below and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint, insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    int deoptimization_index = table.GetDeoptimizationIndex(i);
+    int gap_code_size = table.GetGapCodeSize(i);
+#ifdef DEBUG
+    // Destroy the code which is not supposed to run again.
+    unsigned instructions = pc_offset - last_pc_offset;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (unsigned i = 0; i < instructions; i++) {
+      destroyer.masm()->int3();
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      CodePatcher patcher(
+          code->instruction_start() + pc_offset + gap_code_size,
+          Assembler::kCallInstructionLength);
+      patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+                           RelocInfo::NONE);
+      last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+    }
+  }
+#ifdef DEBUG
+  // Destroy the code which is not supposed to run again.
+  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (unsigned i = 0; i < instructions; i++) {
+    destroyer.masm()->int3();
+  }
+#endif
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+  }
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  // The stack check code matches the pattern (on ia32, for example):
+  //
+  //     cmp esp, <limit>
+  //     jae ok
+  //     call <stack guard>
+  // ok: ...
+  //
+  // We will patch the code to:
+  //
+  //     cmp esp, <limit>  ;; Not changed
+  //     nop
+  //     nop
+  //     call <on-stack replacement>
+  // ok:
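+  //
+  // The two-byte jae (0x73 0x05) is overwritten with two one-byte nops so
+  // the call instruction stays in place; only its target is rewritten.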
+  Address call_target_address = rinfo->pc();
+  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
+         *(call_target_address - 2) == 0x05 &&  // offset
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x90;  // nop
+  *(call_target_address - 2) = 0x90;  // nop
+  rinfo->set_target_address(replacement_code->entry());
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  Address call_target_address = rinfo->pc();
+  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+         *(call_target_address - 2) == 0x90 &&  // nop
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x73;  // jae
+  *(call_target_address - 2) = 0x05;  // offset
+  rinfo->set_target_address(check_code->entry());
+}
+
+
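+// Returns the index of the deoptimization entry whose AST id matches the
+// given one and whose translation describes exactly one frame, which is
+// the form used for on-stack replacement.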
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+  // TODO(kasperl): This should not be the bailout_id_. It should be
+  // the ast id. Confusing.
+  ASSERT(bailout_id_ == ast_id);
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Set them up explicitly.
+  for (int i = 0; ok && i < 4; i++) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      PrintF("    [esp + %d] <- 0x%08x ; [esp + %d] (fixed part)\n",
+             output_offset,
+             input_value,
+             input_offset);
+    }
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+    output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
+}
+
+
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be read from the function as long as we don't
+  // optimize functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function->context());
+  // The context for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) output_frame->SetRegister(esi.code(), value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Code* continuation = (bailout_type_ == EAGER)
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+
+  if (output_count_ - 1 == frame_index) iterator->Done();
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+  CpuFeatures::Scope scope(SSE2);
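+  // SSE2 is always available here: Crankshaft is disabled on CPUs without
+  // SSE2 (see CPU::Setup), so no code that can deoptimize is generated on
+  // them.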
+
+  // Save all general purpose registers before messing with them.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  const int kDoubleRegsSize = kDoubleSize *
+                              XMMRegister::kNumAllocatableRegisters;
+  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ movdbl(Operand(esp, offset), xmm_reg);
+  }
+
+  __ pushad();
+
+  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+                                      kDoubleRegsSize;
+
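+  // The stack now holds, from the top: the pushad() block, then the saved
+  // XMM registers, and above those the bailout id pushed by the entry
+  // table.
+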
+  // Get the bailout id from the stack.
+  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object if possible
+  // and compute the fp-to-sp delta in register edx.
+  if (type() == EAGER) {
+    __ Set(ecx, Immediate(0));
+    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+  } else {
+    __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
+  __ sub(edx, Operand(ebp));
+  __ neg(edx);
+
+  // Allocate a new deoptimizer object.
+  __ PrepareCallCFunction(5, eax);
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
+  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
+  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
+  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+
+  // Preserve deoptimizer object in register eax and get the input
+  // frame descriptor pointer.
+  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
+
+  // Fill in the input registers.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize));
+    __ mov(Operand(ebx, offset), ecx);
+  }
+
+  // Fill in the double input registers.
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ movdbl(xmm0, Operand(esp, src_offset));
+    __ movdbl(Operand(ebx, dst_offset), xmm0);
+  }
+
+  // Remove the bailout id and the general purpose registers from the stack.
+  if (type() == EAGER) {
+    __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize));
+  } else {
+    __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
+
+  // Compute a pointer to the unwinding limit in register ecx; that is
+  // the first stack slot not part of the input frame.
+  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ add(ecx, Operand(esp));
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(Operand(edx, 0));
+  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, Operand(esp));
+  __ j(not_equal, &pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(eax);
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+  __ pop(eax);
+
+  // Replace the current frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: eax = current FrameDescription**, edx = one past the
+  // last FrameDescription**.
+  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+  __ lea(edx, Operand(eax, edx, times_4, 0));
+  __ bind(&outer_push_loop);
+  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
+  __ mov(ebx, Operand(eax, 0));
+  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+  __ test(ecx, Operand(ecx));
+  __ j(not_zero, &inner_push_loop);
+  __ add(Operand(eax), Immediate(kPointerSize));
+  __ cmp(eax, Operand(edx));
+  __ j(below, &outer_push_loop);
+
+  // In case of OSR, we have to restore the XMM registers.
+  if (type() == OSR) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+    }
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ push(Operand(ebx, FrameDescription::state_offset()));
+  }
+  __ push(Operand(ebx, FrameDescription::pc_offset()));
+  __ push(Operand(ebx, FrameDescription::continuation_offset()));
+
+  // Push the registers from the last output frame.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ push(Operand(ebx, offset));
+  }
+
+  // Restore the registers from the stack.
+  __ popad();
+
+  // Return to the continuation point.
+  __ ret(0);
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    __ push_imm32(i);
+    __ jmp(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 52c2b38..dfbcbb7 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -733,7 +733,9 @@
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
             case 0xEB: mnem = "fldpi"; break;
+            case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF1: mnem = "fyl2x"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
@@ -1105,6 +1107,21 @@
             } else {
               UnimplementedInstruction();
             }
+          } else if (*data == 0x3A) {
+            data++;
+            if (*data == 0x16) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              int8_t imm8 = static_cast<int8_t>(data[1]);
+              AppendToBuffer("pextrd %s,%s,%d",
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm),
+                             static_cast<int>(imm8));
+              data += 2;
+            } else {
+              UnimplementedInstruction();
+            }
           } else if (*data == 0x2E || *data == 0x2F) {
             const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
             data++;
@@ -1127,6 +1144,14 @@
                            NameOfCPURegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (*data == 0x54) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("andpd %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0x57) {
             data++;
             int mod, regop, rm;
@@ -1147,6 +1172,25 @@
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
             data += PrintRightOperand(data);
+          } else if (*data == 0x70) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            AppendToBuffer("pshufd %s,%s,%d",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm),
+                           static_cast<int>(imm8));
+            data += 2;
+          } else if (*data == 0x73) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            AppendToBuffer("psllq %s,%d",
+                           NameOfXMMRegister(rm),
+                           static_cast<int>(imm8));
+            data += 2;
           } else if (*data == 0x7F) {
             AppendToBuffer("movdqa ");
             data++;
@@ -1154,6 +1198,21 @@
             get_modrm(*data, &mod, &regop, &rm);
             data += PrintRightOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (*data == 0x7E) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movd ");
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (*data == 0xDB) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("pand %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0xE7) {
             AppendToBuffer("movntdq ");
             data++;
@@ -1162,30 +1221,13 @@
             data += PrintRightOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
           } else if (*data == 0xEF) {
-             data++;
-             int mod, regop, rm;
-             get_modrm(*data, &mod, &regop, &rm);
-             AppendToBuffer("pxor %s,%s",
-                            NameOfXMMRegister(regop),
-                            NameOfXMMRegister(rm));
-             data++;
-          } else if (*data == 0x73) {
-             data++;
-             int mod, regop, rm;
-             get_modrm(*data, &mod, &regop, &rm);
-             int8_t imm8 = static_cast<int8_t>(data[1]);
-             AppendToBuffer("psllq %s,%d",
-                            NameOfXMMRegister(rm),
-                            static_cast<int>(imm8));
-             data += 2;
-          } else if (*data == 0x54) {
-             data++;
-             int mod, regop, rm;
-             get_modrm(*data, &mod, &regop, &rm);
-             AppendToBuffer("andpd %s,%s",
-                            NameOfXMMRegister(regop),
-                            NameOfXMMRegister(rm));
-             data++;
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("pxor %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else {
             UnimplementedInstruction();
           }
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index c3fe6c7..8084694 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -49,6 +49,10 @@
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+
+// Number of registers for which space is reserved in safepoints.
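+// On ia32 these are the eight general-purpose registers saved by pushad.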
+static const int kNumSafepointRegisters = 8;
+
 // ----------------------------------------------------
 
 
@@ -90,6 +94,7 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
+  static const int kFixedFrameSize    =  4;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 8d6942d..13a1177 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -41,8 +41,61 @@
 namespace v8 {
 namespace internal {
 
+
 #define __ ACCESS_MASM(masm_)
 
+
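+// Helper for emitting the patchable smi checks used together with inline
+// caches. The jump emitted here is rewritten once type feedback is
+// available (jc becomes jz, jnc becomes jnz), and EmitPatchInfo records
+// the distance from the jump to the IC call site in a test instruction
+// so the patching code can locate the jump.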
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm)
+      : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+    __ test(reg, Immediate(kSmiTagMask));
+    EmitJump(not_carry, target);  // Always taken before patched.
+  }
+
+  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+    __ test(reg, Immediate(kSmiTagMask));
+    EmitJump(carry, target);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+    ASSERT(is_int8(delta_to_patch_site));
+    __ test(eax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  // jc will be patched with jz, jnc will become jnz.
+  void EmitJump(Condition cc, NearLabel* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    ASSERT(cc == carry || cc == not_carry);
+    __ bind(&patch_site_);
+    __ j(cc, target);
+  }
+
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
@@ -168,7 +221,12 @@
     }
   }
 
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailout(info->function(), NO_REGISTERS);
     NearLabel ok;
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit();
@@ -179,10 +237,6 @@
     __ bind(&ok);
   }
 
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
     VisitStatements(function()->body());
@@ -202,6 +256,27 @@
 }
 
 
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  NearLabel ok;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, taken);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
+  // Loop stack checks can be patched to perform on-stack
+  // replacement. In order to decide whether or not to perform OSR we
+  // embed the loop depth in a test instruction after the call so we
+  // can extract it from the OSR builtin.
+  ASSERT(loop_depth() > 0);
+  __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+}
+
+
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
@@ -218,7 +293,7 @@
     Label check_exit_codesize;
     masm_->bind(&check_exit_codesize);
 #endif
-    CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+    SetSourcePosition(function()->end_position() - 1);
     __ RecordJSReturn();
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
@@ -271,6 +346,7 @@
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   // For simplicity we always test the accumulator register.
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -314,22 +390,26 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -369,13 +449,14 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -408,8 +489,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -432,6 +513,10 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (flag) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
@@ -523,6 +608,32 @@
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  NearLabel skip;
+  if (should_normalize) __ jmp(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
+  if (should_normalize) {
+    __ cmp(eax, Factory::true_value());
+    Split(equal, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -633,8 +744,10 @@
   Comment cmnt(masm_, "[ SwitchStatement");
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
+
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -659,12 +772,13 @@
     // Perform the comparison as if via '==='.
     __ mov(edx, Operand(esp, 0));  // Switch value.
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
       NearLabel slow_case;
       __ mov(ecx, edx);
       __ or_(ecx, Operand(eax));
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &slow_case, not_taken);
+      patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
@@ -672,11 +786,11 @@
       __ bind(&slow_case);
     }
 
-    CompareFlags flags = inline_smi_code
-        ? NO_SMI_COMPARE_IN_STUB
-        : NO_COMPARE_FLAGS;
-    CompareStub stub(equal, true, flags);
-    __ CallStub(&stub);
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    EmitCallIC(ic, &patch_site);
+
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -702,6 +816,7 @@
   }
 
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -853,27 +968,20 @@
   __ bind(&update_each);
   __ mov(result_register(), ebx);
   // Perform the assignment as if via '='.
-  EmitAssignment(stmt->each());
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit;
-  NearLabel stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-  __ jmp(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ jmp(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -888,8 +996,14 @@
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() &&
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      scope()->is_function_scope() &&
       info->num_literals() == 0 &&
       !pretenure) {
     FastNewClosureStub stub;
@@ -1235,12 +1349,15 @@
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForAccumulatorValue(value);
-          __ mov(ecx, Immediate(key->handle()));
-          __ mov(edx, Operand(esp, 0));
           if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(ecx, Immediate(key->handle()));
+            __ mov(edx, Operand(esp, 0));
             Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
             EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
           }
           break;
         }
@@ -1288,6 +1405,7 @@
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->constant_elements()));
   if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+    ASSERT(expr->depth() == 1);
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     __ CallStub(&stub);
@@ -1329,6 +1447,8 @@
 
     // Update the write barrier for the array store.
     __ RecordWrite(ebx, offset, result_register(), ecx);
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1373,17 +1493,30 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case KEYED_PROPERTY:
+    case KEYED_PROPERTY: {
       if (expr->is_compound()) {
-        VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+          __ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
       } else {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+          __ push(Immediate(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
       }
       break;
+    }
   }
 
   if (expr->is_compound()) {
@@ -1401,6 +1534,12 @@
       }
     }
 
+    // For property compound assignments we need another deoptimization
+    // point after the property load.
+    if (property != NULL) {
+      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+    }
+
     Token::Value op = expr->binary_op();
     ConstantOperand constant = ShouldInlineSmiCase(op)
         ? GetConstantOperand(op, expr->target(), expr->value())
@@ -1426,6 +1565,9 @@
     } else {
       EmitBinaryOp(op, mode);
     }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1438,6 +1580,8 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(eax);
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1469,26 +1613,25 @@
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  NearLabel call_stub;
-  Label done;
+  NearLabel call_stub, done;
   __ add(Operand(eax), Immediate(value));
   __ j(overflow, &call_stub);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &done);
 
   // Undo the optimistic add operation and call the shared stub.
   __ bind(&call_stub);
   __ sub(Operand(eax), Immediate(value));
   Token::Value op = Token::ADD;
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+  TypeRecordingBinaryOpStub stub(op, mode);
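+  // The stub expects the left operand in edx and the right one in eax.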
   if (left_is_constant_smi) {
-    __ push(Immediate(value));
-    __ push(eax);
+    __ mov(edx, Immediate(value));
   } else {
-    __ push(eax);
-    __ push(Immediate(value));
+    __ mov(edx, eax);
+    __ mov(eax, Immediate(value));
   }
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
   __ bind(&done);
   context()->Plug(eax);
 }
@@ -1498,7 +1641,7 @@
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  Label call_stub, done;
+  NearLabel call_stub, done;
   if (left_is_constant_smi) {
     __ mov(ecx, eax);
     __ mov(eax, Immediate(value));
@@ -1507,24 +1650,22 @@
     __ sub(Operand(eax), Immediate(value));
   }
   __ j(overflow, &call_stub);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &done);
 
   __ bind(&call_stub);
-  if (left_is_constant_smi)  {
-    __ push(Immediate(value));
-    __ push(ecx);
+  if (left_is_constant_smi) {
+    __ mov(edx, Immediate(value));
+    __ mov(eax, ecx);
   } else {
-    // Undo the optimistic sub operation.
-    __ add(Operand(eax), Immediate(value));
-
-    __ push(eax);
-    __ push(Immediate(value));
+    __ add(Operand(eax), Immediate(value));  // Undo the subtraction.
+    __ mov(edx, eax);
+    __ mov(eax, Immediate(value));
   }
-
   Token::Value op = Token::SUB;
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  __ CallStub(&stub);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
   __ bind(&done);
   context()->Plug(eax);
 }
@@ -1534,19 +1675,21 @@
                                                Token::Value op,
                                                OverwriteMode mode,
                                                Smi* value) {
-  Label call_stub, smi_case, done;
+  NearLabel call_stub, smi_case, done;
   int shift_value = value->value() & 0x1f;
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
+  // Call stub.
   __ bind(&call_stub);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  __ push(eax);
-  __ push(Immediate(value));
-  __ CallStub(&stub);
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   switch (op) {
     case Token::SHL:
@@ -1596,18 +1739,19 @@
                                              Token::Value op,
                                              OverwriteMode mode,
                                              Smi* value) {
-  Label smi_case, done;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  NearLabel smi_case, done;
 
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
+
   // The order of the arguments does not matter for bit-ops with a
   // constant operand.
-  __ push(Immediate(value));
-  __ push(eax);
-  __ CallStub(&stub);
+  __ mov(edx, Immediate(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   switch (op) {
     case Token::BIT_OR:
@@ -1675,24 +1819,20 @@
 
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
-  Label done, stub_call, smi_case;
+  NearLabel done, smi_case, stub_call;
   __ pop(edx);
   __ mov(ecx, eax);
   __ or_(eax, Operand(edx));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
   __ bind(&stub_call);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  if (stub.ArgsInRegistersSupported()) {
-    stub.GenerateCall(masm_, edx, ecx);
-  } else {
-    __ push(edx);
-    __ push(ecx);
-    __ CallStub(&stub);
-  }
+  __ mov(eax, ecx);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   __ mov(eax, edx);  // Copy left operand in case of a stub call.
 
@@ -1769,20 +1909,14 @@
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
-  TypeInfo type = TypeInfo::Unknown();
-  GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
-  if (stub.ArgsInRegistersSupported()) {
-    __ pop(edx);
-    stub.GenerateCall(masm_, edx, eax);
-  } else {
-    __ push(result_register());
-    __ CallStub(&stub);
-  }
+  __ pop(edx);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1830,6 +1964,8 @@
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+  context()->Plug(eax);
 }
 
 
@@ -1902,8 +2038,6 @@
     }
     __ bind(&done);
   }
-
-  context()->Plug(eax);
 }
 
 
@@ -1940,10 +2074,10 @@
     __ push(Operand(esp, kPointerSize));  // Receiver is under value.
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
-    context()->DropAndPlug(1, eax);
-  } else {
-    context()->Plug(eax);
+    __ Drop(1);
   }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(eax);
 }
 
 
@@ -1981,6 +2115,7 @@
     __ pop(eax);
   }
 
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
 }
 
@@ -1992,13 +2127,14 @@
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(eax);
   }
-  context()->Plug(eax);
 }
 
 
@@ -2008,17 +2144,18 @@
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
     __ Set(ecx, Immediate(name));
   }
   // Record source position of the IC call.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->Plug(eax);
@@ -2040,17 +2177,18 @@
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position of the IC call.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
@@ -2061,16 +2199,17 @@
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, eax);
@@ -2078,6 +2217,12 @@
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -2089,7 +2234,7 @@
     // arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
-    { PreserveStatementPositionScope pos_scope(masm()->positions_recorder());
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
       VisitForStackValue(fun);
       // Reserved receiver slot.
       __ push(Immediate(Factory::undefined_value()));
@@ -2119,10 +2264,11 @@
       __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
     }
     // Record source position for debugger.
-    SetSourcePosition(expr->position(), FORCED_POSITION);
+    SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
     __ CallStub(&stub);
+    RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, eax);
@@ -2135,7 +2281,7 @@
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
       EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
@@ -2181,15 +2327,15 @@
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
       // for a regular property use keyed EmitCallIC.
-      { PreserveStatementPositionScope scope(masm()->positions_recorder());
+      { PreservePositionScope scope(masm()->positions_recorder());
         VisitForStackValue(prop->obj());
       }
       if (prop->is_synthetic()) {
-        { PreserveStatementPositionScope scope(masm()->positions_recorder());
+        { PreservePositionScope scope(masm()->positions_recorder());
           VisitForAccumulatorValue(prop->key());
         }
         // Record source code position for IC call.
-        SetSourcePosition(prop->position(), FORCED_POSITION);
+        SetSourcePosition(prop->position());
         __ pop(edx);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -2214,7 +2360,7 @@
         loop_depth() == 0) {
       lit->set_try_full_codegen(true);
     }
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
     // Load global receiver object.
@@ -2223,6 +2369,11 @@
     // Emit function call.
     EmitCallWithStub(expr);
   }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
@@ -2270,6 +2421,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2289,6 +2441,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2321,6 +2474,7 @@
   __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2342,6 +2496,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2365,6 +2520,7 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2384,9 +2540,9 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  // TODO(3110205): Implement this.
+  // Currently unimplemented.  Emit false, a safe choice.
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
@@ -2407,6 +2563,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2428,6 +2585,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2449,6 +2607,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2480,6 +2639,7 @@
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2502,6 +2662,7 @@
 
   __ pop(ebx);
   __ cmp(eax, Operand(ebx));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2719,7 +2880,9 @@
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  __ CallRuntime(Runtime::kMath_pow, 2);
+
+  MathPowStub stub;
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -2904,7 +3067,8 @@
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2914,7 +3078,19 @@
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2951,11 +3127,13 @@
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -2965,7 +3143,71 @@
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = eax;
+  Register index_1 = ebx;
+  Register index_2 = ecx;
+  Register elements = edi;
+  Register temp = edx;
+  __ mov(object, Operand(esp, 2 * kPointerSize));
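+  // The three arguments were pushed left to right, so the object sits
+  // deepest on the stack, below the two indices.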
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, &slow_case);
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            KeyedLoadIC::kSlowCaseBitFieldMask);
+  __ j(not_zero, &slow_case);
+
+  // Check the object's elements are in fast case and writable.
+  __ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &slow_case);
+
+  // Check that both indices are smis.
+  __ mov(index_1, Operand(esp, 1 * kPointerSize));
+  __ mov(index_2, Operand(esp, 0));
+  __ mov(temp, index_1);
+  __ or_(temp, Operand(index_2));
+  __ test(temp, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow_case);
+
+  // Check that both indices are valid.
+  __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
+  __ cmp(temp, Operand(index_1));
+  __ j(below_equal, &slow_case);
+  __ cmp(temp, Operand(index_2));
+  __ j(below_equal, &slow_case);
+
+  // Bring the addresses of the two elements into index_1 and index_2.
+  __ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
+  __ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
+
+  // Swap elements.  Use object and temp as scratch registers.
+  __ mov(object, Operand(index_1, 0));
+  __ mov(temp,   Operand(index_2, 0));
+  __ mov(Operand(index_2, 0), object);
+  __ mov(Operand(index_1, 0), temp);
+
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
+
+  __ mov(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
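+  // Both stores above may have written a pointer into the elements
+  // array, so record both writes.  The InNewSpace check above skips
+  // the write barrier entirely when the array is in new space.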
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ mov(eax, Factory::undefined_value());
+  __ jmp(&done);
+
+  __ bind(&slow_case);
   __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
   context()->Plug(eax);
 }
 
@@ -3073,6 +3315,7 @@
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -3377,6 +3620,7 @@
       // Notice that the labels are swapped.
       context()->PrepareTest(&materialize_true, &materialize_false,
                              &if_false, &if_true, &fall_through);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
       context()->Plug(if_false, if_true);  // Labels swapped.
       break;
@@ -3493,14 +3737,24 @@
       __ push(eax);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+        __ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
       EmitKeyedPropertyLoad(prop);
     }
   }
 
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  PrepareForBailout(expr->increment(), TOS_REG);
+
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3532,8 +3786,9 @@
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call;
-  Label done;
+  NearLabel stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
@@ -3543,8 +3798,8 @@
     __ j(overflow, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &done);
+    patch_site.EmitJumpIfSmi(eax, &done);
+
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
@@ -3553,12 +3808,16 @@
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
     }
   }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   // Call stub for +1/-1.
-  GenericBinaryOpStub stub(expr->binary_op(),
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Unknown());
-  stub.GenerateCall(masm(), eax, Smi::FromInt(1));
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(Smi::FromInt(1)));
+  TypeRecordingBinaryOpStub stub(expr->binary_op(),
+                                 NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);
 
   // Store the value returned in eax.
@@ -3569,6 +3828,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(eax);
         }
         // For all contexts except EffectContext, we have the result on
         // top of the stack.
@@ -3579,6 +3840,8 @@
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(eax);
       }
       break;
     case NAMED_PROPERTY: {
@@ -3586,6 +3849,7 @@
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3600,6 +3864,7 @@
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
         if (!context()->IsEffect()) {
@@ -3627,6 +3892,7 @@
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL &&
              proxy->var()->AsSlot() != NULL &&
@@ -3642,12 +3908,13 @@
     __ push(esi);
     __ push(Immediate(proxy->name()));
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    Visit(expr);
+    context()->HandleExpression(expr);
   }
 }
 
@@ -3672,6 +3939,7 @@
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(left_unary->expression());
   }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(Heap::number_symbol())) {
     __ test(eax, Immediate(kSmiTagMask));
@@ -3767,14 +4035,17 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ cmp(eax, Factory::true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
 
     case Token::INSTANCEOF: {
       VisitForStackValue(expr->right());
-      InstanceofStub stub;
+      __ IncrementCounter(&Counters::instance_of_full, 1);
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -3820,22 +4091,23 @@
       }
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         NearLabel slow_case;
         __ mov(ecx, Operand(edx));
         __ or_(ecx, Operand(eax));
-        __ test(ecx, Immediate(kSmiTagMask));
-        __ j(not_zero, &slow_case, not_taken);
+        patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
         __ cmp(edx, Operand(eax));
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
 
-      CompareFlags flags = inline_smi_code
-          ? NO_SMI_COMPARE_IN_STUB
-          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags);
-      __ CallStub(&stub);
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site);
+
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       Split(cc, if_true, if_false, fall_through);
     }
@@ -3856,6 +4128,8 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
   __ cmp(eax, Factory::null_value());
   if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
@@ -3894,8 +4168,31 @@
 void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
   ASSERT(mode == RelocInfo::CODE_TARGET ||
          mode == RelocInfo::CODE_TARGET_CONTEXT);
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(&Counters::named_load_full, 1);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(&Counters::keyed_load_full, 1);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(&Counters::named_store_full, 1);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+      break;
+    default:
+      break;
+  }
+
   __ call(ic, mode);
 
+  // Crankshaft doesn't need patching of inlined loads and stores.
+  // When compiling the snapshot we need to produce code that works
+  // with and without Crankshaft.
+  if (V8::UseCrankshaft() && !Serializer::enabled()) {
+    return;
+  }
+
   // If we're calling a (keyed) load or store stub, we have to mark
   // the call as containing no inlined code so we will not attempt to
   // patch it.
@@ -3913,6 +4210,16 @@
 }
 
 
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  __ call(ic, RelocInfo::CODE_TARGET);
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
+}
+
+
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index ddfbb91..9c9304d 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -108,9 +108,6 @@
                                            Register name,
                                            Register r0,
                                            Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
@@ -713,7 +710,7 @@
   char_at_generator.GenerateFast(masm);
   __ ret(0);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -1552,14 +1549,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  // Check if the name is a string.
-  Label miss;
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
-  Condition cond = masm->IsObjectStringType(ecx, eax, eax);
-  __ j(NegateCondition(cond), &miss);
   GenerateCallNormal(masm, argc);
-  __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
@@ -1639,16 +1629,15 @@
 }
 
 
-// One byte opcode for test eax,0xXXXXXXXX.
-static const byte kTestEaxByte = 0xA9;
-
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   Address delta_address = test_instruction_address + 1;
   // The delta to the start of the map check instruction.
@@ -1692,6 +1681,8 @@
                                         Object* map,
                                         Object* cell,
                                         bool is_dont_delete) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address mov_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1723,13 +1714,15 @@
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   // Extract the encoded deltas from the test eax instruction.
   Address encoded_offsets_address = test_instruction_address + 1;
@@ -1769,11 +1762,13 @@
 
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   // Fetch the offset from the test instruction to the map cmp
   // instruction.  This offset is stored in the last 4 bytes of the 5
@@ -1969,6 +1964,24 @@
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ pop(ebx);
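+  // ebx holds the return address while the three arguments are
+  // re-pushed; it goes back on top so the tail-called runtime
+  // function returns directly to our caller.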
+  __ push(edx);
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 // Defined in ic.cc.
 Object* KeyedStoreIC_Miss(Arguments args);
 
@@ -2010,9 +2023,107 @@
   __ TailCallExternalReference(ref, 3, 1);
 }
 
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+static bool HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+
+  State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  if (*test_instruction_address != Assembler::kTestAlByte) {
+    ASSERT(*test_instruction_address == Assembler::kNopByte);
+    return;
+  }
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta back to the short jump to patch.  The jump's current
+  // opcode determines the condition code used after patching.
+  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
+           address, test_instruction_address, delta);
+  }
+
+  // Patch with a short conditional jump. There must be a
+  // short jump-if-carry/not-carry at this position.
+  Address jmp_address = test_instruction_address - delta;
+  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+         *jmp_address == Assembler::kJcShortOpcode);
+  Condition cc = *jmp_address == Assembler::kJncShortOpcode
+      ? not_zero
+      : zero;
+  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
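+  // The test instruction clears the carry flag, so before patching a
+  // jc is never taken and a jnc is always taken; afterwards the jump
+  // keys off the zero flag set by the inlined smi tag test.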
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
new file mode 100644
index 0000000..d64f528
--- /dev/null
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -0,0 +1,3276 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-codegen-ia32.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public PostCallGenerator {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     int deoptimization_index)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deoptimization_index_(deoptimization_index) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void Generate() {
+    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  int deoptimization_index_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope(SSE2);
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(StackSlotCount());
+  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ int3();
+  }
+#endif
+
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = StackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ mov(Operand(eax), Immediate(slots));
+      Label loop;
+      __ bind(&loop);
+      __ push(Immediate(kSlotsZapValue));
+      __ dec(eax);
+      __ j(not_zero, &loop);
+    } else {
+      __ sub(Operand(esp), Immediate(slots * kPointerSize));
+    }
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+    LDeferredCode* code = deferred_[i];
+    __ bind(code->entry());
+    code->Generate();
+    __ jmp(code->exit());
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), StackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int index) const {
+  return XMMRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+Immediate LCodeGen::ToImmediate(LOperand* op) {
+  LConstantOperand* const_op = LConstantOperand::cast(op);
+  Handle<Object> literal = chunk_->LookupLiteral(const_op);
+  Representation r = chunk_->LookupLiteralRepresentation(const_op);
+  if (r.IsInteger32()) {
+    ASSERT(literal->IsNumber());
+    return Immediate(static_cast<int32_t>(literal->Number()));
+  } else if (r.IsDouble()) {
+    Abort("unsupported double immediate");
+  }
+  ASSERT(r.IsTagged());
+  return Immediate(literal);
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  if (op->IsRegister()) return Operand(ToRegister(op));
+  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return Operand(ebp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
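+    // For example, index -1 (the first incoming parameter) resolves
+    // to Operand(ebp, 2 * kPointerSize), just above the saved frame
+    // pointer and the return address.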
+    return Operand(ebp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed from the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = StackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    XMMRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  if (instr != NULL) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    __ call(code, mode);
+    RegisterLazyDeoptimization(instr);
+  } else {
+    LPointerMap no_pointers(0);
+    RecordPosition(no_pointers.position());
+    __ call(code, mode);
+    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+  }
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
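+    // PatchInlinedSmiCode recognizes this nop (kNopByte) and leaves
+    // such call sites unpatched.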
+  }
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  // Runtime calls to Throw are not supposed to ever return at the
+  // call site, so don't register lazy deoptimization for these. We do
+  // however have to record a safepoint since throwing exceptions can
+  // cause garbage collections.
+  // BUG(3243555): register a lazy deoptimization point at throw. We need
+  // it to be able to inline functions containing a throw statement.
+  if (!instr->IsThrow()) {
+    RegisterLazyDeoptimization(instr);
+  } else {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to.  If the call has side effects,
+  // execution has to continue after the call; otherwise execution could
+  // resume from an earlier bailout point and repeat the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  RecordSafepoint(instr->pointer_map(),
+                  deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    environment->WriteTranslation(this, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    environment->Register(deoptimization_index, translation.index());
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0) {
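+    // Force a deoptimization on every n-th pass through this point:
+    // a counter on the shared function info is decremented and, once
+    // it reaches zero, reset to n before taking the deopt.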
+    Handle<SharedFunctionInfo> shared(info_->shared_info());
+    Label no_deopt;
+    __ pushfd();
+    __ push(eax);
+    __ push(ebx);
+    __ mov(ebx, shared);
+    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
+    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    __ j(not_zero, &no_deopt);
+    if (FLAG_trap_on_deopt) __ int3();
+    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
+    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+    __ pop(ebx);
+    __ pop(eax);
+    __ popfd();
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+
+    __ bind(&no_deopt);
+    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+    __ pop(ebx);
+    __ pop(eax);
+    __ popfd();
+  }
+
+  if (cc == no_condition) {
+    if (FLAG_trap_on_deopt) __ int3();
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    if (FLAG_trap_on_deopt) {
+      NearLabel done;
+      __ j(NegateCondition(cc), &done);
+      __ int3();
+      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      __ bind(&done);
+    } else {
+      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+    }
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      Factory::NewDeoptimizationInputData(length, TENURED);
+
+  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+                                                    deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegisters(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register esi always contains a pointer to the context.
+  safepoint.DefinePointerRegister(esi);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  // xmm0 must always be a scratch register.
+  XMMRegister xmm_scratch = xmm0;
+  LUnallocated marker_operand(LUnallocated::NONE);
+
+  Register cpu_scratch = esi;
+  bool destroys_cpu_scratch = false;
+
+  LGapResolver resolver(move->move_operands(), &marker_operand);
+  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+  for (int i = moves->length() - 1; i >= 0; --i) {
+    LMoveOperands move = moves->at(i);
+    LOperand* from = move.from();
+    LOperand* to = move.to();
+    ASSERT(!from->IsDoubleRegister() ||
+           !ToDoubleRegister(from).is(xmm_scratch));
+    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
+    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
+    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
+    if (from->IsConstantOperand()) {
+      __ mov(ToOperand(to), ToImmediate(from));
+    } else if (from == &marker_operand) {
+      if (to->IsRegister() || to->IsStackSlot()) {
+        __ mov(ToOperand(to), cpu_scratch);
+        ASSERT(destroys_cpu_scratch);
+      } else {
+        ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
+        __ movdbl(ToOperand(to), xmm_scratch);
+      }
+    } else if (to == &marker_operand) {
+      if (from->IsRegister() || from->IsStackSlot()) {
+        __ mov(cpu_scratch, ToOperand(from));
+        destroys_cpu_scratch = true;
+      } else {
+        ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
+        __ movdbl(xmm_scratch, ToOperand(from));
+      }
+    } else if (from->IsRegister()) {
+      __ mov(ToOperand(to), ToRegister(from));
+    } else if (to->IsRegister()) {
+      __ mov(ToRegister(to), ToOperand(from));
+    } else if (from->IsStackSlot()) {
+      ASSERT(to->IsStackSlot());
+      __ push(eax);
+      __ mov(eax, ToOperand(from));
+      __ mov(ToOperand(to), eax);
+      __ pop(eax);
+    } else if (from->IsDoubleRegister()) {
+      __ movdbl(ToOperand(to), ToDoubleRegister(from));
+    } else if (to->IsDoubleRegister()) {
+      __ movdbl(ToDoubleRegister(to), ToOperand(from));
+    } else {
+      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+      __ movdbl(xmm_scratch, ToOperand(from));
+      __ movdbl(ToOperand(to), xmm_scratch);
+    }
+  }
+
+  if (destroys_cpu_scratch) {
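+    // Restore esi (the context register) from the stack slot it was
+    // pushed to in the prologue, directly below the saved frame
+    // pointer.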
+    __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
+  }
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpConstructResult: {
+      RegExpConstructResultStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCharAt: {
+      StringCharAtStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::MathPow: {
+      MathPowStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::NumberToString: {
+      NumberToStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringAdd: {
+      StringAddStub stub(NO_STRING_ADD_FLAGS);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::TranscendentalCache: {
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  LOperand* right = instr->right();
+  ASSERT(ToRegister(instr->result()).is(edx));
+  ASSERT(ToRegister(instr->left()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(edx));
+
+  Register right_reg = ToRegister(right);
+
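+  // idiv divides edx:eax by its operand, leaving the quotient in eax and
+  // the remainder in edx (the fixed result register asserted above).
+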
+  // Check for x % 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  // Sign extend to edx.
+  __ cdq();
+
+  // Check for (0 % -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    NearLabel positive_left;
+    NearLabel done;
+    __ test(eax, Operand(eax));
+    __ j(not_sign, &positive_left);
+    __ idiv(right_reg);
+
+    // If the remainder is zero the result would be -0, so deoptimize.
+    __ test(edx, Operand(edx));
+    __ j(not_zero, &done);
+
+    DeoptimizeIf(no_condition, instr->environment());
+    __ bind(&positive_left);
+    __ idiv(right_reg);
+    __ bind(&done);
+  } else {
+    __ idiv(right_reg);
+  }
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  LOperand* right = instr->right();
+  ASSERT(ToRegister(instr->result()).is(eax));
+  ASSERT(ToRegister(instr->left()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(edx));
+
+  Register left_reg = eax;
+
+  // Check for x / 0.
+  Register right_reg = ToRegister(right);
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    NearLabel left_not_zero;
+    __ test(left_reg, Operand(left_reg));
+    __ j(not_zero, &left_not_zero);
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(sign, instr->environment());
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (-kMinInt / -1).
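+  // (kMinInt / -1 overflows: the quotient would be 2^31, which is not
+  // representable as an int32, and idiv raises #DE in that case.)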
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    NearLabel left_not_min_int;
+    __ cmp(left_reg, kMinInt);
+    __ j(not_zero, &left_not_min_int);
+    __ cmp(right_reg, -1);
+    DeoptimizeIf(zero, instr->environment());
+    __ bind(&left_not_min_int);
+  }
+
+  // Sign extend to edx.
+  __ cdq();
+  __ idiv(right_reg);
+
+  // Deoptimize if remainder is not 0.
+  __ test(edx, Operand(edx));
+  DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ mov(ToRegister(instr->temp()), left);
+  }
+
+  if (right->IsConstantOperand()) {
+    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
+  } else {
+    __ imul(left, ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    NearLabel done;
+    __ test(left, Operand(left));
+    __ j(not_zero, &done);
+    if (right->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr->environment());
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
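+      // The product is zero here, so one operand is zero; OR-ing the saved
+      // left value with right sets the sign flag iff the other is negative.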
+      __ or_(ToRegister(instr->temp()), ToOperand(right));
+      DeoptimizeIf(sign, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int right_operand = ToInteger32(LConstantOperand::cast(right));
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), right_operand);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), ToOperand(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  if (right->IsRegister()) {
+    ASSERT(ToRegister(right).is(ecx));
+
+    switch (instr->op()) {
+      case Token::SAR:
+        __ sar_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shr_cl(ToRegister(left));
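+        // A zero shift count (ecx & 31 == 0) leaves the sign bit intact,
+        // and a result with the top bit set is not a valid int32.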
+        if (instr->can_deopt()) {
+          __ test(ToRegister(left), Immediate(0x80000000));
+          DeoptimizeIf(not_zero, instr->environment());
+        }
+        break;
+      case Token::SHL:
+        __ shl_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sar(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ test(ToRegister(left), Immediate(0x80000000));
+          DeoptimizeIf(not_zero, instr->environment());
+        } else {
+          __ shr(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ shl(ToRegister(left), shift_count);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ sub(ToOperand(left), ToImmediate(right));
+  } else {
+    __ sub(ToRegister(left), ToOperand(right));
+  }
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  ASSERT(instr->result()->IsDoubleRegister());
+  XMMRegister res = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
+  // so if the constant is -0.0.
+  if (BitCast<uint64_t, double>(v) == 0) {
+    __ xorpd(res, res);
+  } else {
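+    // If the value round-trips through int32, materialize it with an
+    // integer conversion; otherwise spill the raw 64-bit bit pattern.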
+    int32_t v_int32 = static_cast<int32_t>(v);
+    if (static_cast<double>(v_int32) == v) {
+      __ push_imm32(v_int32);
+      __ cvtsi2sd(res, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kPointerSize));
+    } else {
+      uint64_t int_val = BitCast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+      __ push_imm32(upper);
+      __ push_imm32(lower);
+      __ movdbl(res, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+    }
+  }
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoArrayLength(LArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->value()->IsLoadElements()) {
+    // We load the length directly from the elements array.
+    Register elements = ToRegister(instr->input());
+    __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
+  } else {
+    // Check that the receiver really is an array.
+    Register array = ToRegister(instr->input());
+    Register temporary = ToRegister(instr->temporary());
+    __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
+    DeoptimizeIf(not_equal, instr->environment());
+
+    // Load length directly from the array.
+    __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
+  }
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->temporary());
+  ASSERT(input.is(result));
+  NearLabel done;
+  // If the object is a smi return the object.
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+
+  // If the object is not a value type, return the object.
+  __ CmpObjectType(input, JS_VALUE_TYPE, map);
+  __ j(not_equal, &done);
+  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->Equals(instr->result()));
+  __ not_(ToRegister(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  __ push(ToOperand(instr->input()));
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    Comment("Unreachable code.");
+    __ int3();
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ add(ToOperand(left), ToImmediate(right));
+  } else {
+    __ add(ToRegister(left), ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  // Modulo uses a fixed result register.
+  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+  switch (instr->op()) {
+    case Token::ADD:
+      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::SUB:
+      __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::MUL:
+      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::DIV:
+      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::MOD: {
+      // Pass two doubles as arguments on the stack.
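+      // Two doubles occupy four argument words on ia32.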
+      __ PrepareCallCFunction(4, eax);
+      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+
+      // Return value is in st(0) on ia32.
+      // Store it into the (fixed) result register.
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->left()).is(edx));
+  ASSERT(ToRegister(instr->right()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+    __ jmp(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->input());
+    __ test(reg, Operand(reg));
+    EmitBranch(true_block, false_block, not_zero);
+  } else if (r.IsDouble()) {
+    XMMRegister reg = ToDoubleRegister(instr->input());
+    __ xorpd(xmm0, xmm0);
+    __ ucomisd(reg, xmm0);
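+    // ucomisd sets ZF for unordered operands, so NaN (which is falsy)
+    // correctly takes the false branch.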
+    EmitBranch(true_block, false_block, not_equal);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->input());
+    if (instr->hydrogen()->type().IsBoolean()) {
+      __ cmp(reg, Factory::true_value());
+      EmitBranch(true_block, false_block, equal);
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+      __ cmp(reg, Factory::undefined_value());
+      __ j(equal, false_label);
+      __ cmp(reg, Factory::true_value());
+      __ j(equal, true_label);
+      __ cmp(reg, Factory::false_value());
+      __ j(equal, false_label);
+      __ test(reg, Operand(reg));
+      __ j(equal, false_label);
+      __ test(reg, Immediate(kSmiTagMask));
+      __ j(zero, true_label);
+
+      // Test for double values. Zero is false.
+      NearLabel call_stub;
+      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      __ j(not_equal, &call_stub);
+      __ fldz();
+      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+      __ FCmp();
+      __ j(zero, false_label);
+      __ jmp(true_label);
+
+      // The conversion stub doesn't cause garbage collections so it's
+      // safe to not record a safepoint after the call.
+      __ bind(&call_stub);
+      ToBooleanStub stub;
+      __ pushad();
+      __ push(reg);
+      __ CallStub(&stub);
+      __ test(eax, Operand(eax));
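+      // popad restores the registers but does not modify EFLAGS, so the
+      // flags set by the test survive.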
+      __ popad();
+      EmitBranch(true_block, false_block, not_zero);
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    // If this goto needs a stack overflow check, perform it before jumping.
+    if (deferred_stack_check != NULL) {
+      ExternalReference stack_limit =
+          ExternalReference::address_of_stack_limit();
+      __ cmp(esp, Operand::StaticVariable(stack_limit));
+      __ j(above_equal, chunk_->GetAssemblyLabel(block));
+      __ jmp(deferred_stack_check->entry());
+      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
+    } else {
+      __ jmp(chunk_->GetAssemblyLabel(block));
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+  __ pushad();
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ popad();
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LGoto* instr_;
+  };
+
+  DeferredStackCheck* deferred = NULL;
+  if (instr->include_stack_check()) {
+    deferred = new DeferredStackCheck(this, instr);
+  }
+  EmitGoto(instr->block_id(), deferred);
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  if (right->IsConstantOperand()) {
+    __ cmp(ToOperand(left), ToImmediate(right));
+  } else {
+    __ cmp(ToRegister(left), ToOperand(right));
+  }
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  NearLabel unordered;
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the unordered case, which produces a false value.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, &unordered, not_taken);
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  NearLabel done;
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
+  __ j(cc, &done);
+
+  __ bind(&unordered);
+  __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the false block.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  Register result = ToRegister(instr->result());
+
+  __ cmp(left, Operand(right));
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  NearLabel done;
+  __ j(equal, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  __ cmp(left, Operand(right));
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+  Register reg = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Materialize false.
+
+  __ cmp(reg, Factory::null_value());
+  if (instr->is_strict()) {
+    __ mov(result, Handle<Object>(Heap::true_value()));
+    NearLabel done;
+    __ j(equal, &done);
+    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ bind(&done);
+  } else {
+    NearLabel true_value, false_value, done;
+    __ j(equal, &true_value);
+    __ cmp(reg, Factory::undefined_value());
+    __ j(equal, &true_value);
+    __ test(reg, Immediate(kSmiTagMask));
+    __ j(zero, &false_value);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = result;
+    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, &true_value);
+    __ bind(&false_value);
+    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ jmp(&done);
+    __ bind(&true_value);
+    __ mov(result, Handle<Object>(Heap::true_value()));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ cmp(reg, Factory::null_value());
+  if (instr->is_strict()) {
+    EmitBranch(true_block, false_block, equal);
+  } else {
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ j(equal, true_label);
+    __ cmp(reg, Factory::undefined_value());
+    __ j(equal, true_label);
+    __ test(reg, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = ToRegister(instr->temp());
+    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    EmitBranch(true_block, false_block, not_zero);
+  }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  ASSERT(!input.is(temp1));
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp1.is(temp2));
+
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(equal, is_not_object);
+
+  __ cmp(input, Factory::null_value());
+  __ j(equal, is_object);
+
+  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
+  __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, is_not_object);
+
+  __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
+  __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+  __ j(below, is_not_object);
+  __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+  return below_equal;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+  Register reg = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+  Label is_false, is_true, done;
+
+  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+  __ j(true_cond, &is_true);
+
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ jmp(&done);
+
+  __ bind(&is_true);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+  Operand input = ToOperand(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ test(input, Immediate(kSmiTagMask));
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  NearLabel done;
+  __ j(zero, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Operand input = ToOperand(instr->input());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ test(input, Immediate(kSmiTagMask));
+  EmitBranch(true_block, false_block, zero);
+}
+
+
+InstanceType LHasInstanceType::TestType() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+Condition LHasInstanceType::BranchCondition() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ test(input, Immediate(kSmiTagMask));
+  NearLabel done, is_false;
+  __ j(zero, &is_false);
+  __ CmpObjectType(input, instr->TestType(), result);
+  __ j(NegateCondition(instr->BranchCondition()), &is_false);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ jmp(&done);
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, false_label);
+
+  __ CmpObjectType(input, instr->TestType(), temp);
+  EmitBranch(true_block, false_block, instr->BranchCondition());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  NearLabel done;
+  __ j(not_zero, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(true_block, false_block, not_equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.  Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, is_false);
+  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, is_false);
+
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(temp, FieldOperand(temp,
+                            SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+  __ cmp(temp, class_name);
+  // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  ASSERT(input.is(result));
+  Register temp = ToRegister(instr->temporary());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+  NearLabel done;
+  Label is_true, is_false;
+
+  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
+
+  __ j(not_equal, &is_false);
+
+  __ bind(&is_true);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ jmp(&done);
+
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temporary());
+  Register temp2 = ToRegister(instr->temporary2());
+  if (input.is(temp)) {
+    // Swap.
+    Register swapper = temp;
+    temp = temp2;
+    temp2 = swapper;
+  }
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+  int true_block = instr->true_block_id();
+  int false_block = instr->false_block_id();
+
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  // Object and function are in fixed registers eax and edx.
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  NearLabel true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(zero, &true_value);
+  __ mov(ToRegister(instr->result()), Factory::false_value());
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), Factory::true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ test(eax, Operand(eax));
+  EmitBranch(true_block, false_block, zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
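+  // The compare stub expects the condition and the input operands to be
+  // reversed for GT and LTE.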
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  NearLabel true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(condition, &true_value);
+  __ mov(ToRegister(instr->result()), Factory::false_value());
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), Factory::true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  // The compare stub expects the condition and the input operands to be
+  // reversed for GT and LTE.
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  __ test(eax, Operand(eax));
+  EmitBranch(true_block, false_block, condition);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Preserve the return value on the stack and rely on the runtime
+    // call to return the value in the same register.
+    __ push(eax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  __ mov(esp, ebp);
+  __ pop(ebp);
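+  // Pop the parameters plus the receiver (hence the + 1).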
+  __ ret((ParameterCount() + 1) * kPointerSize);
+}
+
+
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
+  if (instr->hydrogen()->check_hole_value()) {
+    __ cmp(result, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+  Register value = ToRegister(instr->input());
+  __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Register object = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  ASSERT(instr->result()->Equals(instr->input()));
+  Register reg = ToRegister(instr->input());
+  __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    NearLabel done;
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    __ j(equal, &done);
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_cow_array_map()));
+    __ Check(equal, "Check for fast elements failed.");
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register length = ToRegister(instr->length());
+  Operand index = ToOperand(instr->index());
+  Register result = ToRegister(instr->result());
+
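+  // The subtraction computes (length - index) and sets the flags;
+  // deoptimize unless index < length (unsigned comparison).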
+  __ sub(length, index);
+  DeoptimizeIf(below_equal, instr->environment());
+
+  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = ToRegister(instr->key());
+  Register result;
+  if (instr->load_result() != NULL) {
+    result = ToRegister(instr->load_result());
+  } else {
+    result = ToRegister(instr->result());
+    ASSERT(result.is(elements));
+  }
+
+  // Load the result.
+  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    // Untag and check for smi.
+    __ SmiUntag(result);
+    DeoptimizeIf(carry, instr->environment());
+  } else if (r.IsDouble()) {
+    EmitNumberUntagD(result,
+                     ToDoubleRegister(instr->result()),
+                     instr->environment());
+  } else {
+    // Check for the hole value.
+    ASSERT(r.IsTagged());
+    __ cmp(result, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->key()).is(eax));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  // Check for arguments adapter frame.
+  Label done, adapted;
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(result),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adapted);
+
+  // No arguments adaptor frame.
+  __ mov(result, Operand(ebp));
+  __ jmp(&done);
+
+  // Arguments adaptor frame present.
+  __ bind(&adapted);
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // Done. Pointer to topmost argument is in result.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Operand elem = ToOperand(instr->input());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
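+  // LArgumentsElements yields ebp itself when no adaptor frame is present.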
+  // No arguments adaptor frame. Number of arguments is fixed.
+  __ cmp(ebp, elem);
+  __ mov(result, Immediate(scope()->num_parameters()));
+  __ j(equal, &done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result,
+                         ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Done. Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  ASSERT(ToRegister(instr->function()).is(edi));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  // If the receiver is null or undefined, we have to pass the
+  // global object as a receiver.
+  NearLabel global_receiver, receiver_ok;
+  __ cmp(receiver, Factory::null_value());
+  __ j(equal, &global_receiver);
+  __ cmp(receiver, Factory::undefined_value());
+  __ j(not_equal, &receiver_ok);
+  __ bind(&global_receiver);
+  __ mov(receiver, GlobalObjectOperand());
+  __ bind(&receiver_ok);
+
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+
+  Label invoke;
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmp(length, kArgumentsLimit);
+  DeoptimizeIf(above, instr->environment());
+
+  __ push(receiver);
+  __ mov(receiver, length);
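+  // The receiver register is eax (asserted below); reuse it to hold the
+  // actual argument count for InvokeFunction.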
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label loop;
+  // length is a small non-negative integer, due to the test above.
+  __ test(length, Operand(length));
+  __ j(zero, &invoke);
+  __ bind(&loop);
+  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+  __ dec(length);
+  __ j(not_zero, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  ASSERT(receiver.is(eax));
+  v8::internal::ParameterCount actual(eax);
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->input();
+  if (argument->IsConstantOperand()) {
+    __ push(ToImmediate(argument));
+  } else {
+    __ push(ToOperand(argument));
+  }
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr) {
+  // Change context if needed.
+  bool change_context =
+      (graph()->info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  }
+
+  // Set eax to the argument count if no arguments adaptation is needed.
+  // Assumes that eax is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(eax, arity);
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  if (*function == *graph()->info()->closure()) {
+    __ CallSelf();
+  } else {
+    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Set up deoptimization.
+  RegisterLazyDeoptimization(instr);
+
+  // Restore context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->function());
+  CallKnownFunction(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Register input_reg = ToRegister(instr->input());
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  DeoptimizeIf(not_equal, instr->environment());
+
+  Label done;
+  Register tmp = input_reg.is(eax) ? ecx : eax;
+  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  Label negative;
+  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  __ test(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(not_zero, &negative);
+  __ mov(tmp, input_reg);
+  __ jmp(&done);
+
+  __ bind(&negative);
+
+  Label allocated, slow;
+  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+  __ jmp(&allocated);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(eax)) __ mov(tmp, eax);
+
+  // Restore input_reg after call to runtime.
+  __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
+
+  __ bind(&allocated);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ and_(tmp2, ~HeapNumber::kSignMask);
+  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+
+  __ bind(&done);
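+  // Overwrite input_reg's slot in the safepoint register area so that
+  // PopSafepointRegisters restores the result into input_reg.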
+  __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  ASSERT(instr->input()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    XMMRegister scratch = xmm0;
+    XMMRegister input_reg = ToDoubleRegister(instr->input());
+    __ pxor(scratch, scratch);
+    __ subsd(scratch, input_reg);
+    __ pand(input_reg, scratch);
+  } else if (r.IsInteger32()) {
+    Register input_reg = ToRegister(instr->input());
+    __ test(input_reg, Operand(input_reg));
+    Label is_positive;
+    __ j(not_sign, &is_positive);
+    __ neg(input_reg);
+    __ test(input_reg, Operand(input_reg));
+    DeoptimizeIf(negative, instr->environment());
+    __ bind(&is_positive);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Label not_smi;
+    Register input_reg = ToRegister(instr->input());
+    // Smi check.
+    __ test(input_reg, Immediate(kSmiTagMask));
+    __ j(not_zero, deferred->entry());
+    __ test(input_reg, Operand(input_reg));
+    Label is_positive;
+    __ j(not_sign, &is_positive);
+    __ neg(input_reg);
+
+    __ test(input_reg, Operand(input_reg));
+    DeoptimizeIf(negative, instr->environment());
+
+    __ bind(&is_positive);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+  __ ucomisd(input_reg, xmm_scratch);
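+  // An unordered result (NaN) sets CF, so NaN deoptimizes on either
+  // path below.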
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    DeoptimizeIf(below, instr->environment());
+  }
+
+  // Use truncating instruction (OK because input is positive).
+  __ cvttsd2si(output_reg, Operand(input_reg));
+
+  // cvttsd2si signals overflow by returning kMinInt (0x80000000).
+  __ cmp(output_reg, 0x80000000u);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+
+  // xmm_scratch = 0.5
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+
+  // input = input + 0.5
+  __ addsd(input_reg, xmm_scratch);
+
+  // We need to return -0 for inputs in the range [-0.5, 0); otherwise
+  // compute Math.floor(value + 0.5).
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    // If we don't need to bailout on -0, we check only bailout
+    // on negative inputs.
+    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+
+  // Compute Math.floor(value + 0.5).
+  // Use truncating instruction (OK because input is positive).
+  __ cvttsd2si(output_reg, Operand(input_reg));
+
+  // cvttsd2si signals overflow by returning kMinInt (0x80000000).
+  __ cmp(output_reg, 0x80000000u);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+  __ sqrtsd(input_reg, input_reg);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+  ExternalReference negative_infinity =
+      ExternalReference::address_of_negative_infinity();
+  __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
+  __ ucomisd(xmm_scratch, input_reg);
+  DeoptimizeIf(equal, instr->environment());
+  __ sqrtsd(input_reg, input_reg);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  if (exponent_type.IsDouble()) {
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+    __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+  } else if (exponent_type.IsInteger32()) {
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    ASSERT(!ToRegister(right).is(ebx));
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
+    __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    CpuFeatures::Scope scope(SSE2);
+    Register right_reg = ToRegister(right);
+
+    Label non_smi, call;
+    __ test(right_reg, Immediate(kSmiTagMask));
+    __ j(not_zero, &non_smi);
+    __ SmiUntag(right_reg);
+    __ cvtsi2sd(result_reg, Operand(right_reg));
+    __ jmp(&call);
+
+    __ bind(&non_smi);
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    ASSERT(!right_reg.is(ebx));
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+    __ bind(&call);
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
+    __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+  }
+
+  // Return value is in st(0) on ia32.
+  // Store it into the (fixed) result register.
+  __ sub(Operand(esp), Immediate(kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ movdbl(result_reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathRound:
+      DoMathRound(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    case kMathPowHalf:
+      DoMathPowHalf(instr);
+      break;
+    case kMathCos:
+      DoMathCos(instr);
+      break;
+    case kMathSin:
+      DoMathSin(instr);
+      break;
+    case kMathLog:
+      DoMathLog(instr);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(ecx, instr->name());
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ Drop(1);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(ecx, instr->name());
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->target());
+  CallKnownFunction(instr->target(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->input()).is(edi));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Set(eax, Immediate(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  int offset = instr->offset();
+
+  if (!instr->transition().is_null()) {
+    __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+  }
+
+  // Do the store.
+  if (instr->is_in_object()) {
+    __ mov(FieldOperand(object, offset), value);
+    if (instr->needs_write_barrier()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWrite(object, offset, value, temp);
+    }
+  } else {
+    Register temp = ToRegister(instr->temp());
+    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(temp, offset), value);
+    if (instr->needs_write_barrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWrite(temp, offset, value, object);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+  DeoptimizeIf(above_equal, instr->environment());
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(FieldOperand(elements, offset), value);
+  } else {
+    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+           value);
+  }
+
+  // Update the write barrier unless we're certain that we're storing a smi.
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Compute address of modified element and store it into key register.
+    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+    __ RecordWrite(elements, key, value);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->key()).is(ecx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
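+  // SmiTag adds the register to itself, so the overflow flag is set when
+  // the value does not fit in a smi.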
+  __ SmiTag(reg);
+  __ j(overflow, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->input());
+  Register tmp = reg.is(eax) ? ecx : eax;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  NearLabel done;
+  __ SmiUntag(reg);
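+  // After untagging, the value differs from the original int32 only in
+  // the sign bit (the tagging shift overflowed), so flip it back to
+  // recover the input.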
+  __ xor_(reg, 0x80000000);
+  __ cvtsi2sd(xmm0, Operand(reg));
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+    __ jmp(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  if (!reg.is(eax)) __ mov(reg, eax);
+
+  // Done. Put the value in xmm0 into the value of the allocated heap
+  // number.
+  __ bind(&done);
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Set(reg, Immediate(0));
+
+  __ PushSafepointRegisters();
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    __ test(ToRegister(input), Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  }
+  __ SmiUntag(ToRegister(input));
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                XMMRegister result_reg,
+                                LEnvironment* env) {
+  NearLabel load_smi, heap_number, done;
+
+  // Smi check.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi, not_taken);
+
+  // Heap number map check.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(equal, &heap_number);
+
+  __ cmp(input_reg, Factory::undefined_value());
+  DeoptimizeIf(not_equal, env);
+
+  // Convert undefined to NaN.
+  __ push(input_reg);
+  __ mov(input_reg, Factory::nan_value());
+  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ pop(input_reg);
+  __ jmp(&done);
+
+  // Heap number to XMM conversion.
+  __ bind(&heap_number);
+  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  // Smi to XMM conversion.
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ cvtsi2sd(result_reg, Operand(input_reg));
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  NearLabel done, heap_number;
+  Register input_reg = ToRegister(instr->input());
+
+  // Heap number map check.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+
+  if (instr->truncating()) {
+    __ j(equal, &heap_number);
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ cmp(input_reg, Factory::undefined_value());
+    DeoptimizeIf(not_equal, instr->environment());
+    __ mov(input_reg, 0);
+    __ jmp(&done);
+
+    __ bind(&heap_number);
+    if (CpuFeatures::IsSupported(SSE3)) {
+      CpuFeatures::Scope scope(SSE3);
+      NearLabel convert;
+      // Use the more powerful fisttp conversion when SSE3 is available.
+      // Load x87 register with heap number.
+      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+      // Get exponent alone and check for too-big exponent.
+      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+      __ and_(input_reg, HeapNumber::kExponentMask);
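+      // fisttp_d produces a 64-bit result, so inputs with an unbiased
+      // exponent of 63 or more (including infinities and NaNs) are out of
+      // range and must deoptimize.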
+      const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
+      __ j(less, &convert);
+      // Pop FPU stack before deoptimizing.
+      __ ffree(0);
+      __ fincstp();
+      DeoptimizeIf(no_condition, instr->environment());
+
+      // Reserve space for the 64-bit answer.
+      __ bind(&convert);
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      // Do conversion, which cannot fail because we checked the exponent.
+      __ fisttp_d(Operand(esp, 0));
+      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
+      __ add(Operand(esp), Immediate(kDoubleSize));
+    } else {
+      NearLabel deopt;
+      XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+      __ cvttsd2si(input_reg, Operand(xmm0));
+      __ cmp(input_reg, 0x80000000u);
+      __ j(not_equal, &done);
+      // Check if the input was 0x80000000 (kMinInt).
+      // If not, then we got an overflow and we deoptimize.
+      ExternalReference min_int = ExternalReference::address_of_min_int();
+      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
+      __ ucomisd(xmm_temp, xmm0);
+      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    }
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(not_equal, instr->environment());
+
+    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, Operand(xmm0));
+    __ cvtsi2sd(xmm_temp, Operand(input_reg));
+    __ ucomisd(xmm0, xmm_temp);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ test(input_reg, Operand(input_reg));
+      __ j(not_zero, &done);
+      __ movmskpd(input_reg, xmm0);
+      __ and_(input_reg, 1);
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Smi check.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  __ j(not_zero, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);  // Untag smi.
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  XMMRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg, instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsRegister());
+
+  XMMRegister input_reg = ToDoubleRegister(input);
+  Register result_reg = ToRegister(result);
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    __ cvttsd2si(result_reg, Operand(input_reg));
+    __ cmp(result_reg, 0x80000000u);
+    if (CpuFeatures::IsSupported(SSE3)) {
+      // This will deoptimize if the exponent of the input is out of range.
+      CpuFeatures::Scope scope(SSE3);
+      NearLabel convert, done;
+      __ j(not_equal, &done);
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ movdbl(Operand(esp, 0), input_reg);
+      // Get exponent alone and check for too-big exponent.
+      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
+      __ and_(result_reg, HeapNumber::kExponentMask);
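+      // As in the tagged case above: inputs with an unbiased exponent of
+      // 63 or more (including infinities and NaNs) cannot be converted
+      // with fisttp_d, so deoptimize for those.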
+      const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
+      __ j(less, &convert);
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      DeoptimizeIf(no_condition, instr->environment());
+      __ bind(&convert);
+      // Do conversion, which cannot fail because we checked the exponent.
+      __ fld_d(Operand(esp, 0));
+      __ fisttp_d(Operand(esp, 0));
+      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      __ bind(&done);
+    } else {
+      // This will bail out if the input was not in the int32 range (or,
+      // unfortunately, if the input was 0x80000000).
+      DeoptimizeIf(equal, instr->environment());
+    }
+  } else {
+    NearLabel done;
+    __ cvttsd2si(result_reg, Operand(input_reg));
+    __ cvtsi2sd(xmm0, Operand(result_reg));
+    __ ucomisd(xmm0, input_reg);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // The integer converted back is equal to the original. We
+      // only have to test if we got -0 as an input.
+      __ test(result_reg, Operand(result_reg));
+      __ j(not_zero, &done);
+      __ movmskpd(result_reg, input_reg);
+      // Bit 0 contains the sign of the double in input_reg.
+      // If input was positive, we are ok and return 0, otherwise
+      // deoptimize.
+      __ and_(result_reg, 1);
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  __ test(ToRegister(input), Immediate(kSmiTagMask));
+  DeoptimizeIf(instr->condition(), instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+  InstanceType first = instr->hydrogen()->first();
+  InstanceType last = instr->hydrogen()->last();
+
+  __ test(input, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr->environment());
+
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+          static_cast<int8_t>(first));
+
+  // If there is only one type in the interval check for equality.
+  if (first == last) {
+    DeoptimizeIf(not_equal, instr->environment());
+  } else {
+    DeoptimizeIf(below, instr->environment());
+    // Omit check for the last type.
+    if (last != LAST_TYPE) {
+      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+              static_cast<int8_t>(last));
+      DeoptimizeIf(above, instr->environment());
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->input()->IsRegister());
+  Register reg = ToRegister(instr->input());
+  __ cmp(reg, instr->hydrogen()->target());
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         instr->hydrogen()->map());
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
+  if (Heap::InNewSpace(*prototype)) {
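+    // New-space objects may move during GC, so load the prototype through
+    // a global property cell (which has a fixed address) rather than
+    // embedding the pointer directly in the code.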
+    Handle<JSGlobalPropertyCell> cell =
+        Factory::NewJSGlobalPropertyCell(prototype);
+    __ mov(result, Operand::Cell(cell));
+  } else {
+    __ mov(result, prototype);
+  }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Register reg = ToRegister(instr->temp());
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<Map> receiver_map = instr->receiver_map();
+  Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+
+  // Load prototype object.
+  LoadPrototype(reg, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Handle<Map>(current_prototype->map()));
+    DeoptimizeIf(not_equal, instr->environment());
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    LoadPrototype(reg, current_prototype);
+  }
+
+  // Check the holder map.
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Handle<Map>(current_prototype->map()));
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->constant_elements()));
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->constant_properties()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+
+  // Pick the right runtime function or stub to call.
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  }
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  NearLabel materialized;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ecx = literals array.
+  // ebx = regexp literal.
+  // eax = regexp literal clone.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset = FixedArray::kHeaderSize +
+      instr->hydrogen()->literal_index() * kPointerSize;
+  __ mov(ebx, FieldOperand(ecx, literal_offset));
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(not_equal, &materialized);
+
+  // Create the regexp literal using a runtime function.
+  // Result will be in eax.
+  __ push(ecx);
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->pattern()));
+  __ push(Immediate(instr->hydrogen()->flags()));
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(ebx, eax);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(size)));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(ebx);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (The copy loop is unrolled once for better throughput.)
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ mov(edx, FieldOperand(ebx, i));
+    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+    __ mov(FieldOperand(eax, i), edx);
+    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+    __ mov(FieldOperand(eax, size - kPointerSize), edx);
+  }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (shared_info->num_literals() == 0 && !pretenure) {
+    FastNewClosureStub stub;
+    __ push(Immediate(shared_info));
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ push(esi);
+    __ push(Immediate(shared_info));
+    __ push(Immediate(pretenure
+                      ? Factory::true_value()
+                      : Factory::false_value()));
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  LOperand* input = instr->input();
+  if (input->IsConstantOperand()) {
+    __ push(ToImmediate(input));
+  } else {
+    __ push(ToOperand(input));
+  }
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Label true_label;
+  Label false_label;
+  NearLabel done;
+
+  Condition final_branch_condition = EmitTypeofIs(&true_label,
+                                                  &false_label,
+                                                  input,
+                                                  instr->type_literal());
+  __ j(final_branch_condition, &true_label);
+  __ bind(&false_label);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ jmp(&done);
+
+  __ bind(&true_label);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Condition final_branch_condition = no_condition;
+  if (type_name->Equals(Heap::number_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, true_label);
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::string_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, false_label);
+    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+    final_branch_condition = below;
+
+  } else if (type_name->Equals(Heap::boolean_symbol())) {
+    __ cmp(input, Handle<Object>(Heap::true_value()));
+    __ j(equal, true_label);
+    __ cmp(input, Handle<Object>(Heap::false_value()));
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::undefined_symbol())) {
+    __ cmp(input, Factory::undefined_value());
+    __ j(equal, true_label);
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    // Check for undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    final_branch_condition = not_zero;
+
+  } else if (type_name->Equals(Heap::function_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    // Regular expressions => 'function' (they are callable).
+    __ CmpInstanceType(input, JS_REGEXP_TYPE);
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::object_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ cmp(input, Factory::null_value());
+    __ j(equal, true_label);
+    // Regular expressions => 'function', not 'object'.
+    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
+    __ j(equal, false_label);
+    // Check for undetectable objects => false.
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, false_label);
+    // Check for JS objects => true.
+    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+    __ j(below, false_label);
+    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
+    final_branch_condition = below_equal;
+
+  } else {
+    final_branch_condition = not_equal;
+    __ jmp(false_label);
+    // A dead branch instruction will be generated after this point.
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code is emitted for the lazy bailout instruction. It is used to
+  // capture the environment after a call so the safepoint data can be
+  // populated with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  __ push(ToOperand(obj));
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  RecordPosition(instr->pointer_map()->position());
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  // Perform stack overflow check.
+  NearLabel done;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &done);
+
+  StackCheckStub stub;
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
new file mode 100644
index 0000000..6d8173a
--- /dev/null
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -0,0 +1,265 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
+#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
+
+#include "ia32/lithium-ia32.h"
+
+#include "checks.h"
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(chunk->graph()->info()->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LGoto* instr);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+  MacroAssembler* masm() const { return masm_; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateSafepointTable();
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+  void CallRuntime(Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in edi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr);
+
+  void LoadPrototype(Register result, Handle<JSObject> prototype);
+
+  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  XMMRegister ToDoubleRegister(int index) const;
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  int ToInteger32(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+  Immediate ToImmediate(LOperand* op);
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
+  void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathCos(LUnaryMathOperation* instr);
+  void DoMathSin(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen), external_exit_(NULL) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+
+  void SetExit(Label *exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
new file mode 100644
index 0000000..3b272d0
--- /dev/null
+++ b/src/ia32/lithium-ia32.cc
@@ -0,0 +1,2128 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s ", this->Mnemonic());
+  if (HasResult()) {
+    result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) const {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  for (int i = move_operands_.length() - 1; i >= 0; --i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* from = move_operands_[i].from();
+      LOperand* to = move_operands_[i].to();
+      if (from->Equals(to)) {
+        to->PrintTo(stream);
+      } else {
+        to->PrintTo(stream);
+        stream->Add(" = ");
+        from->PrintTo(stream);
+      }
+      stream->Add("; ");
+    }
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) const {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LBinaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  left()->PrintTo(stream);
+  stream->Add(" ");
+  right()->PrintTo(stream);
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  input()->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  input()->PrintTo(stream);
+  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_object(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_smi(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_instance_type(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_cached_array_index(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) const {
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if typeof ");
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("/%s ", hydrogen()->OpName());
+  input()->PrintTo(stream);
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[ecx] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) const {
+  LUnaryOperation::PrintDataTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("= class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LUnaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  input()->PrintTo(stream);
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+LChunk::LChunk(HGraph* graph)
+    : spill_slot_count_(0),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+void LChunk::Verify() const {
+  // TODO(twuerthinger): Implement verification for chunk.
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // Doubles occupy two consecutive spill slots, so skip one extra slot
+  // when allocating a double-width slot.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (!goto_instr->include_stack_check() &&
+          label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LGap* gap = new LGap(block);
+  int index = -1;
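+  // Every instruction is paired with a gap where the register allocator
+  // can insert parallel moves: the gap precedes a control instruction
+  // (its moves must execute before the branch) and follows any other
+  // instruction.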
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+  return index;
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters plus
+  // one, which makes them negative and therefore distinguishable from
+  // spill slots.
+  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+
+// A parameter relative to ebp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
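+  // For a function with n parameters, the last parameter (index n - 1)
+  // is at [ebp + 8], just above the return address, and the receiver
+  // (index -1) is at [ebp + (n + 2) * kPointerSize].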
+  return (1 + graph()->info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
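+  // A node that no value is assigned into is trivially resolved.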
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
+                           LOperand* marker_operand)
+    : nodes_(4),
+      identified_cycles_(4),
+      result_(4),
+      marker_operand_(marker_operand),
+      next_visited_id_(0) {
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i]);
+  }
+
+  int unresolved_nodes;
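+  // Iterate to a fixed point: emit every move whose source node is
+  // already resolved; each emitted move may unblock further nodes.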
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
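+// A worked example (a sketch): registering the swap eax <-> ebx records a
+// cycle, and ResolveCycle collects [marker, ebx, eax, marker], emitting
+// marker->eax, eax->ebx, ebx->marker. Read back to front (the result list
+// is consumed in reverse order), this saves ebx in the marker operand,
+// moves eax into ebx, and restores the saved value into eax.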
+void LGapResolver::ResolveCycle(LGapNode* start) {
+  ZoneList<LOperand*> circle_operands(8);
+  circle_operands.Add(marker_operand_);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    circle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  circle_operands.Add(marker_operand_);
+
+  for (int i = circle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = circle_operands[i];
+    LOperand* to = circle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This introduces a cycle. Save it so it can be broken later.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          XMMRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
+                                               XMMRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  instr->set_environment(CreateEnvironment(hydrogen_env));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instructions_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instructions_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  allocator_->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasSideEffects()) {
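+    // An instruction with visible side effects is always followed by a
+    // simulate; its AST id identifies the environment to deoptimize to
+    // after the call.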
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+LOperand* LChunkBuilder::Temp() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+
+  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+  return DefineSameAsFirst(new LBitI(op, left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+  HValue* right_value = instr->OperandAt(1);
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
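+    // Variable shift counts must be in cl (the low byte of ecx) on ia32.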
+    right = UseFixed(right_value, ecx);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
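+  // (A logical right shift by zero keeps the sign bit, so a negative input
+  // yields a uint32 value with no int32 representation unless every use
+  // truncates to int32.)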
+  bool can_deopt = (op == Token::SHR && constant_value == 0);
+  if (can_deopt) {
+    bool can_truncate = true;
+    for (int i = 0; i < instr->uses()->length(); i++) {
+      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+        can_truncate = false;
+        break;
+      }
+    }
+    can_deopt = !can_truncate;
+  }
+
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
+  if (can_deopt) AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, edx);
+  LOperand* right_operand = UseFixed(right, eax);
+  LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
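+      // If either successor of the predecessor's branch is compiled after
+      // this block, the environment is still needed there, so work on a
+      // copy.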
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    if (FLAG_trace_environment) {
+      PrintF("Process instruction %d\n", current->id());
+    }
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  allocator_->BeginInstruction();
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    if (current->IsBranch()) {
+      instr->set_hydrogen_value(HBranch::cast(current)->value());
+    } else {
+      instr->set_hydrogen_value(current);
+    }
+
+    int index = chunk_->AddInstruction(instr, current_block_);
+    allocator_->SummarizeInstruction(index);
+  } else {
+    // This instruction should be omitted.
+    allocator_->OmitInstruction();
+  }
+  current_instruction_ = old_current;
+}
+
+
+void LEnvironment::WriteTranslation(LCodeGen* cgen,
+                                    Translation* translation) const {
+  if (this == NULL) return;
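+  // (Reached for the outer() of the outermost environment. Dispatching on a
+  // NULL receiver is technically undefined behavior, but the compilers V8
+  // currently targets tolerate it.)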
+
+  // The translation includes one command per value in the environment.
+  int translation_size = values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - parameter_count();
+
+  outer()->WriteTranslation(cgen, translation);
+  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
+  translation->BeginFrame(ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (spilled_registers_ != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          spilled_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_registers_[value->index()],
+                               HasTaggedValueAt(i));
+      } else if (value->IsDoubleRegister() &&
+                 spilled_double_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_double_registers_[value->index()],
+                               false);
+      }
+    }
+
+    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
+  }
+}
+
+
+void LEnvironment::PrintTo(StringStream* stream) const {
+  stream->Add("[id=%d|", ast_id());
+  stream->Add("[parameters=%d|", parameter_count());
+  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
+    }
+  }
+  stream->Add("]");
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->values()->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
+  for (int i = 0; i < value_count; ++i) {
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument(argument_index++);
+    } else {
+      op = UseOrConstant(value);
+      if (op->IsUnallocated()) {
+        LUnallocated* unalloc = LUnallocated::cast(op);
+        unalloc->set_policy(LUnallocated::ANY);
+      }
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                                   instr->include_stack_check());
+  if (instr->include_stack_check()) result = AssignPointerMap(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  ASSERT(first != NULL && second != NULL);
+  int first_id = first->block_id();
+  int second_id = second->block_id();
+
+  if (v->EmitAtUses()) {
+    if (v->IsClassOfTest()) {
+      HClassOfTest* compare = HClassOfTest::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                       TempRegister(),
+                                       TempRegister(),
+                                       first_id,
+                                       second_id);
+    } else if (v->IsCompare()) {
+      HCompare* compare = HCompare::cast(v);
+      Token::Value op = compare->token();
+      HValue* left = compare->left();
+      HValue* right = compare->right();
+      if (left->representation().IsInteger32()) {
+        ASSERT(right->representation().IsInteger32());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseOrConstantAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   false);
+      } else if (left->representation().IsDouble()) {
+        ASSERT(right->representation().IsDouble());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseRegisterAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   true);
+      } else {
+        ASSERT(left->representation().IsTagged());
+        ASSERT(right->representation().IsTagged());
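+        // GT and LTE are computed as LT and GTE with the operands swapped,
+        // so the fixed registers are swapped too.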
+        bool reversed = op == Token::GT || op == Token::LTE;
+        LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
+        LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
+        LInstruction* result = new LCmpTAndBranch(left_operand,
+                                                  right_operand,
+                                                  first_id,
+                                                  second_id);
+        return MarkAsCall(result, instr);
+      }
+    } else if (v->IsIsSmi()) {
+      HIsSmi* compare = HIsSmi::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LIsSmiAndBranch(Use(compare->value()),
+                                 first_id,
+                                 second_id);
+    } else if (v->IsHasInstanceType()) {
+      HHasInstanceType* compare = HHasInstanceType::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+                                           TempRegister(),
+                                           first_id,
+                                           second_id);
+    } else if (v->IsHasCachedArrayIndex()) {
+      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasCachedArrayIndexAndBranch(
+          UseRegisterAtStart(compare->value()), first_id, second_id);
+    } else if (v->IsIsNull()) {
+      HIsNull* compare = HIsNull::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      // We only need a temp register for non-strict compare.
+      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+                                  compare->is_strict(),
+                                  temp,
+                                  first_id,
+                                  second_id);
+    } else if (v->IsIsObject()) {
+      HIsObject* compare = HIsObject::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+                                    temp1,
+                                    temp2,
+                                    first_id,
+                                    second_id);
+    } else if (v->IsCompareJSObjectEq()) {
+      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                         UseRegisterAtStart(compare->right()),
+                                         first_id,
+                                         second_id);
+    } else if (v->IsInstanceOf()) {
+      HInstanceOf* instance_of = HInstanceOf::cast(v);
+      LInstruction* result =
+          new LInstanceOfAndBranch(UseFixed(instance_of->left(), eax),
+                                   UseFixed(instance_of->right(), edx),
+                                   first_id,
+                                   second_id);
+      return MarkAsCall(result, instr);
+    } else if (v->IsTypeofIs()) {
+      HTypeofIs* typeof_is = HTypeofIs::cast(v);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
+                                    first_id,
+                                    second_id);
+    } else {
+      if (v->IsConstant()) {
+        if (HConstant::cast(v)->handle()->IsTrue()) {
+          return new LGoto(first_id);
+        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+          return new LGoto(second_id);
+        }
+      }
+      Abort("Undefined compare before branch");
+      return NULL;
+    }
+  }
+  return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+    HCompareMapAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  return new LCmpMapAndBranch(value,
+                              instr->map(),
+                              first->block_id(),
+                              second->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstruction* result =
+      new LInstanceOf(UseFixed(instr->left(), eax),
+                      UseFixed(instr->right(), edx));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* receiver = UseFixed(instr->receiver(), eax);
+  LOperand* length = UseRegisterAtStart(instr->length());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LInstruction* result = new LApplyArguments(function,
+                                             receiver,
+                                             length,
+                                             elements);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = UseOrConstant(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  return DefineAsRegister(new LGlobalObject);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  return DefineAsRegister(new LGlobalReceiver);
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  BuiltinFunctionId op = instr->op();
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+    LOperand* input = UseFixedDouble(instr->value(), xmm1);
+    LInstruction* result = new LUnaryMathOperation(input);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  } else {
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LInstruction* result = new LUnaryMathOperation(input);
+    switch (op) {
+      case kMathAbs:
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      case kMathFloor:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathRound:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathSqrt:
+        return DefineSameAsFirst(result);
+      case kMathPowHalf:
+        return AssignEnvironment(DefineSameAsFirst(result));
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  UseFixed(instr->key(), ecx);
+  return MarkAsCall(DefineFixed(new LCallKeyed, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), edi);
+  argument_count_ -= instr->argument_count();
+  LInstruction* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
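+    // idiv divides the 64-bit value in edx:eax, leaving the quotient in eax
+    // and the remainder in edx.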
+    FixedTemp(edx);
+    LOperand* value = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
+    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), eax));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
+    FixedTemp(edx);
+    LOperand* value = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
+    LInstruction* result = DefineFixed(new LModI(value, divisor), edx);
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), xmm1);
+    LOperand* right = UseFixedDouble(instr->right(), xmm2);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new LMulI(left, right, temp);
+    return AssignEnvironment(DefineSameAsFirst(mul));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineSameAsFirst(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), xmm1);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), xmm2) :
+      UseFixed(instr->right(), eax);
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+  Token::Value op = instr->token();
+  if (instr->left()->representation().IsInteger32()) {
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, false));
+  } else if (instr->left()->representation().IsDouble()) {
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, true));
+  } else {
+    bool reversed = (op == Token::GT || op == Token::LTE);
+    LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+    LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+    LInstruction* result = new LCmpT(left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+    HCompareJSObjectEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LInstruction* result = new LCmpJSObjectEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsNull(value,
+                                      instr->is_strict()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LIsObject(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseAtStart(instr->value());
+
+  return DefineAsRegister(new LIsSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LHasInstanceType(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+    HHasCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseTempRegister(instr->value());
+
+  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
+  LOperand* array = NULL;
+  LOperand* temporary = NULL;
+
+  if (instr->value()->IsLoadElements()) {
+    array = UseRegisterAtStart(instr->value());
+  } else {
+    array = UseRegister(instr->value());
+    temporary = TempRegister();
+  }
+
+  LInstruction* result = new LArrayLength(array, temporary);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LInstruction* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), eax);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      if (needs_check) {
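+        // A truncating conversion can use SSE3's fisttp directly; otherwise
+        // the deferred code needs an xmm scratch register.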
+        LOperand* xmm_temp =
+            (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+            ? NULL
+            : FixedTemp(xmm1);
+        LInstruction* res = new LTaggedToI(value, xmm_temp);
+        return AssignEnvironment(DefineSameAsFirst(res));
+      } else {
+        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp = TempRegister();
+
+      // Make sure that temp and result_temp are different registers.
+      LUnallocated* result_temp = TempRegister();
+      LInstruction* result = new LNumberTagD(value, temp);
+      return AssignPointerMap(Define(result, result_temp));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LInstruction* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, zero));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      new LCheckPrototypeMaps(temp,
+                              instr->holder(),
+                              instr->receiver_map());
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, not_zero));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), eax));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    int32_t value = instr->Integer32Value();
+    return DefineAsRegister(new LConstantI(value));
+  } else if (r.IsDouble()) {
+    double value = instr->DoubleValue();
+    return DefineAsRegister(new LConstantD(value));
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT(instr->handle()));
+  } else {
+    Abort("unsupported constant of type double");
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+  LInstruction* result = new LLoadGlobal;
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), eax);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), eax);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineSameAsFirst(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  Representation r = instr->representation();
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* load_result = NULL;
+  // A double result needs an extra temp register because the value is
+  // loaded from a heap number into a double register.
+  if (r.IsDouble()) load_result = TempRegister();
+  LInstruction* result = new LLoadKeyedFastElement(obj,
+                                                   key,
+                                                   load_result);
+  if (r.IsDouble()) {
+    result = DefineAsRegister(result);
+  } else {
+    result = DefineSameAsFirst(result);
+  }
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), edx);
+  LOperand* key = UseFixed(instr->key(), eax);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), eax);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), edx);
+  LOperand* key = UseFixed(instr->key(), ecx);
+  LOperand* val = UseFixed(instr->value(), eax);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
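+  // Smis are immediate values; storing one never needs a write barrier.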
+  bool needs_write_barrier = !instr->value()->type().IsSmi();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We only need a scratch register if we have a write barrier or we
+  // store into the properties array (i.e. not an in-object property).
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj,
+                              instr->name(),
+                              val,
+                              instr->is_in_object(),
+                              instr->offset(),
+                              temp,
+                              needs_write_barrier,
+                              instr->transition());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), edx);
+  LOperand* val = UseFixed(instr->value(), eax);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LInstruction* result = new LDeleteProperty(Use(instr->object()),
+                                             UseOrConstant(instr->key()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object; in all other cases we
+  // bail out.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = Use(instr->index());
+  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
+  return DefineAsRegister(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LInstruction* result = new LTypeof(Use(instr->value()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  if (FLAG_trace_environment) {
+    PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
+           instr->ast_id(),
+           instr->id());
+    env->PrintToStd();
+  }
+  ASSERT(env->values()->length() == instr->environment_height());
+
+  // If there is an instruction pending a deoptimization environment, create
+  // a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instructions_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+void LPointerMap::RecordPointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op);
+}
+
+
+void LPointerMap::PrintTo(StringStream* stream) const {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("} @%d", position());
+}
+
+} }  // namespace v8::internal
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
new file mode 100644
index 0000000..3f48e50
--- /dev/null
+++ b/src/ia32/lithium-ia32.h
@@ -0,0 +1,2128 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_IA32_H_
+#define V8_IA32_LITHIUM_IA32_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+class LGapNode;
+
+
+// Type hierarchy:
+//
+// LInstruction
+//   LAccessArgumentsAt
+//   LArgumentsElements
+//   LArgumentsLength
+//   LBinaryOperation
+//     LAddI
+//     LApplyArguments
+//     LArithmeticD
+//     LArithmeticT
+//     LBitI
+//     LBoundsCheck
+//     LCmpID
+//     LCmpIDAndBranch
+//     LCmpJSObjectEq
+//     LCmpJSObjectEqAndBranch
+//     LCmpT
+//     LCmpTAndBranch
+//     LDivI
+//     LInstanceOf
+//     LInstanceOfAndBranch
+//     LLoadKeyedFastElement
+//     LLoadKeyedGeneric
+//     LModI
+//     LMulI
+//     LPower
+//     LShiftI
+//     LSubI
+//   LCallConstantFunction
+//   LCallFunction
+//   LCallGlobal
+//   LCallKeyed
+//   LCallKnownGlobal
+//   LCallNamed
+//   LCallRuntime
+//   LCallStub
+//   LConstant
+//     LConstantD
+//     LConstantI
+//     LConstantT
+//   LDeoptimize
+//   LFunctionLiteral
+//   LGap
+//     LLabel
+//   LGlobalObject
+//   LGlobalReceiver
+//   LGoto
+//   LLazyBailout
+//   LLoadGlobal
+//   LMaterializedLiteral
+//     LArrayLiteral
+//     LObjectLiteral
+//     LRegExpLiteral
+//   LOsrEntry
+//   LParameter
+//   LRegExpConstructResult
+//   LStackCheck
+//   LStoreKeyed
+//     LStoreKeyedFastElement
+//     LStoreKeyedGeneric
+//   LStoreNamed
+//     LStoreNamedField
+//     LStoreNamedGeneric
+//   LUnaryOperation
+//     LArrayLength
+//     LBitNotI
+//     LBranch
+//     LCallNew
+//     LCheckFunction
+//     LCheckInstanceType
+//     LCheckMap
+//     LCheckPrototypeMaps
+//     LCheckSmi
+//     LClassOfTest
+//     LClassOfTestAndBranch
+//     LCmpMapAndBranch
+//     LDeleteProperty
+//     LDoubleToI
+//     LHasCachedArrayIndex
+//     LHasCachedArrayIndexAndBranch
+//     LHasInstanceType
+//     LHasInstanceTypeAndBranch
+//     LInteger32ToDouble
+//     LIsNull
+//     LIsNullAndBranch
+//     LIsObject
+//     LIsObjectAndBranch
+//     LIsSmi
+//     LIsSmiAndBranch
+//     LLoadElements
+//     LLoadNamedField
+//     LLoadNamedGeneric
+//     LNumberTagD
+//     LNumberTagI
+//     LNumberUntagD
+//     LPushArgument
+//     LReturn
+//     LSmiTag
+//     LSmiUntag
+//     LStoreGlobal
+//     LTaggedToI
+//     LThrow
+//     LTypeof
+//     LTypeofIs
+//     LTypeofIsAndBranch
+//     LUnaryMathOperation
+//     LValueOf
+//   LUnknownOSRValue
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(BinaryOperation)                            \
+  V(Constant)                                   \
+  V(Call)                                       \
+  V(MaterializedLiteral)                        \
+  V(StoreKeyed)                                 \
+  V(StoreNamed)                                 \
+  V(UnaryOperation)                             \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLength)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(CmpID)                                      \
+  V(CmpIDAndBranch)                             \
+  V(CmpJSObjectEq)                              \
+  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(CmpTAndBranch)                              \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(FunctionLiteral)                            \
+  V(Gap)                                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(InstanceOf)                                 \
+  V(InstanceOfAndBranch)                        \
+  V(Integer32ToDouble)                          \
+  V(IsNull)                                     \
+  V(IsNullAndBranch)                            \
+  V(IsObject)                                   \
+  V(IsObjectAndBranch)                          \
+  V(IsSmi)                                      \
+  V(IsSmiAndBranch)                             \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadElements)                               \
+  V(LoadGlobal)                                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadNamedField)                             \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(Parameter)                                  \
+  V(Power)                                      \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreGlobal)                                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(Throw)                                      \
+  V(Typeof)                                     \
+  V(TypeofIs)                                   \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type)                \
+  virtual bool Is##type() const { return true; } \
+  static L##type* cast(LInstruction* instr) {    \
+    ASSERT(instr->Is##type());                   \
+    return reinterpret_cast<L##type*>(instr);    \
+  }
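+
+// For illustration, DECLARE_INSTRUCTION(Goto) expands to
+//
+//   virtual bool IsGoto() const { return true; }
+//   static LGoto* cast(LInstruction* instr) {
+//     ASSERT(instr->IsGoto());
+//     return reinterpret_cast<LGoto*>(instr);
+//   }
+//
+// i.e. a virtual type tester plus a checked downcast, a lightweight
+// substitute for C++ RTTI across the instruction hierarchy.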
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
+  virtual void CompileToNative(LCodeGen* generator);        \
+  virtual const char* Mnemonic() const { return mnemonic; } \
+  DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
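+
+// For example, DECLARE_HYDROGEN_ACCESSOR(Add) defines
+//
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }
+//
+// letting a lithium instruction read operands and flags directly off the
+// hydrogen node it was lowered from.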
+
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction()
+      : hydrogen_value_(NULL) { }
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const { }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_.set(env); }
+  LEnvironment* environment() const { return environment_.get(); }
+  bool HasEnvironment() const { return environment_.is_set(); }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_result(LOperand* operand) { result_.set(operand); }
+  LOperand* result() const { return result_.get(); }
+  bool HasResult() const { return result_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
+  }
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
+  }
+
+ private:
+  SetOncePointer<LEnvironment> environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  SetOncePointer<LOperand> result_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+};
+
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
+  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  LOperand* marker_operand_;
+  int next_visited_id_;
+  int bailout_after_ast_id_;
+};
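+
+// The move graph handled by the resolver may contain cycles (the swap
+// eax <-> ebx is the pair of moves eax->ebx and ebx->eax), which is why the
+// interface above tracks reachability and identified cycles: each cycle is
+// broken using marker_operand_ as a scratch location so the parallel move
+// can be emitted as a plain sequence.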
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() : move_operands_(4) { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    move_operands_.Add(LMoveOperands(from, to));
+  }
+
+  bool IsRedundant() const;
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+  void PrintDataTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LMoveOperands> move_operands_;
+};
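+
+// Semantically all moves in a parallel move happen at once: every source is
+// read before any destination is written.  IsRedundant() lets the code
+// generator skip a move list in which no move has any effect.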
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
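+
+// A gap owns up to four parallel move lists, one per InnerPosition, which is
+// where the register allocator records spill, restore and block-join moves
+// relative to the surrounding instructions instead of inserting extra
+// instructions into the stream.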
+
+
+class LGoto: public LInstruction {
+ public:
+  LGoto(int block_id, bool include_stack_check = false)
+    : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+  bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+  int block_id_;
+  bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LInstruction {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
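+
+// LDeoptimize unconditionally bails out of optimized code: it carries no
+// operands and relies entirely on its attached deoptimization environment to
+// rebuild the unoptimized frame state.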
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
+  }
+};
+
+
+class LUnknownOSRValue: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+class LUnaryOperation: public LInstruction {
+ public:
+  explicit LUnaryOperation(LOperand* input) : input_(input) { }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+  LOperand* input() const { return input_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* input_;
+};
+
+
+class LBinaryOperation: public LInstruction {
+ public:
+  LBinaryOperation(LOperand* left, LOperand* right)
+      : left_(left), right_(right) { }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+  LOperand* left() const { return left_; }
+  LOperand* right() const { return right_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* left_;
+  LOperand* right_;
+};
+
+
+class LApplyArguments: public LBinaryOperation {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements)
+      : LBinaryOperation(function, receiver),
+        length_(length),
+        elements_(elements) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() const { return left(); }
+  LOperand* receiver() const { return right(); }
+  LOperand* length() const { return length_; }
+  LOperand* elements() const { return elements_; }
+
+ private:
+  LOperand* length_;
+  LOperand* elements_;
+};
+
+
+class LAccessArgumentsAt: public LInstruction {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
+      : arguments_(arguments), length_(length), index_(index) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() const { return arguments_; }
+  LOperand* length() const { return length_; }
+  LOperand* index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* arguments_;
+  LOperand* length_;
+  LOperand* index_;
+};
+
+
+class LArgumentsLength: public LUnaryOperation {
+ public:
+  explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LInstruction {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LBinaryOperation {
+ public:
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LBinaryOperation {
+ public:
+  LDivI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LBinaryOperation {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp)
+      : LBinaryOperation(left, right), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCmpID: public LBinaryOperation {
+ public:
+  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
+      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+
+  Token::Value op() const { return op_; }
+  bool is_double() const { return is_double_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+
+ private:
+  Token::Value op_;
+  bool is_double_;
+};
+
+
+class LCmpIDAndBranch: public LCmpID {
+ public:
+  LCmpIDAndBranch(Token::Value op,
+                  LOperand* left,
+                  LOperand* right,
+                  int true_block_id,
+                  int false_block_id,
+                  bool is_double)
+      : LCmpID(op, left, right, is_double),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LUnaryMathOperation: public LUnaryOperation {
+ public:
+  explicit LUnaryMathOperation(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LBinaryOperation {
+ public:
+  LCmpJSObjectEq(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+ public:
+  LCmpJSObjectEqAndBranch(LOperand* left,
+                          LOperand* right,
+                          int true_block_id,
+                          int false_block_id)
+      : LCmpJSObjectEq(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+                               "cmp-jsobject-eq-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsNull: public LUnaryOperation {
+ public:
+  LIsNull(LOperand* value, bool is_strict)
+      : LUnaryOperation(value), is_strict_(is_strict) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+
+  bool is_strict() const { return is_strict_; }
+
+ private:
+  bool is_strict_;
+};
+
+
+class LIsNullAndBranch: public LIsNull {
+ public:
+  LIsNullAndBranch(LOperand* value,
+                   bool is_strict,
+                   LOperand* temp,
+                   int true_block_id,
+                   int false_block_id)
+      : LIsNull(value, is_strict),
+        temp_(temp),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsObject: public LUnaryOperation {
+ public:
+  LIsObject(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LIsObjectAndBranch: public LIsObject {
+ public:
+  LIsObjectAndBranch(LOperand* value,
+                     LOperand* temp,
+                     LOperand* temp2,
+                     int true_block_id,
+                     int false_block_id)
+      : LIsObject(value, temp),
+        temp2_(temp2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp2() const { return temp2_; }
+
+ private:
+  LOperand* temp2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsSmi: public LUnaryOperation {
+ public:
+  explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LIsSmi {
+ public:
+  LIsSmiAndBranch(LOperand* value,
+                  int true_block_id,
+                  int false_block_id)
+      : LIsSmi(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasInstanceType: public LUnaryOperation {
+ public:
+  explicit LHasInstanceType(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+  InstanceType TestType();  // The type to test against when generating code.
+  Condition BranchCondition();  // The branch condition for 'true'.
+};
+
+
+class LHasInstanceTypeAndBranch: public LHasInstanceType {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value,
+                            LOperand* temporary,
+                            int true_block_id,
+                            int false_block_id)
+      : LHasInstanceType(value),
+        temp_(temporary),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasCachedArrayIndex: public LUnaryOperation {
+ public:
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value,
+                                int true_block_id,
+                                int false_block_id)
+      : LHasCachedArrayIndex(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LClassOfTest: public LUnaryOperation {
+ public:
+  LClassOfTest(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temporary_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* temporary() { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LClassOfTestAndBranch: public LClassOfTest {
+ public:
+  LClassOfTestAndBranch(LOperand* value,
+                        LOperand* temporary,
+                        LOperand* temporary2,
+                        int true_block_id,
+                        int false_block_id)
+      : LClassOfTest(value, temporary),
+        temporary2_(temporary2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+  LOperand* temporary2() { return temporary2_; }
+
+ private:
+  LOperand* temporary2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpT: public LBinaryOperation {
+ public:
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LCmpT {
+ public:
+  LCmpTAndBranch(LOperand* left,
+                 LOperand* right,
+                 int true_block_id,
+                 int false_block_id)
+      : LCmpT(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOf: public LBinaryOperation {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LInstanceOf {
+ public:
+  LInstanceOfAndBranch(LOperand* left,
+                       LOperand* right,
+                       int true_block_id,
+                       int false_block_id)
+      : LInstanceOf(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LBoundsCheck: public LBinaryOperation {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length)
+      : LBinaryOperation(index, length) { }
+
+  LOperand* index() const { return left(); }
+  LOperand* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LBinaryOperation {
+ public:
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
+ private:
+  Token::Value op_;
+};
+
+
+class LShiftI: public LBinaryOperation {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LBinaryOperation {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstant: public LInstruction {
+  DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant {
+ public:
+  explicit LConstantI(int32_t value) : value_(value) { }
+  int32_t value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+
+ private:
+  int32_t value_;
+};
+
+
+class LConstantD: public LConstant {
+ public:
+  explicit LConstantD(double value) : value_(value) { }
+  double value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+  double value_;
+};
+
+
+class LConstantT: public LConstant {
+ public:
+  explicit LConstantT(Handle<Object> value) : value_(value) { }
+  Handle<Object> value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+
+ private:
+  Handle<Object> value_;
+};
+
+
+class LBranch: public LUnaryOperation {
+ public:
+  LBranch(LOperand* input, int true_block_id, int false_block_id)
+      : LUnaryOperation(input),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Value)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpMapAndBranch: public LUnaryOperation {
+ public:
+  LCmpMapAndBranch(LOperand* value,
+                   Handle<Map> map,
+                   int true_block_id,
+                   int false_block_id)
+      : LUnaryOperation(value),
+        map_(map),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return map_; }
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  Handle<Map> map_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LArrayLength: public LUnaryOperation {
+ public:
+  LArrayLength(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LValueOf: public LUnaryOperation {
+ public:
+  LValueOf(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LThrow: public LUnaryOperation {
+ public:
+  explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LUnaryOperation {
+ public:
+  explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LBinaryOperation {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LBinaryOperation {
+ public:
+  LPower(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LBinaryOperation {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LBinaryOperation {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LUnaryOperation {
+ public:
+  explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LUnaryOperation {
+ public:
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LUnaryOperation {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() const { return input(); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadElements: public LUnaryOperation {
+ public:
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LBinaryOperation {
+ public:
+  LLoadKeyedFastElement(LOperand* elements,
+                        LOperand* key,
+                        LOperand* load_result)
+      : LBinaryOperation(elements, key),
+        load_result_(load_result) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() const { return left(); }
+  LOperand* key() const { return right(); }
+  LOperand* load_result() const { return load_result_; }
+
+ private:
+  LOperand* load_result_;
+};
+
+
+class LLoadKeyedGeneric: public LBinaryOperation {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key)
+      : LBinaryOperation(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LUnaryOperation {
+ public:
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LPushArgument: public LUnaryOperation {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LUnaryOperation {
+ public:
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LUnaryOperation {
+ public:
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LUnaryOperation {
+ public:
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LUnaryOperation {
+ public:
+  explicit LNumberTagD(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI: public LUnaryOperation {
+ public:
+  explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LUnaryOperation {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
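+
+// "Truncating" follows JavaScript ToInt32 semantics: when the hydrogen
+// change only feeds operations such as x | 0, the fractional part may be
+// discarded and the value wrapped modulo 2^32; a non-truncating conversion
+// must instead deoptimize if the input is not exactly representable as an
+// int32.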
+
+
+class LSmiTag: public LUnaryOperation {
+ public:
+  explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LUnaryOperation {
+ public:
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
+class LSmiUntag: public LUnaryOperation {
+ public:
+  LSmiUntag(LOperand* use, bool needs_check)
+      : LUnaryOperation(use), needs_check_(needs_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
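+
+// On ia32 a smi is a 31-bit integer stored shifted one bit left with a zero
+// tag bit, so smi-tag is a left shift (LNumberTagI above covers the case
+// where that shift can overflow) and smi-untag is an arithmetic right shift,
+// preceded by a tag check when needs_check() is true.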
+
+
+class LStoreNamed: public LInstruction {
+ public:
+  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
+      : object_(obj), name_(name), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  Handle<Object> name() const { return name_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  Handle<Object> name_;
+  LOperand* value_;
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   Handle<Object> name,
+                   LOperand* val,
+                   bool in_object,
+                   int offset,
+                   LOperand* temp,
+                   bool needs_write_barrier,
+                   Handle<Map> transition)
+      : LStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset),
+        temp_(temp),
+        needs_write_barrier_(needs_write_barrier),
+        transition_(transition) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+
+  bool is_in_object() { return is_in_object_; }
+  int offset() { return offset_; }
+  LOperand* temp() { return temp_; }
+  bool needs_write_barrier() { return needs_write_barrier_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  LOperand* temp_;
+  bool needs_write_barrier_;
+  Handle<Map> transition_;
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+  LStoreNamedGeneric(LOperand* obj,
+                     Handle<Object> name,
+                     LOperand* val)
+      : LStoreNamed(obj, name, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+};
+
+
+class LStoreKeyed: public LInstruction {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
+      : object_(obj), key_(key), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  LOperand* key() const { return key_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  LOperand* key_;
+  LOperand* value_;
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LUnaryOperation {
+ public:
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LUnaryOperation {
+ public:
+  LCheckInstanceType(LOperand* use, LOperand* temp)
+      : LUnaryOperation(use), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckMap: public LUnaryOperation {
+ public:
+  explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LInstruction {
+ public:
+  LCheckPrototypeMaps(LOperand* temp,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : temp_(temp),
+        holder_(holder),
+        receiver_map_(receiver_map) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+
+  LOperand* temp() const { return temp_; }
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+ private:
+  LOperand* temp_;
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class LCheckSmi: public LUnaryOperation {
+ public:
+  LCheckSmi(LOperand* use, Condition condition)
+      : LUnaryOperation(use), condition_(condition) { }
+
+  Condition condition() const { return condition_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const {
+    return (condition_ == zero) ? "check-non-smi" : "check-smi";
+  }
+
+ private:
+  Condition condition_;
+};
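+
+// Note that condition_ is the condition on the smi tag test under which the
+// code deoptimizes, as the mnemonic mapping above suggests: "check-smi"
+// deoptimizes when the value is not a smi, "check-non-smi" when it is.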
+
+
+class LMaterializedLiteral: public LInstruction {
+ public:
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+};
+
+
+class LArrayLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LTypeof: public LUnaryOperation {
+ public:
+  explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LUnaryOperation {
+ public:
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+};
+
+
+class LTypeofIsAndBranch: public LTypeofIs {
+ public:
+  LTypeofIsAndBranch(LOperand* value,
+                     int true_block_id,
+                     int false_block_id)
+      : LTypeofIs(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LDeleteProperty: public LBinaryOperation {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+};
+
+
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position)
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  int position() const { return position_; }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    ASSERT(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op);
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  int position_;
+  int lithium_position_;
+};
+
+
+class LEnvironment: public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               int ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer)
+      : closure_(closure),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        parameter_count_(parameter_count),
+        values_(value_count),
+        representations_(value_count),
+        spilled_registers_(NULL),
+        spilled_double_registers_(NULL),
+        outer_(outer) {
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  int ast_id() const { return ast_id_; }
+  int parameter_count() const { return parameter_count_; }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+
+  void AddValue(LOperand* operand, Representation representation) {
+    values_.Add(operand);
+    representations_.Add(representation);
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return representations_[index].IsTagged();
+  }
+
+  void Register(int deoptimization_index, int translation_index) {
+    ASSERT(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void SetSpilledRegisters(LOperand** registers,
+                           LOperand** double_registers) {
+    spilled_registers_ = registers;
+    spilled_double_registers_ = double_registers;
+  }
+
+  // Emit frame translation commands for this environment.
+  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
+
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  Handle<JSFunction> closure_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  int ast_id_;
+  int parameter_count_;
+  ZoneList<LOperand*> values_;
+  ZoneList<Representation> representations_;
+
+  // Allocation index indexed arrays of spill slot operands for registers
+  // that are also in spill slots at an OSR entry.  NULL for environments
+  // that do not correspond to an OSR entry.
+  LOperand** spilled_registers_;
+  LOperand** spilled_double_registers_;
+
+  LEnvironment* outer_;
+};
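+
+// An LEnvironment captures the unoptimized frame state (parameters, locals,
+// expression stack) at a potential deoptimization point; WriteTranslation
+// serializes it so the deoptimizer can rebuild the corresponding frames, and
+// the outer_ chain mirrors the frames of inlined callers.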
+
+class LChunkBuilder;
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph);
+
+  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const {
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
+  }
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
+  }
+
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
+  }
+
+  void Verify() const;
+
+ private:
+  int spill_slot_count_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
+};
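+
+// The chunk is the unit both the register allocator and the code generator
+// consume: a flat instruction sequence interleaved with LGaps (hence
+// IsGapAt / NearestGapPos), together with the pointer maps and inlined
+// closures collected while building it.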
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instructions_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  LOperand* Use(HValue* value, LUnallocated* operand);
+  LOperand* UseFixed(HValue* value, Register fixed_register);
+  LOperand* UseFixedDouble(HValue* value, XMMRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction. This means that the register allocator will
+  // not reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start, so the register allocator is free to assign
+  // the same register to some other operand used inside the instruction
+  // (i.e. a temporary or the output).  A usage sketch follows this
+  // header's diff.
+  LOperand* UseRegister(HValue* value);
+  LOperand* UseRegisterAtStart(HValue* value);
+
+  // A value in a register that may be trashed.
+  LOperand* UseTempRegister(HValue* value);
+  LOperand* Use(HValue* value);
+  LOperand* UseAtStart(HValue* value);
+  LOperand* UseOrConstant(HValue* value);
+  LOperand* UseOrConstantAtStart(HValue* value);
+  LOperand* UseRegisterOrConstant(HValue* value);
+  LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LInstruction* instr, LUnallocated* result);
+  LInstruction* Define(LInstruction* instr);
+  LInstruction* DefineAsRegister(LInstruction* instr);
+  LInstruction* DefineAsSpilled(LInstruction* instr, int index);
+  LInstruction* DefineSameAsAny(LInstruction* instr);
+  LInstruction* DefineSameAsFirst(LInstruction* instr);
+  LInstruction* DefineFixed(LInstruction* instr, Register reg);
+  LInstruction* DefineFixedDouble(LInstruction* instr, XMMRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+  // Temporary operand that may be a memory location.
+  LOperand* Temp();
+  // Temporary operand that must be in a register.
+  LUnallocated* TempRegister();
+  LOperand* FixedTemp(Register reg);
+  LOperand* FixedTemp(XMMRegister reg);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instructions_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_LITHIUM_IA32_H_
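The UseRegister/UseRegisterAtStart comment in LChunkBuilder above distinguishes two operand lifetimes. A minimal sketch of how a builder method typically combines them, modeled loosely on the integer-add case; the exact method body below is an illustration, not part of this patch:

// Sketch only: assumes HAdd and LAddI as declared elsewhere in this patch.
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
  // Both inputs are only needed at instruction start: the output is
  // defined same-as-first, so reusing the left operand's register for
  // the result is exactly what we want on two-address ia32.
  LOperand* left = UseRegisterAtStart(instr->left());
  LOperand* right = UseOrConstantAtStart(instr->right());
  LAddI* add = new LAddI(left, right);
  // A full UseRegister on an input would instead keep that register
  // live (and unavailable for the result) until the instruction ends.
  // AssignEnvironment allows eager deoptimization, e.g. on overflow.
  return AssignEnvironment(DefineSameAsFirst(add));
}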
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index cbf93dd..7c33906 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -74,30 +74,6 @@
 }
 
 
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
-    cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
-    j(cc, branch);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start().address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, Heap::NewSpaceMask());
-    j(cc, branch);
-  }
-}
-
-
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
@@ -109,7 +85,7 @@
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
-  Label done;
+  NearLabel done;
 
   // Skip barrier if writing a smi.
   ASSERT_EQ(0, kSmiTag);
@@ -183,13 +159,6 @@
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  cmp(esp,
-      Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
-  j(below, on_stack_overflow);
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   Set(eax, Immediate(0));
@@ -364,9 +333,20 @@
 }
 
 
-void MacroAssembler::EnterExitFrameEpilogue(int argc) {
-  // Reserve space for arguments.
-  sub(Operand(esp), Immediate(argc * kPointerSize));
+void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+  // Optionally save all XMM registers.
+  if (save_doubles) {
+    CpuFeatures::Scope scope(SSE2);
+    int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+    sub(Operand(esp), Immediate(space));
+    int offset = -2 * kPointerSize;
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+    }
+  } else {
+    sub(Operand(esp), Immediate(argc * kPointerSize));
+  }
 
   // Get the required frame alignment for the OS.
   static const int kFrameAlignment = OS::ActivationFrameAlignment();
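With save_doubles, the epilogue above reserves XMMRegister::kNumRegisters double slots directly below the two bookkeeping words of the exit frame, before the argc argument slots and alignment padding. A standalone recomputation of the slot offsets, a sketch with the ia32 constants hard-coded as assumptions:

#include <cstdio>

int main() {
  const int kPointerSize = 4;      // ia32
  const int kDoubleSize = 8;
  const int kNumXMMRegisters = 8;  // XMMRegister::kNumRegisters on ia32
  const int offset = -2 * kPointerSize;
  for (int i = 0; i < kNumXMMRegisters; i++) {
    // Mirrors movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg):
    // xmm0 lands at ebp - 16, xmm7 at ebp - 72.
    std::printf("xmm%d saved at ebp%d\n", i, offset - (i + 1) * kDoubleSize);
  }
  return 0;
}

LeaveExitFrame(true) restores from the same offsets, so the save and restore loops must stay in sync.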
@@ -380,7 +360,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame() {
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
   // Setup argc and argv in callee-saved registers.
@@ -388,17 +368,27 @@
   mov(edi, Operand(eax));
   lea(esi, Operand(ebp, eax, times_4, offset));
 
-  EnterExitFrameEpilogue(2);
+  EnterExitFrameEpilogue(2, save_doubles);
 }
 
 
 void MacroAssembler::EnterApiExitFrame(int argc) {
   EnterExitFramePrologue();
-  EnterExitFrameEpilogue(argc);
+  EnterExitFrameEpilogue(argc, false);
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all XMM registers.
+  if (save_doubles) {
+    CpuFeatures::Scope scope(SSE2);
+    int offset = -2 * kPointerSize;
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+    }
+  }
+
   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1098,6 +1088,16 @@
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  Runtime::Function* function = Runtime::FunctionForId(id);
+  Set(eax, Immediate(function->nargs));
+  mov(ebx, Immediate(ExternalReference(function)));
+  CEntryStub ces(1);
+  ces.SaveDoubles();
+  CallStub(&ces);
+}
+
+
 MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                             int num_arguments) {
   return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
@@ -1192,25 +1192,29 @@
 }
 
 
-// If true, a Handle<T> passed by value is passed and returned by
-// using the location_ field directly.  If false, it is passed and
-// returned as a pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
+// If true, a Handle<T> returned by value from a function with cdecl calling
+// convention will be returned directly as the value of its location_ field
+// in register eax.
+// If false, it is returned as a pointer to a memory region preallocated by
+// the caller. A pointer to this region must be passed to the function as an
+// implicit first argument.
+#if defined(USING_BSD_ABI) || defined(__MINGW32__)
+static const bool kReturnHandlesDirectly = true;
 #else
-static const bool kPassHandlesDirectly = false;
+static const bool kReturnHandlesDirectly = false;
 #endif
 
 
 Operand ApiParameterOperand(int index) {
-  return Operand(esp, (index + (kPassHandlesDirectly ? 0 : 1)) * kPointerSize);
+  return Operand(
+      esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
 }
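A worked example of the indexing above: when handles are returned indirectly, esp[0] holds the implicit out-parameter pointer, so every explicit parameter is shifted up by one slot. A standalone restatement, with kPointerSize == 4 assumed and kReturnHandlesDirectly passed explicitly instead of being a compile-time constant:

#include <cassert>

int ApiParameterDisplacement(int index, bool return_handles_directly) {
  const int kPointerSize = 4;  // ia32
  return (index + (return_handles_directly ? 0 : 1)) * kPointerSize;
}

int main() {
  // Direct return: explicit parameters start right at esp.
  assert(ApiParameterDisplacement(0, true) == 0);
  assert(ApiParameterDisplacement(1, true) == 4);
  // Indirect return: slot 0 is the implicit out-parameter pointer.
  assert(ApiParameterDisplacement(0, false) == 4);
  assert(ApiParameterDisplacement(1, false) == 8);
  return 0;
}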
 
 
 void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
-  if (kPassHandlesDirectly) {
+  if (kReturnHandlesDirectly) {
     EnterApiExitFrame(argc);
-    // When handles as passed directly we don't have to allocate extra
+    // When handles are returned directly we don't have to allocate extra
     // space for and pass an out parameter.
   } else {
     // We allocate two additional slots: return value and pointer to it.
@@ -1255,7 +1259,7 @@
   // Call the api function!
   call(function->address(), RelocInfo::RUNTIME_ENTRY);
 
-  if (!kPassHandlesDirectly) {
+  if (!kReturnHandlesDirectly) {
     // The returned value is a pointer to the handle holding the result.
     // Dereference this to get to the location.
     mov(eax, Operand(eax, 0));
@@ -1336,7 +1340,8 @@
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
                                     Label* done,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   bool definitely_matches = false;
   Label invoke;
   if (expected.is_immediate()) {
@@ -1387,6 +1392,7 @@
 
     if (flag == CALL_FUNCTION) {
       call(adaptor, RelocInfo::CODE_TARGET);
+      if (post_call_generator != NULL) post_call_generator->Generate();
       jmp(done);
     } else {
       jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -1399,11 +1405,14 @@
 void MacroAssembler::InvokeCode(const Operand& code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, flag, post_call_generator);
   if (flag == CALL_FUNCTION) {
     call(code);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     jmp(code);
@@ -1416,12 +1425,15 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
   Operand dummy(eax);
-  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  InvokePrologue(expected, actual, code, dummy, &done,
+                 flag, post_call_generator);
   if (flag == CALL_FUNCTION) {
     call(code, rmode);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     jmp(code, rmode);
@@ -1432,7 +1444,8 @@
 
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1441,25 +1454,37 @@
 
   ParameterCount expected(ebx);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag);
+             expected, actual, flag, post_call_generator);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
+
   ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+               expected, actual, flag, post_call_generator);
+  } else {
+    Handle<Code> code(function->code());
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
+               flag, post_call_generator);
+  }
 }
 
 
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   PostCallGenerator* post_call_generator) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -1469,7 +1494,7 @@
   ParameterCount expected(0);
   GetBuiltinFunction(edi, id);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-           expected, expected, flag);
+             expected, expected, flag, post_call_generator);
 }
 
 void MacroAssembler::GetBuiltinFunction(Register target,
@@ -1534,6 +1559,15 @@
 }
 
 
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the lowest encoding,
+  // which means that the lowest encodings are furthest away from
+  // the stack pointer.
+  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  return kNumSafepointRegisters - reg_code - 1;
+}
+
+
 void MacroAssembler::Ret() {
   ret(0);
 }
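A worked example of SafepointRegisterStackIndex: pushad pushes eax (code 0) first, so it ends up at the highest address, i.e. furthest from the final esp. A standalone check, assuming kNumSafepointRegisters == 8 as on ia32:

#include <cassert>

int SafepointRegisterStackIndex(int reg_code) {
  const int kNumSafepointRegisters = 8;  // pushad pushes eight GP registers
  return kNumSafepointRegisters - reg_code - 1;
}

int main() {
  // edi (code 7) is pushed last by pushad, so it sits at esp + 0.
  assert(SafepointRegisterStackIndex(7) == 0);
  // eax (code 0) is pushed first, so it sits at esp + 7 * kPointerSize.
  assert(SafepointRegisterStackIndex(0) == 7);
  return 0;
}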
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index d208dbe..6f5fa87 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,6 +51,7 @@
 
 // Forward declaration.
 class JumpTarget;
+class PostCallGenerator;
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
@@ -69,10 +70,11 @@
 
   // Check if object is in new space.
   // scratch can be object itself, but it will be clobbered.
+  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,  // equal for new space, not_equal otherwise.
-                  Label* branch);
+                  LabelType* branch);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -103,12 +105,6 @@
 #endif
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  // Do simple test for stack overflow. This doesn't handle an overflow.
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -117,18 +113,18 @@
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either in normal or debug mode.
-  // Expects the number of arguments in register eax and
-  // sets up the number of arguments in register edi and the pointer
-  // to the first argument in register esi.
-  void EnterExitFrame();
+  // Enter specific kind of exit frame. Expects the number of
+  // arguments in register eax and sets up the number of arguments in
+  // register edi and the pointer to the first argument in register
+  // esi.
+  void EnterExitFrame(bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in
   // register eax (untouched).
@@ -144,6 +140,11 @@
   // function and map can be the same.
   void LoadGlobalFunctionInitialMap(Register function, Register map);
 
+  // Push and pop the registers that can hold pointers.
+  void PushSafepointRegisters() { pushad(); }
+  void PopSafepointRegisters() { popad(); }
+  static int SafepointRegisterStackIndex(int reg_code);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -151,27 +152,33 @@
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
-  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeFlag flag,
+                     PostCallGenerator* post_call_generator = NULL);
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -457,6 +464,7 @@
 
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
   // Call a runtime function, returning the CodeStub object called.
   // Try to generate the stub code if necessary.  Do not perform a GC
@@ -546,6 +554,12 @@
 
   void Call(Label* target) { call(target); }
 
+  // Emit call to the code we are currently generating.
+  void CallSelf() {
+    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+    call(self, RelocInfo::CODE_TARGET);
+  }
+
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
@@ -618,14 +632,15 @@
                       Handle<Code> code_constant,
                       const Operand& code_operand,
                       Label* done,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
   void EnterExitFramePrologue();
-  void EnterExitFrameEpilogue(int argc);
+  void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
   void LeaveExitFrameEpilogue();
 
@@ -644,6 +659,31 @@
 };
 
 
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                LabelType* branch) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(scratch, Operand(object));
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
+    cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
+    j(cc, branch);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(scratch, Operand(object, -new_space_start));
+    and_(scratch, Heap::NewSpaceMask());
+    j(cc, branch);
+  }
+}
+
+
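The non-serializing branch above works because new space is one contiguous, size-aligned region: subtracting new_space_start and masking with Heap::NewSpaceMask() yields zero exactly for addresses inside it, and the and_ sets the flags that j(cc, ...) tests. A standalone check with illustrative values; the start address and size below are assumptions, not real heap constants:

#include <cassert>
#include <stdint.h>

int main() {
  // Illustrative only: an 8 MB new space at 0x20000000 (size-aligned).
  const uint32_t new_space_start = 0x20000000u;
  const uint32_t new_space_mask = ~(0x800000u - 1);  // ~(size - 1)

  // Mirrors lea(scratch, Operand(object, -new_space_start)) followed by
  // and_(scratch, Heap::NewSpaceMask()): zero iff object is in new space.
  uint32_t inside = 0x20123456u;
  uint32_t outside = 0x30123456u;
  assert(((inside - new_space_start) & new_space_mask) == 0);
  assert(((outside - new_space_start) & new_space_mask) != 0);
  return 0;
}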
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -664,6 +704,17 @@
 };
 
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example, this can be used to
+// generate safepoint data after calls for Crankshaft.
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
+};
+
+
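A sketch of how a client of this interface might look. The SafepointGenerator name, the LCodeGen type, and the RecordSafepoint signature are assumptions for illustration; the real Crankshaft code generator defines its own variant:

// Hypothetical subclass, for illustration only.
class LCodeGen;      // assumed: the lithium code generator
class LPointerMap;   // assumed: pointer map describing live pointers

class SafepointGenerator: public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers)
      : codegen_(codegen), pointers_(pointers) { }
  virtual ~SafepointGenerator() { }

  // Invoked by the macro assembler immediately after it emits a call,
  // so the recorded safepoint describes the return address.
  virtual void Generate() {
    // Assumed signature; records which stack slots hold pointers here.
    // codegen_->RecordSafepoint(pointers_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
};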
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index adcb521..99888b0 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -855,9 +855,14 @@
   }
   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Immediate(Handle<Object>(cell)));
-  __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-         Immediate(Factory::the_hole_value()));
+  if (Serializer::enabled()) {
+    __ mov(scratch, Immediate(Handle<Object>(cell)));
+    __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+           Immediate(Factory::the_hole_value()));
+  } else {
+    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
+           Immediate(Factory::the_hole_value()));
+  }
   __ j(not_equal, miss, not_taken);
   return cell;
 }
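This pattern recurs in the call, store, and load stubs below: under the serializer the cell must be referenced through a relocatable embedded handle, while otherwise its value address can be folded straight into a single memory operand. The two emitted sequences, sketched as comments; kHeapObjectTag == 1 is folded into the displacement by FieldOperand and, presumably, by Operand::Cell:

// Serializer enabled: two instructions, the cell handle is relocatable.
//   mov  scratch, <handle to cell>
//   cmp  [scratch + kValueOffset - kHeapObjectTag], the_hole_value
// Serializer disabled: one instruction, the value address is baked in.
//   cmp  [<cell address + kValueOffset - kHeapObjectTag>], the_hole_value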
@@ -1326,8 +1331,12 @@
                                                     JSFunction* function,
                                                     Label* miss) {
   // Get the value from the cell.
-  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+  if (Serializer::enabled()) {
+    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+  }
 
   // Check that the cell contains the same function.
   if (Heap::InNewSpace(function)) {
@@ -1710,7 +1719,7 @@
   char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1785,7 +1794,7 @@
   char_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1858,7 +1867,7 @@
   char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -2124,8 +2133,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, NULL, function, name);
     Object* result;
@@ -2366,8 +2375,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, cell, function, name);
     Object* result;
@@ -2399,10 +2408,18 @@
   // Jump to the cached code (tail call).
   __ IncrementCounter(&Counters::call_global_inline, 1);
   ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+                  expected, arguments(), JUMP_FUNCTION);
+  } else {
+    Handle<Code> code(function->code());
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2565,8 +2582,12 @@
   __ j(not_equal, &miss, not_taken);
 
   // Store the value in the cell.
-  __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+  if (Serializer::enabled()) {
+    __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+  } else {
+    __ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
+  }
 
   // Return the value (register eax).
   __ IncrementCounter(&Counters::named_store_global_inline, 1);
@@ -2620,6 +2641,63 @@
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map matches.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Get the elements array and make sure it is a fast element array,
+  // not a copy-on-write ('cow') array.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
+    __ j(above_equal, &miss, not_taken);
+  } else {
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // Compare smis.
+    __ j(above_equal, &miss, not_taken);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register eax.
+  __ mov(edx, Operand(eax));
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ RecordWrite(edi, 0, edx, ecx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
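A note on the scaling in the store above (the keyed load stub later in this file uses the same trick): on ia32 a smi is the integer value shifted left one bit, so a smi key already carries a factor of two, and times_2 scaling yields the full times-four pointer scaling. A standalone check under the standard ia32 smi encoding:

#include <cassert>

int main() {
  // ia32 smi encoding: value << 1 (kSmiTag == 0, kSmiTagSize == 1).
  const int kPointerSize = 4;
  int key = 3;
  int smi_key = key << 1;  // what the stub actually has in ecx (or eax)
  // times_2 scaling of the smi equals kPointerSize scaling of the
  // untagged key, which is what FieldOperand(edi, ecx, times_2, ...)
  // relies on to address element 'key' of the FixedArray.
  assert(smi_key * 2 == key * kPointerSize);
  return 0;
}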
 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
@@ -2793,8 +2871,12 @@
   CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
 
   // Get the value from the cell.
-  __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+  if (Serializer::enabled()) {
+    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+  }
 
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
@@ -3019,6 +3101,51 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map matches.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Get the elements array.
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(ecx);
+
+  // Check that the key is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss, not_taken);
+
+  // Load the result and make sure it's not the hole.
+  __ mov(ebx, Operand(ecx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(ebx, Factory::the_hole_value());
+  __ j(equal, &miss, not_taken);
+  __ mov(eax, ebx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {