Version 3.2.9.

Removed support for ABIs prior to EABI on ARM.

Fixed multiple crash bugs.

Added GCMole to the repository, a simple static analysis tool that
searches for GC-unsafe, evaluation-order-dependent call sites.
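
A minimal, self-contained sketch of the bug class GCMole looks for;
the types below are toy stand-ins for v8's internal Object and Handle,
not code from this tree:

    #include <cstdio>

    struct Object { int field; };

    static Object old_space = { 1 };
    static Object new_space = { 2 };
    static Object* slot = &old_space;   // heap cell that a handle tracks

    struct Handle {
      Object** location;
      Object* raw() const { return *location; }  // raw pointer: GC-unsafe
    };

    // Simulates an allocating call: a GC runs and the object "moves".
    Handle AllocateName() {
      slot = &new_space;
      static Object* name_slot = &new_space;
      Handle name = { &name_slot };
      return name;
    }

    static void SetProperty(Object* raw_receiver, Handle /*name*/) {
      std::printf("%d\n", raw_receiver->field);  // prints 1 if stale
    }

    int main() {
      Handle receiver = { &slot };
      // C++ leaves argument evaluation order unspecified: if AllocateName()
      // runs after receiver.raw(), the raw pointer is stale by call time.
      SetProperty(receiver.raw(), AllocateName());
      // Safe variant: finish the allocating call before taking raw pointers.
      Handle name = AllocateName();
      SetProperty(receiver.raw(), name);
      return 0;
    }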

Made the preparser API exported in shared libraries.

Fixed multiple issues in the ECMAScript 5 strict mode implementation.

Fixed the __proto__ property being mutable when the object is not
extensible (Issue 1309).
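
A sketch of the fixed semantics, written against this release's public
API (error handling omitted; the embedded script is the point):

    #include <v8.h>

    int main() {
      v8::HandleScope scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::Handle<v8::String> source = v8::String::New(
          "var o = {}, p = {};"
          "Object.preventExtensions(o);"
          "o.__proto__ = p;"  // now ignored on a non-extensible object
          "Object.getPrototypeOf(o) === p");
      v8::Handle<v8::Value> result = v8::Script::Compile(source)->Run();
      // result->IsFalse() holds after the fix; previously the prototype
      // was silently replaced.
      context.Dispose();
      return 0;
    }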

Fixed auto-suspension of the sampler thread.


git-svn-id: http://v8.googlecode.com/svn/trunk@7570 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
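
A note for the first api.cc hunk below: v8::Object::SetPrototype now
installs a TryCatch so that an exception raised while setting the
prototype cannot propagate to the caller. The same guard pattern,
sketched against the public API (the helper is hypothetical, not part
of the patch):

    #include <v8.h>

    // Hypothetical helper: swallow any exception raised while replacing
    // a prototype and report failure instead of rethrowing.
    bool SetPrototypeNoThrow(v8::Handle<v8::Object> obj,
                             v8::Handle<v8::Value> proto) {
      v8::TryCatch try_catch;
      bool ok = obj->SetPrototype(proto);
      return ok && !try_catch.HasCaught();
    }
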
diff --git a/src/SConscript b/src/SConscript
index e5f4e32..06ee907 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -87,7 +87,6 @@
     interpreter-irregexp.cc
     isolate.cc
     jsregexp.cc
-    jump-target.cc
     lithium-allocator.cc
     lithium.cc
     liveedit.cc
@@ -107,7 +106,6 @@
     regexp-macro-assembler-irregexp.cc
     regexp-macro-assembler.cc
     regexp-stack.cc
-    register-allocator.cc
     rewriter.cc
     runtime.cc
     runtime-profiler.cc
@@ -133,14 +131,11 @@
     v8threads.cc
     variables.cc
     version.cc
-    virtual-frame.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
     """),
   'arch:arm': Split("""
-    jump-target-light.cc
-    virtual-frame-light.cc
     arm/builtins-arm.cc
     arm/code-stubs-arm.cc
     arm/codegen-arm.cc
@@ -152,20 +147,15 @@
     arm/frames-arm.cc
     arm/full-codegen-arm.cc
     arm/ic-arm.cc
-    arm/jump-target-arm.cc
     arm/lithium-arm.cc
     arm/lithium-codegen-arm.cc
     arm/lithium-gap-resolver-arm.cc
     arm/macro-assembler-arm.cc
     arm/regexp-macro-assembler-arm.cc
-    arm/register-allocator-arm.cc
     arm/stub-cache-arm.cc
-    arm/virtual-frame-arm.cc
     arm/assembler-arm.cc
     """),
   'arch:mips': Split("""
-    jump-target-light.cc
-    virtual-frame-light.cc
     mips/assembler-mips.cc
     mips/builtins-mips.cc
     mips/code-stubs-mips.cc
@@ -178,16 +168,11 @@
     mips/frames-mips.cc
     mips/full-codegen-mips.cc
     mips/ic-mips.cc
-    mips/jump-target-mips.cc
     mips/macro-assembler-mips.cc
     mips/regexp-macro-assembler-mips.cc
-    mips/register-allocator-mips.cc
     mips/stub-cache-mips.cc
-    mips/virtual-frame-mips.cc
     """),
   'arch:ia32': Split("""
-    jump-target-heavy.cc
-    virtual-frame-heavy.cc
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
     ia32/code-stubs-ia32.cc
@@ -199,19 +184,14 @@
     ia32/frames-ia32.cc
     ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
-    ia32/jump-target-ia32.cc
     ia32/lithium-codegen-ia32.cc
     ia32/lithium-gap-resolver-ia32.cc
     ia32/lithium-ia32.cc
     ia32/macro-assembler-ia32.cc
     ia32/regexp-macro-assembler-ia32.cc
-    ia32/register-allocator-ia32.cc
     ia32/stub-cache-ia32.cc
-    ia32/virtual-frame-ia32.cc
     """),
   'arch:x64': Split("""
-    jump-target-heavy.cc
-    virtual-frame-heavy.cc
     x64/assembler-x64.cc
     x64/builtins-x64.cc
     x64/code-stubs-x64.cc
@@ -223,15 +203,12 @@
     x64/frames-x64.cc
     x64/full-codegen-x64.cc
     x64/ic-x64.cc
-    x64/jump-target-x64.cc
     x64/lithium-codegen-x64.cc
     x64/lithium-gap-resolver-x64.cc
     x64/lithium-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
-    x64/register-allocator-x64.cc
     x64/stub-cache-x64.cc
-    x64/virtual-frame-x64.cc
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
   'simulator:mips': ['mips/simulator-mips.cc'],
diff --git a/src/api.cc b/src/api.cc
index b0d287d..c2b2df8 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2580,6 +2580,9 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  // We do not allow exceptions thrown while setting the prototype
+  // to propagate outside.
+  TryCatch try_catch;
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
   has_pending_exception = result.is_null();
@@ -2792,6 +2795,26 @@
 }
 
 
+static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
+                                        i::Handle<i::JSObject> receiver,
+                                        i::Handle<i::String> name,
+                                        i::LookupResult* lookup) {
+  if (!lookup->IsProperty()) {
+    // No real property was found.
+    return Local<Value>();
+  }
+
+  // If the property being looked up is a callback, it can throw
+  // an exception.
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+
+  return Utils::ToLocal(result);
+}
+
+
 Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
       Handle<String> key) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -2803,17 +2826,7 @@
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
-  if (lookup.IsProperty()) {
-    PropertyAttributes attributes;
-    i::Object* property =
-        self_obj->GetProperty(*self_obj,
-                              &lookup,
-                              *key_obj,
-                              &attributes)->ToObjectUnchecked();
-    i::Handle<i::Object> result(property);
-    return Utils::ToLocal(result);
-  }
-  return Local<Value>();  // No real property was found in prototype chain.
+  return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
 
 
@@ -2826,17 +2839,7 @@
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
-  if (lookup.IsProperty()) {
-    PropertyAttributes attributes;
-    i::Object* property =
-        self_obj->GetProperty(*self_obj,
-                              &lookup,
-                              *key_obj,
-                              &attributes)->ToObjectUnchecked();
-    i::Handle<i::Object> result(property);
-    return Utils::ToLocal(result);
-  }
-  return Local<Value>();  // No real property was found in prototype chain.
+  return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
 
 
@@ -4473,7 +4476,7 @@
   if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
   STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
   isolate->global_handles()->AddImplicitReferences(
-      *Utils::OpenHandle(*parent),
+      i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
       reinterpret_cast<i::Object***>(children), length);
 }
 
diff --git a/src/api.h b/src/api.h
index 6d46713..7423d28 100644
--- a/src/api.h
+++ b/src/api.h
@@ -53,8 +53,8 @@
 class NeanderObject {
  public:
   explicit NeanderObject(int size);
-  inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
-  inline NeanderObject(v8::internal::Object* obj);
+  explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+  explicit inline NeanderObject(v8::internal::Object* obj);
   inline v8::internal::Object* get(int index);
   inline void set(int index, v8::internal::Object* value);
   inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
@@ -69,7 +69,7 @@
 class NeanderArray {
  public:
   NeanderArray();
-  inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+  explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
   inline v8::internal::Handle<v8::internal::JSObject> value() {
     return obj_.value();
   }
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 48000b4..73dddfd 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1824,45 +1824,6 @@
 }
 
 
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    const MemOperand& dst,
-                    LFlag l,
-                    Condition cond) {
-  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    Register rn,
-                    int option,
-                    LFlag l,
-                    Condition cond) {
-  // Unindexed addressing.
-  ASSERT(is_uint8(option));
-  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
-       coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor
-                     coproc, CRegister crd,
-                     const MemOperand& dst,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, dst, l, kSpecialCondition);
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
 // Support for VFP.
 
 void Assembler::vldr(const DwVfpRegister dst,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 185ab6a..9050c2c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -947,16 +947,6 @@
   void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
             LFlag l = Short);  // v5 and above
 
-  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-           LFlag l = Short, Condition cond = al);
-  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
-           LFlag l = Short, Condition cond = al);
-
-  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-            LFlag l = Short);  // v5 and above
-  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
-            LFlag l = Short);  // v5 and above
-
   // Support for VFP.
   // All these APIs support S0 to S31 and D0 to D15.
   // Currently these APIs do not support extended D registers, i.e, D16 to D31.
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 9cca536..5235dd3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_ARM)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 328b519..eecd01d 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -308,13 +308,9 @@
 
 
 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
-  Register exponent = result1_;
-  Register mantissa = result2_;
-#else
   Register exponent = result2_;
   Register mantissa = result1_;
-#endif
+
   Label not_special;
   // Convert from Smi to integer.
   __ mov(source_, Operand(source_, ASR, kSmiTagSize));
@@ -951,18 +947,10 @@
   // Call C routine that may not cause GC or other trouble.
   __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
                    4);
-  // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-  // Double returned in fp coprocessor register 0 and 1, encoded as
-  // register cr8.  Offsets must be divisible by 4 for coprocessor so we
-  // need to substract the tag from heap_number_result.
-  __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
-  __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
-#else
-  // Double returned in registers 0 and 1.
+  // Store answer in the overwritable heap number. Double returned in
+  // registers r0 and r1.
   __ Strd(r0, r1, FieldMemOperand(heap_number_result,
                                   HeapNumber::kValueOffset));
-#endif
   // Place heap_number_result in r0 and return to the pushed return address.
   __ mov(r0, Operand(heap_number_result));
   __ pop(pc);
@@ -1780,1064 +1768,6 @@
 }
 
 
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow).  We branch into this code (to the not_smi label) if
-// the operands were not both Smi.  The operands are in r0 and r1.  In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
-    MacroAssembler* masm,
-    Label* not_smi,
-    Register lhs,
-    Register rhs,
-    const Builtins::JavaScript& builtin) {
-  Label slow, slow_reverse, do_the_call;
-  bool use_fp_registers =
-      CpuFeatures::IsSupported(VFP3) &&
-      Token::MOD != op_;
-
-  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
-  Register heap_number_map = r6;
-
-  if (ShouldGenerateSmiCode()) {
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Smi-smi case (overflow).
-    // Since both are Smis there is no heap number to overwrite, so allocate.
-    // The new heap number is in r5.  r3 and r7 are scratch.
-    __ AllocateHeapNumber(
-        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
-    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
-    // using registers d7 and d6 for the double values.
-    if (CpuFeatures::IsSupported(VFP3)) {
-      CpuFeatures::Scope scope(VFP3);
-      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
-      __ vmov(s15, r7);
-      __ vcvt_f64_s32(d7, s15);
-      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
-      __ vmov(s13, r7);
-      __ vcvt_f64_s32(d6, s13);
-      if (!use_fp_registers) {
-        __ vmov(r2, r3, d7);
-        __ vmov(r0, r1, d6);
-      }
-    } else {
-      // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch.
-      __ mov(r7, Operand(rhs));
-      ConvertToDoubleStub stub1(r3, r2, r7, r9);
-      __ push(lr);
-      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
-      __ mov(r7, Operand(lhs));
-      ConvertToDoubleStub stub2(r1, r0, r7, r9);
-      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-    __ jmp(&do_the_call);  // Tail call.  No return.
-  }
-
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  // After this point we have the left hand side in r1 and the right hand side
-  // in r0.
-  if (lhs.is(r0)) {
-    __ Swap(r0, r1, ip);
-  }
-
-  // The type transition also calculates the answer.
-  bool generate_code_to_calculate_answer = true;
-
-  if (ShouldGenerateFPCode()) {
-    // DIV has neither SmiSmi fast code nor specialized slow code.
-    // So don't try to patch a DIV Stub.
-    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
-      switch (op_) {
-        case Token::ADD:
-        case Token::SUB:
-        case Token::MUL:
-          GenerateTypeTransition(masm);  // Tail call.
-          generate_code_to_calculate_answer = false;
-          break;
-
-        case Token::DIV:
-          // DIV has neither SmiSmi fast code nor specialized slow code.
-          // So don't try to patch a DIV Stub.
-          break;
-
-        default:
-          break;
-      }
-    }
-
-    if (generate_code_to_calculate_answer) {
-      Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
-      if (mode_ == NO_OVERWRITE) {
-        // In the case where there is no chance of an overwritable float we may
-        // as well do the allocation immediately while r0 and r1 are untouched.
-        __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
-      }
-
-      // Move r0 to a double in r2-r3.
-      __ tst(r0, Operand(kSmiTagMask));
-      __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-      __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
-      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r4, heap_number_map);
-      __ b(ne, &slow);
-      if (mode_ == OVERWRITE_RIGHT) {
-        __ mov(r5, Operand(r0));  // Overwrite this heap number.
-      }
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // Load the double from tagged HeapNumber r0 to d7.
-        __ sub(r7, r0, Operand(kHeapObjectTag));
-        __ vldr(d7, r7, HeapNumber::kValueOffset);
-      } else {
-        // Calling convention says that second double is in r2 and r3.
-        __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-      }
-      __ jmp(&finished_loading_r0);
-      __ bind(&r0_is_smi);
-      if (mode_ == OVERWRITE_RIGHT) {
-        // We can't overwrite a Smi so get address of new heap number into r5.
-      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-      }
-
-      if (CpuFeatures::IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        // Convert smi in r0 to double in d7.
-        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-        __ vmov(s15, r7);
-        __ vcvt_f64_s32(d7, s15);
-        if (!use_fp_registers) {
-          __ vmov(r2, r3, d7);
-        }
-      } else {
-        // Write Smi from r0 to r3 and r2 in double format.
-        __ mov(r7, Operand(r0));
-        ConvertToDoubleStub stub3(r3, r2, r7, r4);
-        __ push(lr);
-        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
-        __ pop(lr);
-      }
-
-      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
-      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
-      Label r1_is_not_smi;
-      if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
-          HasSmiSmiFastPath()) {
-        __ tst(r1, Operand(kSmiTagMask));
-        __ b(ne, &r1_is_not_smi);
-        GenerateTypeTransition(masm);  // Tail call.
-      }
-
-      __ bind(&finished_loading_r0);
-
-      // Move r1 to a double in r0-r1.
-      __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-      __ bind(&r1_is_not_smi);
-      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
-      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r4, heap_number_map);
-      __ b(ne, &slow);
-      if (mode_ == OVERWRITE_LEFT) {
-        __ mov(r5, Operand(r1));  // Overwrite this heap number.
-      }
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // Load the double from tagged HeapNumber r1 to d6.
-        __ sub(r7, r1, Operand(kHeapObjectTag));
-        __ vldr(d6, r7, HeapNumber::kValueOffset);
-      } else {
-        // Calling convention says that first double is in r0 and r1.
-        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
-      }
-      __ jmp(&finished_loading_r1);
-      __ bind(&r1_is_smi);
-      if (mode_ == OVERWRITE_LEFT) {
-        // We can't overwrite a Smi so get address of new heap number into r5.
-      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-      }
-
-      if (CpuFeatures::IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        // Convert smi in r1 to double in d6.
-        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-        __ vmov(s13, r7);
-        __ vcvt_f64_s32(d6, s13);
-        if (!use_fp_registers) {
-          __ vmov(r0, r1, d6);
-        }
-      } else {
-        // Write Smi from r1 to r1 and r0 in double format.
-        __ mov(r7, Operand(r1));
-        ConvertToDoubleStub stub4(r1, r0, r7, r9);
-        __ push(lr);
-        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
-        __ pop(lr);
-      }
-
-      __ bind(&finished_loading_r1);
-    }
-
-    if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
-      __ bind(&do_the_call);
-      // If we are inlining the operation using VFP3 instructions for
-      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // ARMv7 VFP3 instructions to implement
-        // double precision, add, subtract, multiply, divide.
-
-        if (Token::MUL == op_) {
-          __ vmul(d5, d6, d7);
-        } else if (Token::DIV == op_) {
-          __ vdiv(d5, d6, d7);
-        } else if (Token::ADD == op_) {
-          __ vadd(d5, d6, d7);
-        } else if (Token::SUB == op_) {
-          __ vsub(d5, d6, d7);
-        } else {
-          UNREACHABLE();
-        }
-        __ sub(r0, r5, Operand(kHeapObjectTag));
-        __ vstr(d5, r0, HeapNumber::kValueOffset);
-        __ add(r0, r0, Operand(kHeapObjectTag));
-        __ Ret();
-      } else {
-        // If we did not inline the operation, then the arguments are in:
-        // r0: Left value (least significant part of mantissa).
-        // r1: Left value (sign, exponent, top of mantissa).
-        // r2: Right value (least significant part of mantissa).
-        // r3: Right value (sign, exponent, top of mantissa).
-        // r5: Address of heap number for result.
-
-        __ push(lr);   // For later.
-        __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
-        // Call C routine that may not cause GC or other trouble. r5 is callee
-        // save.
-        __ CallCFunction(
-            ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
-        // Store answer in the overwritable heap number.
-    #if !defined(USE_ARM_EABI)
-        // Double returned in fp coprocessor register 0 and 1, encoded as
-        // register cr8.  Offsets must be divisible by 4 for coprocessor so we
-        // need to substract the tag from r5.
-        __ sub(r4, r5, Operand(kHeapObjectTag));
-        __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
-    #else
-        // Double returned in registers 0 and 1.
-        __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
-    #endif
-        __ mov(r0, Operand(r5));
-        // And we are done.
-        __ pop(pc);
-      }
-    }
-  }
-
-  if (!generate_code_to_calculate_answer &&
-      !slow_reverse.is_linked() &&
-      !slow.is_linked()) {
-    return;
-  }
-
-  if (lhs.is(r0)) {
-    __ b(&slow);
-    __ bind(&slow_reverse);
-    __ Swap(r0, r1, ip);
-  }
-
-  heap_number_map = no_reg;  // Don't use this any more from here on.
-
-  // We jump to here if something goes wrong (one param is not a number of any
-  // sort or new-space allocation fails).
-  __ bind(&slow);
-
-  // Push arguments to the stack
-  __ Push(r1, r0);
-
-  if (Token::ADD == op_) {
-    // Test for string arguments before calling runtime.
-    // r1 : first argument
-    // r0 : second argument
-    // sp[0] : second argument
-    // sp[4] : first argument
-
-    Label not_strings, not_string1, string1, string1_smi2;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &not_string1);
-    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_string1);
-
-    // First argument is a a string, test second.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &string1_smi2);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &string1);
-
-    // First and second argument are strings.
-    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-    __ TailCallStub(&string_add_stub);
-
-    __ bind(&string1_smi2);
-    // First argument is a string, second is a smi. Try to lookup the number
-    // string for the smi in the number string cache.
-    NumberToStringStub::GenerateLookupNumberStringCache(
-        masm, r0, r2, r4, r5, r6, true, &string1);
-
-    // Replace second argument on stack and tailcall string add stub to make
-    // the result.
-    __ str(r2, MemOperand(sp, 0));
-    __ TailCallStub(&string_add_stub);
-
-    // Only first argument is a string.
-    __ bind(&string1);
-    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
-    // First argument was not a string, test second.
-    __ bind(&not_string1);
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &not_strings);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_strings);
-
-    // Only second argument is a string.
-    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
-    __ bind(&not_strings);
-  }
-
-  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
-}
-
-
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value.  We truncate towards zero as required
-// by the ES spec.  If this is the case we do the bitwise op and see if the
-// result is a Smi.  If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                                                Register lhs,
-                                                Register rhs) {
-  Label slow, result_not_a_smi;
-  Label rhs_is_smi, lhs_is_smi;
-  Label done_checking_rhs, done_checking_lhs;
-
-  Register heap_number_map = r6;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  __ tst(lhs, Operand(kSmiTagMask));
-  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
-  __ cmp(r4, heap_number_map);
-  __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
-  __ jmp(&done_checking_lhs);
-  __ bind(&lhs_is_smi);
-  __ mov(r3, Operand(lhs, ASR, 1));
-  __ bind(&done_checking_lhs);
-
-  __ tst(rhs, Operand(kSmiTagMask));
-  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
-  __ cmp(r4, heap_number_map);
-  __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
-  __ jmp(&done_checking_rhs);
-  __ bind(&rhs_is_smi);
-  __ mov(r2, Operand(rhs, ASR, 1));
-  __ bind(&done_checking_rhs);
-
-  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
-  // r0 and r1: Original operands (Smi or heap numbers).
-  // r2 and r3: Signed int32 operands.
-  switch (op_) {
-    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
-    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
-    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
-    case Token::SAR:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, ASR, r2));
-      break;
-    case Token::SHR:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, LSR, r2), SetCC);
-      // SHR is special because it is required to produce a positive answer.
-      // The code below for writing into heap numbers isn't capable of writing
-      // the register as an unsigned int so we go to slow case if we hit this
-      // case.
-      if (CpuFeatures::IsSupported(VFP3)) {
-        __ b(mi, &result_not_a_smi);
-      } else {
-        __ b(mi, &slow);
-      }
-      break;
-    case Token::SHL:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, LSL, r2));
-      break;
-    default: UNREACHABLE();
-  }
-  // check that the *signed* result fits in a smi
-  __ add(r3, r2, Operand(0x40000000), SetCC);
-  __ b(mi, &result_not_a_smi);
-  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
-  __ Ret();
-
-  Label have_to_allocate, got_a_heap_number;
-  __ bind(&result_not_a_smi);
-  switch (mode_) {
-    case OVERWRITE_RIGHT: {
-      __ tst(rhs, Operand(kSmiTagMask));
-      __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(rhs));
-      break;
-    }
-    case OVERWRITE_LEFT: {
-      __ tst(lhs, Operand(kSmiTagMask));
-      __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(lhs));
-      break;
-    }
-    case NO_OVERWRITE: {
-      // Get a new heap number in r5.  r4 and r7 are scratch.
-      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-    }
-    default: break;
-  }
-  __ bind(&got_a_heap_number);
-  // r2: Answer as signed int32.
-  // r5: Heap number to write answer into.
-
-  // Nothing can go wrong now, so move the heap number to r0, which is the
-  // result.
-  __ mov(r0, Operand(r5));
-
-  if (CpuFeatures::IsSupported(VFP3)) {
-    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(s0, r2);
-    if (op_ == Token::SHR) {
-      __ vcvt_f64_u32(d0, s0);
-    } else {
-      __ vcvt_f64_s32(d0, s0);
-    }
-    __ sub(r3, r0, Operand(kHeapObjectTag));
-    __ vstr(d0, r3, HeapNumber::kValueOffset);
-    __ Ret();
-  } else {
-    // Tail call that writes the int32 in r2 to the heap number in r0, using
-    // r3 as scratch.  r0 is preserved and returned.
-    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
-    __ TailCallStub(&stub);
-  }
-
-  if (mode_ != NO_OVERWRITE) {
-    __ bind(&have_to_allocate);
-    // Get a new heap number in r5.  r4 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-    __ jmp(&got_a_heap_number);
-  }
-
-  // If all else failed then we go to the runtime system.
-  __ bind(&slow);
-  __ Push(lhs, rhs);  // Restore stack.
-  switch (op_) {
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-
-
-// This function takes the known int in a register for the cases
-// where it doesn't know a good trick, and may deliver
-// a result that needs shifting.
-static void MultiplyByKnownIntInStub(
-    MacroAssembler* masm,
-    Register result,
-    Register source,
-    Register known_int_register,   // Smi tagged.
-    int known_int,
-    int* required_shift) {  // Including Smi tag shift
-  switch (known_int) {
-    case 3:
-      __ add(result, source, Operand(source, LSL, 1));
-      *required_shift = 1;
-      break;
-    case 5:
-      __ add(result, source, Operand(source, LSL, 2));
-      *required_shift = 1;
-      break;
-    case 6:
-      __ add(result, source, Operand(source, LSL, 1));
-      *required_shift = 2;
-      break;
-    case 7:
-      __ rsb(result, source, Operand(source, LSL, 3));
-      *required_shift = 1;
-      break;
-    case 9:
-      __ add(result, source, Operand(source, LSL, 3));
-      *required_shift = 1;
-      break;
-    case 10:
-      __ add(result, source, Operand(source, LSL, 2));
-      *required_shift = 2;
-      break;
-    default:
-      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
-      __ mul(result, source, known_int_register);
-      *required_shift = 0;
-  }
-}
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick.  See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits base (mask + 1) repeatedly until we have a
-// number from 0 to mask.  On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
-                              Register lhs,
-                              int mask,
-                              int shift,
-                              Label* entry) {
-  ASSERT(mask > 0);
-  ASSERT(mask <= 0xff);  // This ensures we don't need ip to use it.
-  Label loop;
-  __ bind(&loop);
-  __ and_(ip, lhs, Operand(mask));
-  __ add(lhs, ip, Operand(lhs, LSR, shift));
-  __ bind(entry);
-  __ cmp(lhs, Operand(mask));
-  __ b(gt, &loop);
-}
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
-                              Register lhs,
-                              Register scratch,
-                              int mask,
-                              int shift1,
-                              int shift2,
-                              Label* entry) {
-  ASSERT(mask > 0);
-  ASSERT(mask <= 0xff);  // This ensures we don't need ip to use it.
-  Label loop;
-  __ bind(&loop);
-  __ bic(scratch, lhs, Operand(mask));
-  __ and_(ip, lhs, Operand(mask));
-  __ add(lhs, ip, Operand(lhs, LSR, shift1));
-  __ add(lhs, lhs, Operand(scratch, LSR, shift2));
-  __ bind(entry);
-  __ cmp(lhs, Operand(mask));
-  __ b(gt, &loop);
-}
-
-
-// Splits the number into two halves (bottom half has shift bits).  The top
-// half is subtracted from the bottom half.  If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
-                                                Register lhs,
-                                                int shift,
-                                                int rhs) {
-  int mask = (1 << shift) - 1;
-  __ and_(ip, lhs, Operand(mask));
-  __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
-  __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
-                               Register lhs,
-                               int max,
-                               int denominator) {
-  int limit = denominator;
-  while (limit * 2 <= max) limit *= 2;
-  while (limit >= denominator) {
-    __ cmp(lhs, Operand(limit));
-    __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
-    limit >>= 1;
-  }
-}
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
-                               Register result,
-                               Register shift_distance,
-                               Register mask_bits,
-                               Register sum_of_digits) {
-  __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
-  __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
-  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
-  __ bic(odd_number_, odd_number_, Operand(1));
-  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
-  // We now have (odd_number_ - 1) * 2 in the register.
-  // Build a switch out of branches instead of data because it avoids
-  // having to teach the assembler about intra-code-object pointers
-  // that are not in relative branch instructions.
-  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
-  Label mod21, mod23, mod25;
-  { Assembler::BlockConstPoolScope block_const_pool(masm);
-    __ add(pc, pc, Operand(odd_number_));
-    // When you read pc it is always 8 ahead, but when you write it you always
-    // write the actual value.  So we put in two nops to take up the slack.
-    __ nop();
-    __ nop();
-    __ b(&mod3);
-    __ b(&mod5);
-    __ b(&mod7);
-    __ b(&mod9);
-    __ b(&mod11);
-    __ b(&mod13);
-    __ b(&mod15);
-    __ b(&mod17);
-    __ b(&mod19);
-    __ b(&mod21);
-    __ b(&mod23);
-    __ b(&mod25);
-  }
-
-  // For each denominator we find a multiple that is almost only ones
-  // when expressed in binary.  Then we do the sum-of-digits trick for
-  // that number.  If the multiple is not 1 then we have to do a little
-  // more work afterwards to get the answer into the 0-denominator-1
-  // range.
-  DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11.
-  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111.
-  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111.
-  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111.
-  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111.
-  ModReduce(masm, lhs_, 0x3f, 11);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111.
-  ModReduce(masm, lhs_, 0xff, 13);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111.
-  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111.
-  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111.
-  ModReduce(masm, lhs_, 0xff, 19);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111.
-  ModReduce(masm, lhs_, 0x3f, 21);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101.
-  ModReduce(masm, lhs_, 0xff, 23);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101.
-  ModReduce(masm, lhs_, 0x7f, 25);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  // lhs_ : x
-  // rhs_ : y
-  // r0   : result
-
-  Register result = r0;
-  Register lhs = lhs_;
-  Register rhs = rhs_;
-
-  // This code can't cope with other register allocations yet.
-  ASSERT(result.is(r0) &&
-         ((lhs.is(r0) && rhs.is(r1)) ||
-          (lhs.is(r1) && rhs.is(r0))));
-
-  Register smi_test_reg = r7;
-  Register scratch = r9;
-
-  // All ops need to know whether we are dealing with two Smis.  Set up
-  // smi_test_reg to tell us that.
-  if (ShouldGenerateSmiCode()) {
-    __ orr(smi_test_reg, lhs, Operand(rhs));
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      Label not_smi;
-      // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
-        // Return if no overflow.
-        __ Ret(vc);
-        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
-      break;
-    }
-
-    case Token::SUB: {
-      Label not_smi;
-      // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        if (lhs.is(r1)) {
-          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
-          // Return if no overflow.
-          __ Ret(vc);
-          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-        } else {
-          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
-          // Return if no overflow.
-          __ Ret(vc);
-          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
-        }
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
-      break;
-    }
-
-    case Token::MUL: {
-      Label not_smi, slow;
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // adjust code below
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        Register scratch2 = smi_test_reg;
-        smi_test_reg = no_reg;
-        __ b(ne, &not_smi);
-        // Remove tag from one operand (but keep sign), so that result is Smi.
-        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
-        // Do multiplication
-        // scratch = lower 32 bits of ip * lhs.
-        __ smull(scratch, scratch2, lhs, ip);
-        // Go slow on overflows (overflow bit is not set).
-        __ mov(ip, Operand(scratch, ASR, 31));
-        // No overflow if higher 33 bits are identical.
-        __ cmp(ip, Operand(scratch2));
-        __ b(ne, &slow);
-        // Go slow on zero result to handle -0.
-        __ tst(scratch, Operand(scratch));
-        __ mov(result, Operand(scratch), LeaveCC, ne);
-        __ Ret(ne);
-        // We need -0 if we were multiplying a negative number with 0 to get 0.
-        // We know one of them was zero.
-        __ add(scratch2, rhs, Operand(lhs), SetCC);
-        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
-        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
-        // Slow case.  We fall through here if we multiplied a negative number
-        // with 0, because that would mean we should produce -0.
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
-      break;
-    }
-
-    case Token::DIV:
-    case Token::MOD: {
-      Label not_smi;
-      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
-        Label lhs_is_unsuitable;
-        __ JumpIfNotSmi(lhs, &not_smi);
-        if (IsPowerOf2(constant_rhs_)) {
-          if (op_ == Token::MOD) {
-            __ and_(rhs,
-                    lhs,
-                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
-                    SetCC);
-            // We now have the answer, but if the input was negative we also
-            // have the sign bit.  Our work is done if the result is
-            // positive or zero:
-            if (!rhs.is(r0)) {
-              __ mov(r0, rhs, LeaveCC, pl);
-            }
-            __ Ret(pl);
-            // A mod of a negative left hand side must return a negative number.
-            // Unfortunately if the answer is 0 then we must return -0.  And we
-            // already optimistically trashed rhs so we may need to restore it.
-            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
-            // Next two instructions are conditional on the answer being -0.
-            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
-            __ b(eq, &lhs_is_unsuitable);
-            // We need to subtract the dividend.  Eg. -3 % 4 == -3.
-            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
-          } else {
-            ASSERT(op_ == Token::DIV);
-            __ tst(lhs,
-                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
-            __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder.
-            int shift = 0;
-            int d = constant_rhs_;
-            while ((d & 1) == 0) {
-              d >>= 1;
-              shift++;
-            }
-            __ mov(r0, Operand(lhs, LSR, shift));
-            __ bic(r0, r0, Operand(kSmiTagMask));
-          }
-        } else {
-          // Not a power of 2.
-          __ tst(lhs, Operand(0x80000000u));
-          __ b(ne, &lhs_is_unsuitable);
-          // Find a fixed point reciprocal of the divisor so we can divide by
-          // multiplying.
-          double divisor = 1.0 / constant_rhs_;
-          int shift = 32;
-          double scale = 4294967296.0;  // 1 << 32.
-          uint32_t mul;
-          // Maximise the precision of the fixed point reciprocal.
-          while (true) {
-            mul = static_cast<uint32_t>(scale * divisor);
-            if (mul >= 0x7fffffff) break;
-            scale *= 2.0;
-            shift++;
-          }
-          mul++;
-          Register scratch2 = smi_test_reg;
-          smi_test_reg = no_reg;
-          __ mov(scratch2, Operand(mul));
-          __ umull(scratch, scratch2, scratch2, lhs);
-          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
-          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
-          // rhs is still the known rhs.  rhs is Smi tagged.
-          // lhs is still the unkown lhs.  lhs is Smi tagged.
-          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
-          // scratch = scratch2 * rhs.
-          MultiplyByKnownIntInStub(masm,
-                                   scratch,
-                                   scratch2,
-                                   rhs,
-                                   constant_rhs_,
-                                   &required_scratch_shift);
-          // scratch << required_scratch_shift is now the Smi tagged rhs *
-          // (lhs / rhs) where / indicates integer division.
-          if (op_ == Token::DIV) {
-            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
-            __ b(ne, &lhs_is_unsuitable);  // There was a remainder.
-            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
-          } else {
-            ASSERT(op_ == Token::MOD);
-            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
-          }
-        }
-        __ Ret();
-        __ bind(&lhs_is_unsuitable);
-      } else if (op_ == Token::MOD &&
-                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
-        // Do generate a bit of smi code for modulus even though the default for
-        // modulus is not to do it, but as the ARM processor has no coprocessor
-        // support for modulus checking for smis makes sense.  We can handle
-        // 1 to 25 times any power of 2.  This covers over half the numbers from
-        // 1 to 100 including all of the first 25.  (Actually the constants < 10
-        // are handled above by reciprocal multiplication.  We only get here for
-        // those cases if the right hand side is not a constant or for cases
-        // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
-        // stub.)
-        Label slow;
-        Label not_power_of_2;
-        ASSERT(!ShouldGenerateSmiCode());
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        // Check for two positive smis.
-        __ orr(smi_test_reg, lhs, Operand(rhs));
-        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
-        __ b(ne, &slow);
-        // Check that rhs is a power of two and not zero.
-        Register mask_bits = r3;
-        __ sub(scratch, rhs, Operand(1), SetCC);
-        __ b(mi, &slow);
-        __ and_(mask_bits, rhs, Operand(scratch), SetCC);
-        __ b(ne, &not_power_of_2);
-        // Calculate power of two modulus.
-        __ and_(result, lhs, Operand(scratch));
-        __ Ret();
-
-        __ bind(&not_power_of_2);
-        __ eor(scratch, scratch, Operand(mask_bits));
-        // At least two bits are set in the modulus.  The high one(s) are in
-        // mask_bits and the low one is scratch + 1.
-        __ and_(mask_bits, scratch, Operand(lhs));
-        Register shift_distance = scratch;
-        scratch = no_reg;
-
-        // The rhs consists of a power of 2 multiplied by some odd number.
-        // The power-of-2 part we handle by putting the corresponding bits
-        // from the lhs in the mask_bits register, and the power in the
-        // shift_distance register.  Shift distance is never 0 due to Smi
-        // tagging.
-        __ CountLeadingZeros(r4, shift_distance, shift_distance);
-        __ rsb(shift_distance, r4, Operand(32));
-
-        // Now we need to find out what the odd number is. The last bit is
-        // always 1.
-        Register odd_number = r4;
-        __ mov(odd_number, Operand(rhs, LSR, shift_distance));
-        __ cmp(odd_number, Operand(25));
-        __ b(gt, &slow);
-
-        IntegerModStub stub(
-            result, shift_distance, odd_number, mask_bits, lhs, r5);
-        __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call.
-
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(
-          masm,
-          &not_smi,
-          lhs,
-          rhs,
-          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
-      break;
-    }
-
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      Label slow;
-      STATIC_ASSERT(kSmiTag == 0);  // adjust code below
-      __ tst(smi_test_reg, Operand(kSmiTagMask));
-      __ b(ne, &slow);
-      Register scratch2 = smi_test_reg;
-      smi_test_reg = no_reg;
-      switch (op_) {
-        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
-        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
-        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
-        case Token::SAR:
-          // Remove tags from right operand.
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(result, Operand(lhs, ASR, scratch2));
-          // Smi tag result.
-          __ bic(result, result, Operand(kSmiTagMask));
-          break;
-        case Token::SHR:
-          // Remove tags from operands.  We can't do this on a 31 bit number
-          // because then the 0s get shifted into bit 30 instead of bit 31.
-          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(scratch, Operand(scratch, LSR, scratch2));
-          // Unsigned shift is not allowed to produce a negative number, so
-          // check the sign bit and the sign bit after Smi tagging.
-          __ tst(scratch, Operand(0xc0000000));
-          __ b(ne, &slow);
-          // Smi tag result.
-          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
-          break;
-        case Token::SHL:
-          // Remove tags from operands.
-          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(scratch, Operand(scratch, LSL, scratch2));
-          // Check that the signed result fits in a Smi.
-          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
-          __ b(mi, &slow);
-          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
-          break;
-        default: UNREACHABLE();
-      }
-      __ Ret();
-      __ bind(&slow);
-      HandleNonSmiBitwiseOp(masm, lhs, rhs);
-      break;
-    }
-
-    default: UNREACHABLE();
-  }
-  // This code should be unreachable.
-  __ stop("Unreachable");
-
-  // Generate an unreachable reference to the DEFAULT stub so that it can be
-  // found at the end of this stub when clearing ICs at GC.
-  // TODO(kaznacheev): Check performance impact and get rid of this.
-  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
-    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
-    __ CallStub(&uninit);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ Push(r1, r0);
-
-  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
-  __ mov(r1, Operand(Smi::FromInt(op_)));
-  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
-  __ Push(r2, r1, r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 2b1ce4c..811c275 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -71,162 +71,6 @@
 };
 
 
-class GenericBinaryOpStub : public CodeStub {
- public:
-  static const int kUnknownIntValue = -1;
-
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      Register lhs,
-                      Register rhs,
-                      int constant_rhs = kUnknownIntValue)
-      : op_(op),
-        mode_(mode),
-        lhs_(lhs),
-        rhs_(rhs),
-        constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) { }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        lhs_(LhsRegister(RegisterBits::decode(key))),
-        rhs_(RhsRegister(RegisterBits::decode(key))),
-        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
-        runtime_operands_type_(type_info),
-        name_(NULL) { }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  Register lhs_;
-  Register rhs_;
-  int constant_rhs_;
-  bool specialized_on_rhs_;
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-  char* name_;
-
-  static const int kMaxKnownRhs = 0x40000000;
-  static const int kKnownRhsKeyBits = 6;
-
-  // Minor key encoding in 17 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 3> {};
-  class RegisterBits: public BitField<bool, 11, 1> {};
-  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
-           (lhs_.is(r1) && rhs_.is(r0)));
-    // Encode the parameters in a unique 18 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt())
-           | TypeInfoBits::encode(runtime_operands_type_)
-           | RegisterBits::encode(lhs_.is(r0));
-  }
-
-  void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                             Register lhs,
-                             Register rhs);
-  void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                               Label* not_smi,
-                               Register lhs,
-                               Register rhs,
-                               const Builtins::JavaScript& builtin);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
-    if (constant_rhs == kUnknownIntValue) return false;
-    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
-    if (op == Token::MOD) {
-      if (constant_rhs <= 1) return false;
-      if (constant_rhs <= 10) return true;
-      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
-      return false;
-    }
-    return false;
-  }
-
-  int MinorKeyForKnownInt() {
-    if (!specialized_on_rhs_) return 0;
-    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
-    ASSERT(IsPowerOf2(constant_rhs_));
-    int key = 12;
-    int d = constant_rhs_;
-    while ((d & 1) == 0) {
-      key++;
-      d >>= 1;
-    }
-    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
-    return key;
-  }
-
-  int KnownBitsForMinorKey(int key) {
-    if (!key) return 0;
-    if (key <= 11) return key - 1;
-    int d = 1;
-    while (key != 12) {
-      key--;
-      d <<= 1;
-    }
-    return d;
-  }
-
-  Register LhsRegister(bool lhs_is_r0) {
-    return lhs_is_r0 ? r0 : r1;
-  }
-
-  Register RhsRegister(bool lhs_is_r0) {
-    return lhs_is_r0 ? r1 : r0;
-  }
-
-  bool HasSmiSmiFastPath() {
-    return op_ != Token::DIV;
-  }
-
-  bool ShouldGenerateSmiCode() {
-    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  const char* GetName();
-
-  virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
-  }
-
-#ifdef DEBUG
-  void Print() {
-    if (!specialized_on_rhs_) {
-      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
-    } else {
-      PrintF("GenericBinaryOpStub (%s by %d)\n",
-             Token::String(op_),
-             constant_rhs_);
-    }
-  }
-#endif
-};
-
-
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -413,102 +257,6 @@
 };
 
 
-// This stub can do a fast mod operation without using fp.
-// It is tail-called from the GenericBinaryOpStub and it always
-// returns an answer.  It never causes GC so it doesn't need a real frame.
-//
-// The inputs are always positive Smis.  This is never called
-// where the denominator is a power of 2.  We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register.  It is always in the range
-//   of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-//   shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-//   register.
-class IntegerModStub : public CodeStub {
- public:
-  IntegerModStub(Register result,
-                 Register shift_distance,
-                 Register odd_number,
-                 Register mask_bits,
-                 Register lhs,
-                 Register scratch)
-      : result_(result),
-        shift_distance_(shift_distance),
-        odd_number_(odd_number),
-        mask_bits_(mask_bits),
-        lhs_(lhs),
-        scratch_(scratch) {
-    // We don't encode these in the minor key, so they should always be the
-    // same.
-    // We don't really want to fix that since this stub is rather large and we
-    // don't want many copies of it.
-    ASSERT(shift_distance_.is(r9));
-    ASSERT(odd_number_.is(r4));
-    ASSERT(mask_bits_.is(r3));
-    ASSERT(scratch_.is(r5));
-  }
-
- private:
-  Register result_;
-  Register shift_distance_;
-  Register odd_number_;
-  Register mask_bits_;
-  Register lhs_;
-  Register scratch_;
-
-  // Minor key encoding in 16 bits.
-  class ResultRegisterBits: public BitField<int, 0, 4> {};
-  class LhsRegisterBits: public BitField<int, 4, 4> {};
-
-  Major MajorKey() { return IntegerMod; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return ResultRegisterBits::encode(result_.code())
-           | LhsRegisterBits::encode(lhs_.code());
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "IntegerModStub"; }
-
-  // Utility functions.
-  void DigitSum(MacroAssembler* masm,
-                Register lhs,
-                int mask,
-                int shift,
-                Label* entry);
-  void DigitSum(MacroAssembler* masm,
-                Register lhs,
-                Register scratch,
-                int mask,
-                int shift1,
-                int shift2,
-                Label* entry);
-  void ModGetInRangeBySubtraction(MacroAssembler* masm,
-                                  Register lhs,
-                                  int shift,
-                                  int rhs);
-  void ModReduce(MacroAssembler* masm,
-                 Register lhs,
-                 int max,
-                 int denominator);
-  void ModAnswer(MacroAssembler* masm,
-                 Register result,
-                 Register shift_distance,
-                 Register mask_bits,
-                 Register sum_of_digits);
-
-
-#ifdef DEBUG
-  void Print() { PrintF("IntegerModStub\n"); }
-#endif
-};
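
The register protocol of IntegerModStub encodes a simple identity: if the denominator is odd_number << shift_distance, then n mod denominator equals ((n >> shift_distance) mod odd_number) << shift_distance combined with the low bits n & mask_bits. A hedged plain-C++ sketch of that decomposition (the digit-sum machinery declared above is not reproduced; an ordinary % stands in for the stub's mod by the odd number):

#include <cassert>

// If denominator == odd << shift (an odd number times a power of two), then
//   n mod denominator == ((n >> shift) mod odd) << shift | (n & mask)
// with mask == (1 << shift) - 1.  This is the invariant behind the
// shift_distance / odd_number / mask_bits / lhs register protocol above.
static int ModByDecomposition(int n, int denominator) {
  int shift = 0;
  int odd = denominator;
  while ((odd & 1) == 0) {
    shift++;
    odd >>= 1;
  }
  int mask = (1 << shift) - 1;
  int high = n >> shift;  // The shifted-down bits ("lhs" register).
  int low = n & mask;     // The copied-through bits ("mask_bits").
  return ((high % odd) << shift) | low;
}

int main() {
  // Positive numerators, odd factors in 3..25, never a pure power of two:
  // the same preconditions the stub documents.
  for (int n = 0; n < 10000; n++) {
    for (int odd = 3; odd <= 25; odd += 2) {
      for (int shift = 1; shift <= 4; shift++) {
        int denominator = odd << shift;
        assert(ModByDecomposition(n, denominator) == n % denominator);
      }
    }
  }
  return 0;
}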
-
-
 // This stub can convert a signed int32 to a heap number (double).  It does
 // not work for int32s that are in Smi range!  No GC occurs during this stub
 // so you don't have to set up the frame.
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
deleted file mode 100644
index 81ed2d0..0000000
--- a/src/arm/codegen-arm-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ARM_CODEGEN_ARM_INL_H_
-#define V8_ARM_CODEGEN_ARM_INL_H_
-
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7b3ea14..bf748a9 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,56 +29,14 @@
 
 #if defined(V8_TARGET_ARCH_ARM)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-arm-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  // On ARM you either have a completely spilled frame or you
-  // handle it yourself, but at the moment there's no automation
-  // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -89,7349 +47,6 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      previous_(owner->state()) {
-  owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
-                                             JumpTarget* true_target,
-                                             JumpTarget* false_target)
-    : CodeGenState(owner),
-      true_target_(true_target),
-      false_target_(false_target) {
-  owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
-                                           Slot* slot,
-                                           TypeInfo type_info)
-    : CodeGenState(owner),
-      slot_(slot) {
-  owner->set_state(this);
-  old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
-  owner()->set_type_info(slot_, old_type_info_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      cc_reg_(al),
-      state_(NULL),
-      loop_nesting_(0),
-      type_info_(NULL),
-      function_return_(JumpTarget::BIDIRECTIONAL),
-      function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// r1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-
-  int slots = scope()->num_parameters() + scope()->num_stack_slots();
-  ScopedVector<TypeInfo> type_info_array(slots);
-  for (int i = 0; i < slots; i++) {
-    type_info_array[i] = TypeInfo::Unknown();
-  }
-  type_info_ = &type_info_array;
-
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  cc_reg_ = al;
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  {
-    CodeGenState state(this);
-
-    // Entry:
-    // Stack: receiver, arguments
-    // lr: return address
-    // fp: caller's frame pointer
-    // sp: stack pointer
-    // r1: called JS function
-    // cp: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ stop("stop-at");
-    }
-#endif
-
-    frame_->Enter();
-    // tos: code slot
-
-    // Allocate space for locals and initialize them.  This also checks
-    // for stack overflow.
-    frame_->AllocateStackSlots();
-
-    frame_->AssertIsSpilled();
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      __ ldr(r0, frame_->Function());
-      frame_->EmitPush(r0);
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        frame_->CallStub(&stub, 1);
-      } else {
-        frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-#ifdef DEBUG
-      JumpTarget verified_true;
-      __ cmp(r0, cp);
-      verified_true.Branch(eq);
-      __ stop("NewContext: r0 is expected to be the same as cp");
-      verified_true.Bind();
-#endif
-      // Update context local.
-      __ str(cp, frame_->Context());
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, it is the value passed for
-      // the last occurrence that must end up in the context. This is a
-      // rare case, so we don't check for it; instead we rely on the
-      // copying order: such a parameter is copied repeatedly into the
-      // same context location and thus the last value is what is seen
-      // inside the function.
-      frame_->AssertIsSpilled();
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          ASSERT(!scope()->is_global_scope());  // No params in global scope.
-          __ ldr(r1, frame_->ParameterAt(i));
-          // Loads r2 with context; used below in RecordWrite.
-          __ str(r1, SlotOperand(slot, r2));
-          // Load the offset into r3.
-          int slot_offset =
-              FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          __ RecordWrite(r2, Operand(slot_offset), r3, r1);
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.SetExpectedHeight();
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-    }
-  }
-
-  // Handle the return from the function.
-  if (has_valid_frame()) {
-    // If there is a valid frame, control flow can fall off the end of
-    // the body.  In that case there is an implicit return statement.
-    ASSERT(!function_return_is_shadowed_);
-    frame_->PrepareForReturn();
-    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-    if (function_return_.is_bound()) {
-      function_return_.Jump();
-    } else {
-      function_return_.Bind();
-      GenerateReturnSequence();
-    }
-  } else if (function_return_.is_linked()) {
-    // If the return target has dangling jumps to it, then we have not
-    // yet generated the return sequence.  This can happen when (a)
-    // control does not flow off the end of the body so we did not
-    // compile an artificial return statement just above, and (b) there
-    // are return statements in the body but (c) they are all shadowed.
-    function_return_.Bind();
-    GenerateReturnSequence();
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT(loop_nesting_ == (info->is_in_loop() ? 1 : 0));
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(!has_cc());
-  ASSERT(state_ == NULL);
-  ASSERT(loop_nesting() == 0);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    ProcessDeferred();
-  }
-
-  allocator_ = NULL;
-  type_info_ = NULL;
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
-  if (slot == NULL) return kInvalidSlotNumber;
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return slot->index();
-    case Slot::LOCAL:
-      return slot->index() + scope()->num_parameters();
-    default:
-      break;
-  }
-  return kInvalidSlotNumber;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(cp));  // do not overwrite context register
-      Register context = cp;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we are
-      // always at a function context. However, it is safe to dereference
-      // because the function context of a function context is itself. Before
-      // deleting this mov we should try to create a counter-example first,
-      // though...)
-      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return MemOperand(r0, 0);
-  }
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Register tmp,
-    Register tmp2,
-    JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  Register context = cp;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
-        __ tst(tmp2, tmp2);
-        slow->Branch(ne);
-      }
-      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-      context = tmp;
-    }
-  }
-  // Check that last extension is NULL.
-  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
-  __ tst(tmp2, tmp2);
-  slow->Branch(ne);
-  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp, slot->index());
-}
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
-                                  JumpTarget* true_target,
-                                  JumpTarget* false_target,
-                                  bool force_cc) {
-  ASSERT(!has_cc());
-  int original_height = frame_->height();
-
-  { ConditionCodeGenState new_state(this, true_target, false_target);
-    Visit(x);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (eg, a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        has_valid_frame() &&
-        !has_cc() &&
-        frame_->height() == original_height) {
-      true_target->Jump();
-    }
-  }
-  if (force_cc && frame_ != NULL && !has_cc()) {
-    // Convert the TOS value to a boolean in the condition code register.
-    ToBoolean(true_target, false_target);
-  }
-  ASSERT(!force_cc || !has_valid_frame() || has_cc());
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-  // We generally assume that we are not in a spilled scope for most
-  // of the code generator.  A failure to ensure this caused issue 815
-  // and this assert is designed to catch similar issues.
-  frame_->AssertIsNotSpilled();
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  JumpTarget true_target;
-  JumpTarget false_target;
-  LoadCondition(expr, &true_target, &false_target, false);
-
-  if (has_cc()) {
-    // Convert cc_reg_ into a boolean value.
-    JumpTarget loaded;
-    JumpTarget materialize_true;
-    materialize_true.Branch(cc_reg_);
-    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
-    loaded.Jump();
-    materialize_true.Bind();
-    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
-    loaded.Bind();
-    cc_reg_ = al;
-  }
-
-  if (true_target.is_linked() || false_target.is_linked()) {
-    // We have at least one condition value that has been "translated"
-    // into a branch, thus it needs to be loaded explicitly.
-    JumpTarget loaded;
-    if (frame_ != NULL) {
-      loaded.Jump();  // Don't lose the current TOS.
-    }
-    bool both = true_target.is_linked() && false_target.is_linked();
-    // Load "true" if necessary.
-    if (true_target.is_linked()) {
-      true_target.Bind();
-      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
-    }
-    // If both "true" and "false" need to be loaded, jump across the code for
-    // "false".
-    if (both) {
-      loaded.Jump();
-    }
-    // Load "false" if necessary.
-    if (false_target.is_linked()) {
-      false_target.Bind();
-      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
-    }
-    // A value is loaded on all paths reaching this point.
-    loaded.Bind();
-  }
-  ASSERT(has_valid_frame());
-  ASSERT(!has_cc());
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  Register reg = frame_->GetTOSRegister();
-  __ ldr(reg, GlobalObjectOperand());
-  frame_->EmitPush(reg);
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  Register reg = frame_->GetTOSRegister();
-  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(reg,
-         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->EmitPush(reg);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the hole value
-    // as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
-  } else {
-    frame_->SpillAll();
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    __ ldr(r2, frame_->Function());
-    // The receiver is below the arguments, the return address, and the
-    // frame pointer on the stack.
-    const int kReceiverDisplacement = 2 + scope()->num_parameters();
-    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
-    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-    frame_->Adjust(3);
-    __ Push(r2, r1, r0);
-    frame_->CallStub(&stub, 3);
-    frame_->EmitPush(r0);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-    Register arguments = frame_->PopToRegister();
-    __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
-    __ cmp(arguments, ip);
-    done.Branch(ne);
-  }
-  StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  // We generally assume that we are not in a spilled scope for most
-  // of the code generator.  A failure to ensure this caused issue 815
-  // and this assert is designed to catch similar issues.
-  cgen->frame()->AssertIsNotSpilled();
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  int size = ref->size();
-  ref->set_unloaded();
-  if (size == 0) return;
-
-  // Pop a reference from the stack while preserving TOS.
-  VirtualFrame::RegisterAllocationScope scope(this);
-  Comment cmnt(masm_, "[ UnloadReference");
-  if (size > 0) {
-    Register tos = frame_->PopToRegister();
-    frame_->Drop(size);
-    frame_->EmitPush(tos);
-  }
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
-                              JumpTarget* false_target) {
-  // Note: The generated code snippet does not change stack variables.
-  //       Only the condition code should be set.
-  bool known_smi = frame_->KnownSmiAt(0);
-  Register tos = frame_->PopToRegister();
-
-  // Fast case checks
-
-  // Check if the value is 'false'.
-  if (!known_smi) {
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(tos, ip);
-    false_target->Branch(eq);
-
-    // Check if the value is 'true'.
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(tos, ip);
-    true_target->Branch(eq);
-
-    // Check if the value is 'undefined'.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(tos, ip);
-    false_target->Branch(eq);
-  }
-
-  // Check if the value is a smi.
-  __ cmp(tos, Operand(Smi::FromInt(0)));
-
-  if (!known_smi) {
-    false_target->Branch(eq);
-    __ tst(tos, Operand(kSmiTagMask));
-    true_target->Branch(eq);
-
-    // Slow case.
-    if (CpuFeatures::IsSupported(VFP3)) {
-      CpuFeatures::Scope scope(VFP3);
-      // Implements the slow case by using ToBooleanStub.
-      // The ToBooleanStub takes a single argument, and
-      // returns a non-zero value for true, or zero for false.
-      // Both the argument value and the return value use the
-      // register assigned to tos_.
-      ToBooleanStub stub(tos);
-      frame_->CallStub(&stub, 0);
-      // Convert the result in "tos" to a condition code.
-      __ cmp(tos, Operand(0, RelocInfo::NONE));
-    } else {
-      // Implements slow case by calling the runtime.
-      frame_->EmitPush(tos);
-      frame_->CallRuntime(Runtime::kToBool, 1);
-      // Convert the result (r0) to a condition code.
-      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(r0, ip);
-    }
-  }
-
-  cc_reg_ = ne;
-}
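
The branch order in ToBoolean mirrors ECMA-262 section 9.2 for the values the fast path can decide without calling out: false, true, and undefined by identity, then the smi zero test. A hedged sketch of the same decision order, with an illustrative tagged-value struct standing in for real heap values:

#include <cassert>

// Illustrative tags; kFalse/kTrue/kUndefined stand in for the root-list
// comparisons above and kSmi for the smi tag check.
enum Kind { kFalse, kTrue, kUndefined, kSmi, kOther };
struct Value { Kind kind; int smi; };

// Same decision order as the fast path: known singletons by identity,
// then smi zero/non-zero; anything else needs the stub or the runtime.
static bool ToBooleanFastPath(Value v, bool* decided) {
  *decided = true;
  if (v.kind == kFalse) return false;
  if (v.kind == kTrue) return true;
  if (v.kind == kUndefined) return false;
  if (v.kind == kSmi) return v.smi != 0;
  *decided = false;  // Heap number, string, etc.: slow case.
  return false;
}

int main() {
  bool decided;
  assert(ToBooleanFastPath(Value{kSmi, 0}, &decided) == false && decided);
  assert(ToBooleanFastPath(Value{kSmi, 7}, &decided) == true && decided);
  assert(ToBooleanFastPath(Value{kUndefined, 0}, &decided) == false && decided);
  ToBooleanFastPath(Value{kOther, 0}, &decided);
  assert(!decided);
  return 0;
}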
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           OverwriteMode overwrite_mode,
-                                           GenerateInlineSmi inline_smi,
-                                           int constant_rhs) {
-  // top of virtual frame: y
-  // 2nd elt. on virtual frame: x
-  // result: top of virtual frame
-
-  // Stub is entered with a call: 'return address' is in lr.
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-      if (inline_smi) {
-        JumpTarget done;
-        Register rhs = frame_->PopToRegister();
-        Register lhs = frame_->PopToRegister(rhs);
-        Register scratch = VirtualFrame::scratch0();
-        __ orr(scratch, rhs, Operand(lhs));
-        // Check they are both small and positive.
-        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
-        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
-        STATIC_ASSERT(kSmiTag == 0);
-        if (op == Token::ADD) {
-          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
-        } else {
-          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
-        }
-        done.Branch(eq);
-        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
-        frame_->SpillAll();
-        frame_->CallStub(&stub, 0);
-        done.Bind();
-        frame_->EmitPush(r0);
-        break;
-      } else {
-        // Fall through!
-      }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-      if (inline_smi) {
-        bool rhs_is_smi = frame_->KnownSmiAt(0);
-        bool lhs_is_smi = frame_->KnownSmiAt(1);
-        Register rhs = frame_->PopToRegister();
-        Register lhs = frame_->PopToRegister(rhs);
-        Register smi_test_reg;
-        Condition cond;
-        if (!rhs_is_smi || !lhs_is_smi) {
-          if (rhs_is_smi) {
-            smi_test_reg = lhs;
-          } else if (lhs_is_smi) {
-            smi_test_reg = rhs;
-          } else {
-            smi_test_reg = VirtualFrame::scratch0();
-            __ orr(smi_test_reg, rhs, Operand(lhs));
-          }
-          // Check they are both Smis.
-          __ tst(smi_test_reg, Operand(kSmiTagMask));
-          cond = eq;
-        } else {
-          cond = al;
-        }
-        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
-        if (op == Token::BIT_OR) {
-          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
-        } else if (op == Token::BIT_AND) {
-          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
-        } else {
-          ASSERT(op == Token::BIT_XOR);
-          STATIC_ASSERT(kSmiTag == 0);
-          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
-        }
-        if (cond != al) {
-          JumpTarget done;
-          done.Branch(cond);
-          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
-          frame_->SpillAll();
-          frame_->CallStub(&stub, 0);
-          done.Bind();
-        }
-        frame_->EmitPush(r0);
-        break;
-      } else {
-        // Fall through!
-      }
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      Register rhs = frame_->PopToRegister();
-      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
-      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
-      frame_->SpillAll();
-      frame_->CallStub(&stub, 0);
-      frame_->EmitPush(r0);
-      break;
-    }
-
-    case Token::COMMA: {
-      Register scratch = frame_->PopToRegister();
-      // Simply discard left value.
-      frame_->Drop();
-      frame_->EmitPush(scratch);
-      break;
-    }
-
-    default:
-      // Other cases should have been handled before this point.
-      UNREACHABLE();
-      break;
-  }
-}
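
The inline smi path for ADD and SUB above folds three checks into one instruction: OR-ing the operands and testing against kSmiTagMask | 0xc0000000 verifies in a single tst that both values are smis, both are non-negative, and both are small enough that the tagged sum cannot overflow. A standalone sketch of that combined test (plain C++, with smis modeled as value << 1):

#include <cassert>
#include <cstdint>

static const uint32_t kSmiTagMask = 1;  // kSmiTag == 0, one tag bit.

// One tst covers everything: no tag bit set in either operand (both are
// smis) and no bit of 0xc0000000 set (both non-negative and small enough
// that adding the tagged values cannot overflow 32 bits).
static bool BothSmallPositiveSmis(int32_t lhs, int32_t rhs) {
  uint32_t combined = static_cast<uint32_t>(lhs) | static_cast<uint32_t>(rhs);
  return (combined & (kSmiTagMask | 0xc0000000u)) == 0;
}

int main() {
  int32_t a = 5 << 1;  // Tagged smi 5.
  int32_t b = 7 << 1;  // Tagged smi 7.
  assert(BothSmallPositiveSmis(a, b));
  assert(a + b == 12 << 1);                       // Tagged add is safe.

  assert(!BothSmallPositiveSmis(a, b | 1));       // rhs is not a smi.
  assert(!BothSmallPositiveSmis(a, -2));          // Negative smi.
  assert(!BothSmallPositiveSmis(a, 0x40000000));  // Sum could overflow.
  return 0;
}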
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             int value,
-                             bool reversed,
-                             OverwriteMode overwrite_mode,
-                             Register tos)
-      : op_(op),
-        value_(value),
-        reversed_(reversed),
-        overwrite_mode_(overwrite_mode),
-        tos_register_(tos) {
-    set_comment("[ DeferredInlinedSmiOperation");
-  }
-
-  virtual void Generate();
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
-  // methods, it is the responsibility of the deferred code to save and restore
-  // registers.
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToNonSmiInput(Condition cond);
-  void JumpToAnswerOutOfRange(Condition cond);
-
- private:
-  void GenerateNonSmiInput();
-  void GenerateAnswerOutOfRange();
-  void WriteNonSmiAnswer(Register answer,
-                         Register heap_number,
-                         Register scratch);
-
-  Token::Value op_;
-  int value_;
-  bool reversed_;
-  OverwriteMode overwrite_mode_;
-  Register tos_register_;
-  Label non_smi_input_;
-  Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32-bit integer, without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
-  ASSERT(Token::IsBitOp(op_));
-
-  __ b(cond, &non_smi_input_);
-}
-
-
-// For bit operations the result is always 32 bits, so we handle the case
-// where the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
-  ASSERT(Token::IsBitOp(op_));
-
-  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
-    // >>> requires an unsigned-to-double conversion, and the non-VFP code
-    // does not support this conversion.
-    __ b(cond, entry_label());
-  } else {
-    __ b(cond, &answer_out_of_range_);
-  }
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is not in any register.  The tos_register_ is
-// not used by the virtual frame.  On exit the answer is in the tos_register_
-// and the virtual frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  Register lhs = r1;
-  Register rhs = r0;
-  switch (op_) {
-    case Token::ADD: {
-      // Revert optimistic add.
-      if (reversed_) {
-        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r1, Operand(Smi::FromInt(value_)));
-      } else {
-        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r0, Operand(Smi::FromInt(value_)));
-      }
-      break;
-    }
-
-    case Token::SUB: {
-      // Revert optimistic sub.
-      if (reversed_) {
-        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r1, Operand(Smi::FromInt(value_)));
-      } else {
-        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r0, Operand(Smi::FromInt(value_)));
-      }
-      break;
-    }
-
-    // For these operations there is no optimistic operation that needs to be
-    // reverted.
-    case Token::MUL:
-    case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      if (tos_register_.is(r1)) {
-        __ mov(r0, Operand(Smi::FromInt(value_)));
-      } else {
-        ASSERT(tos_register_.is(r0));
-        __ mov(r1, Operand(Smi::FromInt(value_)));
-      }
-      if (reversed_ == tos_register_.is(r1)) {
-          lhs = r0;
-          rhs = r1;
-      }
-      break;
-    }
-
-    default:
-      // Other cases should have been handled before this point.
-      UNREACHABLE();
-      break;
-  }
-
-  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
-  __ CallStub(&stub);
-
-  // The generic stub returns its value in r0, but that's not
-  // necessarily what we want.  We want whatever the inlined code
-  // expected, which is that the answer is in the same register as
-  // the operand was.
-  __ Move(tos_register_, r0);
-
-  // The tos register was not in use for the virtual frame that we
-  // came into this function with, so we can merge back to that frame
-  // without trashing it.
-  copied_frame.MergeTo(frame_state()->frame());
-
-  Exit();
-
-  if (non_smi_input_.is_linked()) {
-    GenerateNonSmiInput();
-  }
-
-  if (answer_out_of_range_.is_linked()) {
-    GenerateAnswerOutOfRange();
-  }
-}
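
The ADD and SUB cases above revert an optimistic operation: the inline code has already done tos +/- constant before discovering overflow or a non-smi input, so the deferred path must subtract the constant back out to recover the original operand before calling the generic stub. A hedged sketch of the pattern (GCC/Clang's __builtin_add_overflow stands in for the ARM SetCC / Branch(vs) pair):

#include <cassert>
#include <cstdint>

// A smi stores value v as v << 1; bit 0 is the tag (kSmiTag == 0).
static int32_t SmiFromInt(int32_t v) { return v << 1; }

// Optimistic add, as in the inline code: add first, then check the
// overflow flag.  Returns false when the deferred path must run; *tos is
// then the reverted original operand, which is what the deferred code
// rebuilds in r0/r1 before calling the generic stub.
static bool OptimisticSmiAdd(int32_t* tos, int32_t constant) {
  int32_t rhs = SmiFromInt(constant);
  int32_t sum;
  if (!__builtin_add_overflow(*tos, rhs, &sum)) {
    *tos = sum;  // Fast path: result is a valid tagged sum.
    return true;
  }
  // Revert the optimistic add with wrapping arithmetic, like the CPU.
  *tos = static_cast<int32_t>(static_cast<uint32_t>(sum) -
                              static_cast<uint32_t>(rhs));
  return false;
}

int main() {
  int32_t tos = SmiFromInt(5);
  assert(OptimisticSmiAdd(&tos, 7) && tos == SmiFromInt(12));

  tos = SmiFromInt(0x3fffffff);        // Largest positive smi.
  int32_t saved = tos;
  assert(!OptimisticSmiAdd(&tos, 1));  // Overflows: slow path needed.
  assert(tos == saved);                // Operand recovered by the revert.
  return 0;
}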
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
-                                                   Register heap_number,
-                                                   Register scratch) {
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(s0, answer);
-    if (op_ == Token::SHR) {
-      __ vcvt_f64_u32(d0, s0);
-    } else {
-      __ vcvt_f64_s32(d0, s0);
-    }
-    __ sub(scratch, heap_number, Operand(kHeapObjectTag));
-    __ vstr(d0, scratch, HeapNumber::kValueOffset);
-  } else {
-    WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
-    __ CallStub(&stub);
-  }
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
-  // We know the left hand side is not a Smi and the right hand side is an
-  // immediate value (value_) which can be represented as a Smi. We only
-  // handle bit operations.
-  ASSERT(Token::IsBitOp(op_));
-
-  if (FLAG_debug_code) {
-    __ Abort("Should not fall through!");
-  }
-
-  __ bind(&non_smi_input_);
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(tos_register_);
-  }
-
-  // This routine uses the registers from r2 to r6.  At the moment they are
-  // not used by the register allocator, but when they are it should use
-  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
-  Register heap_number_map = r7;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
-  __ cmp(r3, heap_number_map);
-  // Not a number, fall back to the GenericBinaryOpStub.
-  __ b(ne, entry_label());
-
-  Register int32 = r2;
-  // Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
-  __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
-
-  // tos_register_ (r0 or r1): Original heap number.
-  // int32: signed 32-bit int.
-
-  Label result_not_a_smi;
-  int shift_value = value_ & 0x1f;
-  switch (op_) {
-    case Token::BIT_OR:  __ orr(int32, int32, Operand(value_)); break;
-    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
-    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
-    case Token::SAR:
-      ASSERT(!reversed_);
-      if (shift_value != 0) {
-         __ mov(int32, Operand(int32, ASR, shift_value));
-      }
-      break;
-    case Token::SHR:
-      ASSERT(!reversed_);
-      if (shift_value != 0) {
-        __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
-      } else {
-        // SHR is special because it is required to produce a positive answer.
-        __ cmp(int32, Operand(0, RelocInfo::NONE));
-      }
-      if (CpuFeatures::IsSupported(VFP3)) {
-        __ b(mi, &result_not_a_smi);
-      } else {
-        // Non-VFP code cannot convert from unsigned to double, so fall back
-        // to GenericBinaryOpStub.
-        __ b(mi, entry_label());
-      }
-      break;
-    case Token::SHL:
-      ASSERT(!reversed_);
-      if (shift_value != 0) {
-        __ mov(int32, Operand(int32, LSL, shift_value));
-      }
-      break;
-    default: UNREACHABLE();
-  }
-  // Check that the *signed* result fits in a smi. Not necessary for AND, for
-  // SAR if the shift is more than 0, or for SHR if the shift is more than 1.
-  if (!( (op_ == Token::AND && value_ >= 0) ||
-        ((op_ == Token::SAR) && (shift_value > 0)) ||
-        ((op_ == Token::SHR) && (shift_value > 1)))) {
-    __ add(r3, int32, Operand(0x40000000), SetCC);
-    __ b(mi, &result_not_a_smi);
-  }
-  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
-  Exit();
-
-  if (result_not_a_smi.is_linked()) {
-    __ bind(&result_not_a_smi);
-    if (overwrite_mode_ != OVERWRITE_LEFT) {
-      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
-             (overwrite_mode_ == OVERWRITE_RIGHT));
-      // If the allocation fails, fall back to the GenericBinaryOpStub.
-      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
-      // Nothing can go wrong now, so overwrite tos.
-      __ mov(tos_register_, Operand(r4));
-    }
-
-    // int32: answer as signed 32-bit integer.
-    // tos_register_: Heap number to write the answer into.
-    WriteNonSmiAnswer(int32, tos_register_, r3);
-
-    Exit();
-  }
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
-  // The inputs to a bitwise operation were Smis, but the result cannot fit
-  // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
-  // holds the untagged result to be converted.  tos_register_ contains the
-  // input.  See the calls to JumpToAnswerOutOfRange to see how we got here.
-  ASSERT(Token::IsBitOp(op_));
-  ASSERT(!reversed_);
-
-  Register untagged_result = VirtualFrame::scratch0();
-
-  if (FLAG_debug_code) {
-    __ Abort("Should not fall through!");
-  }
-
-  __ bind(&answer_out_of_range_);
-  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
-    // >>> 0 is a special case where the untagged_result register is not set up
-    // yet.  We untag the input to get it.
-    __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
-  }
-
-  // This routine uses the registers from r2 to r6.  At the moment they are
-  // not used by the register allocator, but when they are it should use
-  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
-  // Allocate the result heap number.
-  Register heap_number_map = VirtualFrame::scratch1();
-  Register heap_number = r4;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // If the allocation fails, fall back to the GenericBinaryOpStub.
-  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
-  WriteNonSmiAnswer(untagged_result, heap_number, r3);
-  __ mov(tos_register_, Operand(heap_number));
-
-  Exit();
-}
-
-
-static bool PopCountLessThanEqual2(unsigned int x) {
-  x &= x - 1;
-  return (x & (x - 1)) == 0;
-}
-
-
-// Returns the index of the lowest bit set.
-static int BitPosition(unsigned x) {
-  int bit_posn = 0;
-  while ((x & 0xf) == 0) {
-    bit_posn += 4;
-    x >>= 4;
-  }
-  while ((x & 1) == 0) {
-    bit_posn++;
-    x >>= 1;
-  }
-  return bit_posn;
-}
-
-
-// Can we multiply by x with at most two shifts and an add?
-// This answers yes to all integers from 2 to 10.
-static bool IsEasyToMultiplyBy(int x) {
-  if (x < 2) return false;                          // Avoid special cases.
-  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
-  if (IsPowerOf2(x)) return true;                   // Simple shift.
-  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
-  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
-  return false;
-}
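
PopCountLessThanEqual2 uses the classic x & (x - 1) trick, which clears the lowest set bit: clear once, and if a second clear reaches zero, at most two bits were set. A quick exhaustive check against a reference popcount (standalone sketch, not V8 code):

#include <cassert>

static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;                  // Clear the lowest set bit.
  return (x & (x - 1)) == 0;   // Zero iff at most one bit remained.
}

static int ReferencePopCount(unsigned int x) {
  int n = 0;
  for (; x != 0; x >>= 1) n += x & 1;
  return n;
}

int main() {
  for (unsigned int x = 0; x < (1u << 16); x++) {
    assert(PopCountLessThanEqual2(x) == (ReferencePopCount(x) <= 2));
  }
  return 0;
}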
-
-
-// Can multiply by anything that IsEasyToMultiplyBy returns true for.
-// Source and destination may be the same register.  This routine does
-// not set carry and overflow the way a mul instruction would.
-static void InlineMultiplyByKnownInt(MacroAssembler* masm,
-                                     Register source,
-                                     Register destination,
-                                     int known_int) {
-  if (IsPowerOf2(known_int)) {
-    masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
-  } else if (PopCountLessThanEqual2(known_int)) {
-    int first_bit = BitPosition(known_int);
-    int second_bit = BitPosition(known_int ^ (1 << first_bit));
-    masm->add(destination, source,
-              Operand(source, LSL, second_bit - first_bit));
-    if (first_bit != 0) {
-      masm->mov(destination, Operand(destination, LSL, first_bit));
-    }
-  } else {
-    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
-    int the_bit = BitPosition(known_int + 1);
-    masm->rsb(destination, source, Operand(source, LSL, the_bit));
-  }
-}
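
InlineMultiplyByKnownInt emits one of three shapes: a single shift for a power of two, an add of a shifted operand (optionally followed by a shift) when exactly two bits are set, and a reverse-subtract for patterns like 0b111 where known_int + 1 is a power of two. For instance, x * 10 becomes (x + (x << 2)) << 1 and x * 7 becomes (x << 3) - x. A plain-C++ sketch of the same decomposition, checked against ordinary multiplication (BitPosition is simplified here, and inputs are kept non-negative to avoid shifting negative values):

#include <cassert>

static bool IsPowerOf2(int x) { return x > 0 && (x & (x - 1)) == 0; }

// Simplified BitPosition: index of the lowest set bit (x must be nonzero).
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

// The three shapes InlineMultiplyByKnownInt emits, written in plain C++:
// a single shift, an add of a shifted operand plus an optional shift,
// or a reverse-subtract for known_int == 2^k - 1 patterns like 0b111.
static int MultiplyByKnownInt(int source, int known_int) {
  if (IsPowerOf2(known_int)) {
    return source << BitPosition(known_int);                  // mov LSL
  }
  unsigned cleared = known_int & (known_int - 1);
  if ((cleared & (cleared - 1)) == 0) {                       // two bits set
    int first_bit = BitPosition(known_int);
    int second_bit = BitPosition(known_int ^ (1 << first_bit));
    int sum = source + (source << (second_bit - first_bit));  // add LSL
    return sum << first_bit;                                  // mov LSL
  }
  assert(IsPowerOf2(known_int + 1));                          // 0b111...
  int the_bit = BitPosition(known_int + 1);
  return (source << the_bit) - source;                        // rsb LSL
}

int main() {
  // IsEasyToMultiplyBy answers yes for all of 2..10.
  for (int k = 2; k <= 10; k++) {
    for (int x = 0; x <= 1000; x++) {
      assert(MultiplyByKnownInt(x, k) == x * k);
    }
  }
  return 0;
}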
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
-                                 Handle<Object> value,
-                                 bool reversed,
-                                 OverwriteMode mode) {
-  int int_value = Smi::cast(*value)->value();
-
-  bool both_sides_are_smi = frame_->KnownSmiAt(0);
-
-  bool something_to_inline;
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::BIT_AND:
-    case Token::BIT_OR:
-    case Token::BIT_XOR: {
-      something_to_inline = true;
-      break;
-    }
-    case Token::SHL: {
-      something_to_inline = (both_sides_are_smi || !reversed);
-      break;
-    }
-    case Token::SHR:
-    case Token::SAR: {
-      if (reversed) {
-        something_to_inline = false;
-      } else {
-        something_to_inline = true;
-      }
-      break;
-    }
-    case Token::MOD: {
-      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
-        something_to_inline = false;
-      } else {
-        something_to_inline = true;
-      }
-      break;
-    }
-    case Token::MUL: {
-      if (!IsEasyToMultiplyBy(int_value)) {
-        something_to_inline = false;
-      } else {
-        something_to_inline = true;
-      }
-      break;
-    }
-    default: {
-      something_to_inline = false;
-      break;
-    }
-  }
-
-  if (!something_to_inline) {
-    if (!reversed) {
-      // Push the rhs onto the virtual frame by putting it in a TOS register.
-      Register rhs = frame_->GetTOSRegister();
-      __ mov(rhs, Operand(value));
-      frame_->EmitPush(rhs, TypeInfo::Smi());
-      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
-    } else {
-      // Pop the rhs, then push lhs and rhs in the right order.  This performs
-      // at most one pop; the rest takes place in TOS registers.
-      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
-      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
-      __ mov(lhs, Operand(value));
-      frame_->EmitPush(lhs, TypeInfo::Smi());
-      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
-      frame_->EmitPush(rhs, t);
-      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
-                             GenericBinaryOpStub::kUnknownIntValue);
-    }
-    return;
-  }
-
-  // We move the top of stack to a register (normally no move is involved).
-  Register tos = frame_->PopToRegister();
-  switch (op) {
-    case Token::ADD: {
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
-      __ add(tos, tos, Operand(value), SetCC);
-      deferred->Branch(vs);
-      if (!both_sides_are_smi) {
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-      }
-      deferred->BindExit();
-      frame_->EmitPush(tos);
-      break;
-    }
-
-    case Token::SUB: {
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
-      if (reversed) {
-        __ rsb(tos, tos, Operand(value), SetCC);
-      } else {
-        __ sub(tos, tos, Operand(value), SetCC);
-      }
-      deferred->Branch(vs);
-      if (!both_sides_are_smi) {
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-      }
-      deferred->BindExit();
-      frame_->EmitPush(tos);
-      break;
-    }
-
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      if (both_sides_are_smi) {
-        switch (op) {
-          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
-          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
-          default: UNREACHABLE();
-        }
-        frame_->EmitPush(tos, TypeInfo::Smi());
-      } else {
-        DeferredInlineSmiOperation* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->JumpToNonSmiInput(ne);
-        switch (op) {
-          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
-          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
-          default: UNREACHABLE();
-        }
-        deferred->BindExit();
-        TypeInfo result_type = TypeInfo::Integer32();
-        if (op == Token::BIT_AND && int_value >= 0) {
-          result_type = TypeInfo::Smi();
-        }
-        frame_->EmitPush(tos, result_type);
-      }
-      break;
-    }
-
-    case Token::SHL:
-      if (reversed) {
-        ASSERT(both_sides_are_smi);
-        int max_shift = 0;
-        int max_result = int_value == 0 ? 1 : int_value;
-        while (Smi::IsValid(max_result << 1)) {
-          max_shift++;
-          max_result <<= 1;
-        }
-        DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
-        // Mask off the last 5 bits of the shift operand (rhs).  This is part
-        // of the definition of shift in JS and we know we have a Smi so we
-        // can safely do this.  The masked version gets passed to the
-        // deferred code, but that makes no difference.
-        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
-        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
-        deferred->Branch(ge);
-        Register scratch = VirtualFrame::scratch0();
-        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
-        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
-        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
-        deferred->BindExit();
-        TypeInfo result = TypeInfo::Integer32();
-        frame_->EmitPush(tos, result);
-        break;
-      }
-      // Fall through!
-    case Token::SHR:
-    case Token::SAR: {
-      ASSERT(!reversed);
-      int shift_value = int_value & 0x1f;
-      TypeInfo result = TypeInfo::Number();
-
-      if (op == Token::SHR) {
-        if (shift_value > 1) {
-          result = TypeInfo::Smi();
-        } else if (shift_value > 0) {
-          result = TypeInfo::Integer32();
-        }
-      } else if (op == Token::SAR) {
-        if (shift_value > 0) {
-          result = TypeInfo::Smi();
-        } else {
-          result = TypeInfo::Integer32();
-        }
-      } else {
-        ASSERT(op == Token::SHL);
-        result = TypeInfo::Integer32();
-      }
-
-      DeferredInlineSmiOperation* deferred =
-        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
-      if (!both_sides_are_smi) {
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->JumpToNonSmiInput(ne);
-      }
-      switch (op) {
-        case Token::SHL: {
-          if (shift_value != 0) {
-            Register untagged_result = VirtualFrame::scratch0();
-            Register scratch = VirtualFrame::scratch1();
-            int adjusted_shift = shift_value - kSmiTagSize;
-            ASSERT(adjusted_shift >= 0);
-
-            if (adjusted_shift != 0) {
-              __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
-            } else {
-              __ mov(untagged_result, Operand(tos));
-            }
-            // Check that the *signed* result fits in a smi.
-            __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
-            deferred->JumpToAnswerOutOfRange(mi);
-            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
-          }
-          break;
-        }
-        case Token::SHR: {
-          if (shift_value != 0) {
-            Register untagged_result = VirtualFrame::scratch0();
-            // Remove tag.
-            __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
-            __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
-            if (shift_value == 1) {
-              // Check that the *unsigned* result fits in a smi.
-              // Neither of the two high-order bits can be set:
-              // - 0x80000000: high bit would be lost when smi tagging
-              // - 0x40000000: this number would convert to negative when Smi
-              //   tagging.
-              // These two cases can only happen with shifts by 0 or 1 when
-              // handed a valid smi.
-              __ tst(untagged_result, Operand(0xc0000000));
-              deferred->JumpToAnswerOutOfRange(ne);
-            }
-            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
-          } else {
-            __ cmp(tos, Operand(0, RelocInfo::NONE));
-            deferred->JumpToAnswerOutOfRange(mi);
-          }
-          break;
-        }
-        case Token::SAR: {
-          if (shift_value != 0) {
-            // Do the shift and the tag removal in one operation. If the shift
-            // is 31 bits (the highest possible value) then we emit the
-            // instruction as a shift by 0 which in the ARM ISA means shift
-            // arithmetically by 32.
-            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
-            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
-          }
-          break;
-        }
-        default: UNREACHABLE();
-      }
-      deferred->BindExit();
-      frame_->EmitPush(tos, result);
-      break;
-    }
-
-    case Token::MOD: {
-      ASSERT(!reversed);
-      ASSERT(int_value >= 2);
-      ASSERT(IsPowerOf2(int_value));
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-      unsigned mask = (0x80000000u | kSmiTagMask);
-      __ tst(tos, Operand(mask));
-      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
-      mask = (int_value << kSmiTagSize) - 1;
-      __ and_(tos, tos, Operand(mask));
-      deferred->BindExit();
-      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
-      frame_->EmitPush(
-          tos,
-          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
-      break;
-    }
-
-    case Token::MUL: {
-      ASSERT(IsEasyToMultiplyBy(int_value));
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
-      max_smi_that_wont_overflow <<= kSmiTagSize;
-      unsigned mask = 0x80000000u;
-      while ((mask & max_smi_that_wont_overflow) == 0) {
-        mask |= mask >> 1;
-      }
-      mask |= kSmiTagMask;
-      // A single mask test that conservatively rejects too-large values and
-      // non-Smis.  It also rejects negative numbers, unfortunately, but since
-      // this code is inline we prefer brevity to comprehensiveness.
-      __ tst(tos, Operand(mask));
-      deferred->Branch(ne);
-      InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
-      deferred->BindExit();
-      frame_->EmitPush(tos);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
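A note for readers skimming the deleted SmiOperation code above: all of its inline checks rest on V8's 31-bit smi encoding (kSmiTagSize == 1, tag bit zero).  The following standalone sketch — illustration only, not V8 code — restates the range tricks in plain C++: the shift-count mask, the signed-range check used by SHL, the 0xc0000000 check used by SHR-by-1, and the power-of-2 MOD mask (the MUL path uses a similar conservative mask).

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;                      // one tag bit, value 0 for smis
    int32_t SmiTag(int32_t v)   { return v << kSmiTagSize; }
    int32_t SmiUntag(int32_t t) { return t >> kSmiTagSize; }

    // JS shift counts are taken mod 32; masking the *tagged* rhs with
    // Smi::FromInt(0x1f), as the deleted code does, is equivalent for a
    // valid smi.
    int ShiftCount(int32_t tagged_rhs) { return SmiUntag(tagged_rhs) & 0x1f; }

    // SHL: the signed result fits in a smi iff it lies in
    // [-0x40000000, 0x3fffffff]; adding 0x40000000 and testing the sign bit
    // (the 'mi' condition above) is exactly this check.
    bool FitsSignedSmi(int32_t untagged) {
      return (static_cast<uint32_t>(untagged) + 0x40000000u) < 0x80000000u;
    }

    // SHR by 1: neither of the two top bits may be set, or the unsigned
    // result is lost (0x80000000) or turns negative (0x40000000) on tagging.
    bool FitsUnsignedSmi(uint32_t untagged) {
      return (untagged & 0xc0000000u) == 0;
    }

    // MOD by a power of two on a non-negative smi is a mask on the tagged
    // value; the extra low mask bit is harmless because the tag bit is zero.
    int32_t TaggedModPowerOf2(int32_t tagged_lhs, int32_t power_of_2) {
      assert(tagged_lhs >= 0);
      return tagged_lhs & ((power_of_2 << kSmiTagSize) - 1);
    }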
-void CodeGenerator::Comparison(Condition cond,
-                               Expression* left,
-                               Expression* right,
-                               bool strict) {
-  VirtualFrame::RegisterAllocationScope scope(this);
-
-  if (left != NULL) Load(left);
-  if (right != NULL) Load(right);
-
-  // sp[0] : y
-  // sp[1] : x
-  // result : cc register
-
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cond == eq);
-
-  Register lhs;
-  Register rhs;
-
-  bool lhs_is_smi;
-  bool rhs_is_smi;
-
-  // We load the top two stack positions into registers chosen by the virtual
-  // frame.  This should keep the register shuffling to a minimum.
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cond == gt || cond == le) {
-    cond = ReverseCondition(cond);
-    lhs_is_smi = frame_->KnownSmiAt(0);
-    rhs_is_smi = frame_->KnownSmiAt(1);
-    lhs = frame_->PopToRegister();
-    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
-  } else {
-    rhs_is_smi = frame_->KnownSmiAt(0);
-    lhs_is_smi = frame_->KnownSmiAt(1);
-    rhs = frame_->PopToRegister();
-    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
-  }
-
-  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
-
-  ASSERT(rhs.is(r0) || rhs.is(r1));
-  ASSERT(lhs.is(r0) || lhs.is(r1));
-
-  JumpTarget exit;
-
-  if (!both_sides_are_smi) {
-    // Now we have the two sides in r0 and r1.  We flush any other registers
-    // because the stub doesn't know about register allocation.
-    frame_->SpillAll();
-    Register scratch = VirtualFrame::scratch0();
-    Register smi_test_reg;
-    if (lhs_is_smi) {
-      smi_test_reg = rhs;
-    } else if (rhs_is_smi) {
-      smi_test_reg = lhs;
-    } else {
-      __ orr(scratch, lhs, Operand(rhs));
-      smi_test_reg = scratch;
-    }
-    __ tst(smi_test_reg, Operand(kSmiTagMask));
-    JumpTarget smi;
-    smi.Branch(eq);
-
-    // Perform non-smi comparison by stub.
-    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
-    // We call with 0 args because there are 0 on the stack.
-    CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
-    frame_->CallStub(&stub, 0);
-    __ cmp(r0, Operand(0, RelocInfo::NONE));
-    exit.Jump();
-
-    smi.Bind();
-  }
-
-  // Do smi comparisons by pointer comparison.
-  __ cmp(lhs, Operand(rhs));
-
-  exit.Bind();
-  cc_reg_ = cond;
-}
-
-
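The "pointer comparison" in the smi fast path of Comparison above is sound because tagging is order-preserving; a one-line sketch (illustration only, not V8 code):

    // Tagging a 31-bit smi multiplies it by two, which preserves signed
    // order, so two tagged smis compare exactly like their untagged values.
    bool SmiLessThan(int32_t tagged_a, int32_t tagged_b) {
      return tagged_a < tagged_b;  // == SmiUntag(tagged_a) < SmiUntag(tagged_b)
    }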
-// Call the function on the stack with the given arguments.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  frame_->CallStub(&call_function, arg_count + 1);
-
-  // Restore context and pop function from the stack.
-  __ ldr(cp, frame_->Context());
-  frame_->Drop();  // discard the TOS
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
-
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame_->Dup();
-  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
-  frame_->EmitPush(r0);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // At this point the top two stack elements are probably in registers
-  // since they were just loaded.  Ensure they are in registers and obtain
-  // those registers.
-  Register receiver_reg = frame_->Peek2();
-  Register arguments_reg = frame_->Peek();
-
-  // From now on the frame is spilled.
-  frame_->SpillAll();
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of the stack at this point:
-  //   sp[0]: arguments object of the current function or the hole.
-  //   sp[1]: receiver
-  //   sp[2]: applicand.apply
-  //   sp[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  JumpTarget slow;
-  Label done;
-  __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
-  __ cmp(ip, arguments_reg);
-  slow.Branch(ne);
-
-  Label build_args;
-  // Get rid of the arguments object probe.
-  frame_->Drop();
-  // Stack now has 3 elements on it.
-  // Contents of stack at this point:
-  //   sp[0]: receiver - in the receiver_reg register.
-  //   sp[1]: applicand.apply
-  //   sp[2]: applicand.
-
-  // Check that the receiver really is a JavaScript object.
-  __ JumpIfSmi(receiver_reg, &build_args);
-  // We allow all JSObjects including JSFunctions.  As long as
-  // JS_FUNCTION_TYPE is the last instance type and it is right
-  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-  // bound.
-  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &build_args);
-
-  // Check that applicand.apply is Function.prototype.apply.
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-  __ JumpIfSmi(r0, &build_args);
-  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &build_args);
-  Handle<Code> apply_code(
-      Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply));
-  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-  __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ cmp(r1, Operand(apply_code));
-  __ b(ne, &build_args);
-
-  // Check that applicand is a function.
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ JumpIfSmi(r1, &build_args);
-  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
-  __ b(ne, &build_args);
-
-  // Copy the arguments to this function possibly from the
-  // adaptor frame below it.
-  Label invoke, adapted;
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &adapted);
-
-  // No arguments adaptor frame. Copy fixed number of arguments.
-  __ mov(r0, Operand(scope()->num_parameters()));
-  for (int i = 0; i < scope()->num_parameters(); i++) {
-    __ ldr(r2, frame_->ParameterAt(i));
-    __ push(r2);
-  }
-  __ jmp(&invoke);
-
-  // Arguments adaptor frame present. Copy arguments from there, but cap
-  // the number copied to avoid stack overflows.
-  __ bind(&adapted);
-  static const uint32_t kArgumentsLimit = 1 * KB;
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
-  __ mov(r3, r0);
-  __ cmp(r0, Operand(kArgumentsLimit));
-  __ b(gt, &build_args);
-
-  // Loop through the arguments pushing them onto the execution
-  // stack. We don't inform the virtual frame of the push, so we don't
-  // have to worry about getting rid of the elements from the virtual
-  // frame.
-  Label loop;
-  // r3 is a small non-negative integer, due to the test above.
-  __ cmp(r3, Operand(0, RelocInfo::NONE));
-  __ b(eq, &invoke);
-  // Compute the address of the first argument.
-  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
-  __ add(r2, r2, Operand(kPointerSize));
-  __ bind(&loop);
-  // Post-decrement argument address by kPointerSize on each iteration.
-  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
-  __ push(r4);
-  __ sub(r3, r3, Operand(1), SetCC);
-  __ b(gt, &loop);
-
-  // Invoke the function.
-  __ bind(&invoke);
-  ParameterCount actual(r0);
-  __ InvokeFunction(r1, actual, CALL_FUNCTION);
-  // Drop applicand.apply and applicand from the stack, and push
-  // the result of the function call, but leave the spilled frame
-  // unchanged, with 3 elements, so it is correct when we compile the
-  // slow-case code.
-  __ add(sp, sp, Operand(2 * kPointerSize));
-  __ push(r0);
-  // Stack now has 1 element:
-  //   sp[0]: result
-  __ jmp(&done);
-
-  // Slow-case: Allocate the arguments object since we know it isn't
-  // there, and fall through to the slow-case code where we call
-  // applicand.apply.
-  __ bind(&build_args);
-  // Stack now has 3 elements, because we jumped here from a point where:
-  //   sp[0]: receiver
-  //   sp[1]: applicand.apply
-  //   sp[2]: applicand.
-  StoreArgumentsObject(false);
-
-  // Stack and frame now have 4 elements.
-  slow.Bind();
-
-  // Generic computation of x.apply(y, args) with no special optimization.
-  // Flip applicand.apply and applicand on the stack, so
-  // applicand looks like the receiver of the applicand.apply call.
-  // Then process it as a normal function call.
-  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
-
-  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-  frame_->CallStub(&call_function, 3);
-  // The function and its two arguments have been dropped.
-  frame_->Drop();  // Drop the receiver as well.
-  frame_->EmitPush(r0);
-  frame_->SpillAll();  // The other path to the done label has a spilled frame.
-  // Stack now has 1 element:
-  //   sp[0]: result
-  __ bind(&done);
-
-  // Restore the context register after a call.
-  __ ldr(cp, frame_->Context());
-}
-
-
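For orientation, the shape of code the lazy-apply fast path above serves is easiest to show at the JavaScript level; a sketch in a comment (the names forward and target are hypothetical):

    // The fast path targets forwarding functions of this shape:
    //
    //   function forward() {
    //     return target.apply(this, arguments);  // 'arguments' never escapes
    //   }
    //
    // When the arguments object has not been materialized and target.apply
    // is the builtin Function.prototype.apply, the generated code copies the
    // caller's actual arguments straight from the (possibly adapted) frame
    // instead of allocating an arguments object first.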
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  ASSERT(has_cc());
-  Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
-  target->Branch(cond);
-  cc_reg_ = al;
-}
-
-
-void CodeGenerator::CheckStack() {
-  frame_->SpillAll();
-  Comment cmnt(masm_, "[ check stack");
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  masm_->cmp(sp, Operand(ip));
-  StackCheckStub stub;
-  // Call the stub if lower.
-  masm_->mov(ip,
-             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                     RelocInfo::CODE_TARGET),
-             LeaveCC,
-             lo);
-  masm_->Call(ip, lo);
-}
-
-
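The CheckStack sequence above uses two 'lo'-predicated instructions so the common case costs only a compare; roughly equivalent C++ (CallStackCheckStub is a hypothetical stand-in for the stub call):

    #include <cstdint>

    void CallStackCheckStub();  // hypothetical stand-in for StackCheckStub

    void CheckStackSketch(uintptr_t sp, uintptr_t stack_limit) {
      if (sp < stack_limit) {   // unsigned 'lower than', like ARM's 'lo'
        CallStackCheckStub();   // taken only when nearing stack overflow
      }
    }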
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->SetExpectedHeight();
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  frame_->EmitPush(cp);
-  frame_->EmitPush(Operand(pairs));
-  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
-  frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(var->name()));
-    // Declaration nodes always use one of only two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Operand(Smi::FromInt(attr)));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Operand(0, RelocInfo::NONE));
-    }
-
-    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-
-    ASSERT(frame_->height() == original_height);
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-
-  if (val != NULL) {
-    WriteBarrierCharacter wb_info =
-        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
-    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
-    // Set initial value.
-    Reference target(this, node->proxy());
-    Load(val);
-    target.SetValue(NOT_CONST_INIT, wb_info);
-
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  frame_->Drop();
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present or not.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    Comment cmnt(masm_, "[ IfThenElse");
-    JumpTarget then;
-    JumpTarget else_;
-    // if (cond)
-    LoadCondition(node->condition(), &then, &else_, true);
-    if (frame_ != NULL) {
-      Branch(false, &else_);
-    }
-    // then
-    if (frame_ != NULL || then.is_linked()) {
-      then.Bind();
-      Visit(node->then_statement());
-    }
-    if (frame_ != NULL) {
-      exit.Jump();
-    }
-    // else
-    if (else_.is_linked()) {
-      else_.Bind();
-      Visit(node->else_statement());
-    }
-
-  } else if (has_then_stm) {
-    Comment cmnt(masm_, "[ IfThen");
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    // if (cond)
-    LoadCondition(node->condition(), &then, &exit, true);
-    if (frame_ != NULL) {
-      Branch(false, &exit);
-    }
-    // then
-    if (frame_ != NULL || then.is_linked()) {
-      then.Bind();
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    Comment cmnt(masm_, "[ IfElse");
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    // if (!cond)
-    LoadCondition(node->condition(), &exit, &else_, true);
-    if (frame_ != NULL) {
-      Branch(true, &exit);
-    }
-    // else
-    if (frame_ != NULL || else_.is_linked()) {
-      else_.Bind();
-      Visit(node->else_statement());
-    }
-
-  } else {
-    Comment cmnt(masm_, "[ If");
-    ASSERT(!has_then_stm && !has_else_stm);
-    // if (cond)
-    LoadCondition(node->condition(), &exit, &exit, false);
-    if (frame_ != NULL) {
-      if (has_cc()) {
-        cc_reg_ = al;
-      } else {
-        frame_->Drop();
-      }
-    }
-  }
-
-  // end
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  frame_->PopToR0();
-  frame_->PrepareForReturn();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump();
-  } else {
-    // Pop the result from the frame and prepare the frame for
-    // returning, thus making it easier to merge.
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump();
-    } else {
-      function_return_.Bind();
-      GenerateReturnSequence();
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
-  if (FLAG_trace) {
-    // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns the parameter as it is.
-    frame_->EmitPush(r0);
-    frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-
-#ifdef DEBUG
-  // Add a label for checking the size of the code used for returning.
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-  // Make sure that the constant pool is not emitted inside of the return
-  // sequence.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Tear down the frame which will restore the caller's frame pointer and
-    // the link register.
-    frame_->Exit();
-
-    // Here we use masm_-> instead of the __ macro to prevent the code
-    // coverage tool from instrumenting these instructions, as we rely on
-    // their exact size here.
-    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
-    masm_->add(sp, sp, Operand(sp_delta));
-    masm_->Jump(lr);
-    DeleteFrame();
-
-#ifdef DEBUG
-    // Check that the size of the code used for returning is large enough
-    // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceInstructions <=
-           masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
-  }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  if (node->is_catch_block()) {
-    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-#ifdef DEBUG
-  JumpTarget verified_true;
-  __ cmp(r0, cp);
-  verified_true.Branch(eq);
-  __ stop("PushContext: r0 is expected to be the same as cp");
-  verified_true.Bind();
-#endif
-  // Update context local.
-  __ str(cp, frame_->Context());
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
-  // Update context local.
-  __ str(cp, frame_->Context());
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->SetExpectedHeight();
-
-  Load(node->tag());
-
-  JumpTarget next_test;
-  JumpTarget fall_through;
-  JumpTarget default_entry;
-  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-    if (clause->is_default()) {
-      // Remember the default clause and compile it at the end.
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case clause");
-    // Compile the test.
-    next_test.Bind();
-    next_test.Unuse();
-    // Duplicate TOS.
-    frame_->Dup();
-    Comparison(eq, NULL, clause->label(), true);
-    Branch(false, &next_test);
-
-    // Before entering the body from the test, remove the switch value from
-    // the stack.
-    frame_->Drop();
-
-    // Label the body so that fall through is enabled.
-    if (i > 0 && cases->at(i - 1)->is_default()) {
-      default_exit.Bind();
-    } else {
-      fall_through.Bind();
-      fall_through.Unuse();
-    }
-    VisitStatements(clause->statements());
-
-    // If control flow can fall through from the body, jump to the next body
-    // or the end of the statement.
-    if (frame_ != NULL) {
-      if (i < length - 1 && cases->at(i + 1)->is_default()) {
-        default_entry.Jump();
-      } else {
-        fall_through.Jump();
-      }
-    }
-  }
-
-  // The final "test" removes the switch value.
-  next_test.Bind();
-  frame_->Drop();
-
-  // If there is a default clause, compile it.
-  if (default_clause != NULL) {
-    Comment cmnt(masm_, "[ Default clause");
-    default_entry.Bind();
-    VisitStatements(default_clause->statements());
-    // If control flow can fall out of the default and there is a case after
-    // it, jump to that case's body.
-    if (frame_ != NULL && default_exit.is_bound()) {
-      default_exit.Jump();
-    }
-  }
-
-  if (fall_through.is_linked()) {
-    fall_through.Bind();
-  }
-
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->SetExpectedHeight();
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  // Label the top of the loop for the backward CFG edge.  If the test
-  // is always true we can use the continue target, and if the test is
-  // always false there is no need.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  switch (info) {
-    case ALWAYS_TRUE:
-      node->continue_target()->SetExpectedHeight();
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      node->continue_target()->SetExpectedHeight();
-      break;
-    case DONT_KNOW:
-      node->continue_target()->SetExpectedHeight();
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control can fall off the end of the body, jump back to the
-      // top.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // If we have a continue in the body, we only have to bind its
-      // jump target.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        LoadCondition(node->cond(), &body, node->break_target(), true);
-        if (has_valid_frame()) {
-          // An invalid frame here indicates that control did not
-          // fall out of the test expression.
-          Branch(true, &body);
-        }
-      }
-      break;
-  }
-
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the test is never true and has no side effects there is no need
-  // to compile the test or body.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  node->break_target()->SetExpectedHeight();
-  IncrementLoopNesting();
-
-  // Label the top of the loop with the continue target for the backward
-  // CFG edge.
-  node->continue_target()->SetExpectedHeight();
-  node->continue_target()->Bind();
-
-  if (info == DONT_KNOW) {
-    JumpTarget body(JumpTarget::BIDIRECTIONAL);
-    LoadCondition(node->cond(), &body, node->break_target(), true);
-    if (has_valid_frame()) {
-      // A NULL frame indicates that control did not fall out of the
-      // test expression.
-      Branch(false, node->break_target());
-    }
-    if (has_valid_frame() || body.is_linked()) {
-      body.Bind();
-    }
-  }
-
-  if (has_valid_frame()) {
-    CheckStack();  // TODO(1222600): ignore if body contains calls.
-    Visit(node->body());
-
-    // If control flow can fall out of the body, jump back to the top.
-    if (has_valid_frame()) {
-      node->continue_target()->Jump();
-    }
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the test is never true there is no need to compile the test or
-  // body.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  node->break_target()->SetExpectedHeight();
-  IncrementLoopNesting();
-
-  // We know that the loop index is a smi if it is not modified in the
-  // loop body and it is checked against a constant limit in the loop
-  // condition.  In this case, we reset the static type information of the
-  // loop index to smi before compiling the body, the update expression, and
-  // the bottom check of the loop condition.
-  TypeInfoCodeGenState type_info_scope(this,
-                                       node->is_fast_smi_loop() ?
-                                       node->loop_variable()->AsSlot() :
-                                       NULL,
-                                       TypeInfo::Smi());
-
-  // If there is no update statement, label the top of the loop with the
-  // continue target, otherwise with the loop target.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  if (node->next() == NULL) {
-    node->continue_target()->SetExpectedHeight();
-    node->continue_target()->Bind();
-  } else {
-    node->continue_target()->SetExpectedHeight();
-    loop.Bind();
-  }
-
-  // If the test is always true, there is no need to compile it.
-  if (info == DONT_KNOW) {
-    JumpTarget body;
-    LoadCondition(node->cond(), &body, node->break_target(), true);
-    if (has_valid_frame()) {
-      Branch(false, node->break_target());
-    }
-    if (has_valid_frame() || body.is_linked()) {
-      body.Bind();
-    }
-  }
-
-  if (has_valid_frame()) {
-    CheckStack();  // TODO(1222600): ignore if body contains calls.
-    Visit(node->body());
-
-    if (node->next() == NULL) {
-      // If there is no update statement and control flow can fall out
-      // of the loop, jump directly to the continue label.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-    } else {
-      // If there is an update statement and control flow can reach it
-      // via falling out of the body of the loop or continuing, we
-      // compile the update statement.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        // Record the source position of the statement, as this code (which
-        // comes after the code for the body) actually belongs to the loop
-        // statement and not the body.
-        CodeForStatementPosition(node);
-        Visit(node->next());
-        loop.Jump();
-      }
-    }
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  Load(node->enumerable());
-
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  // Both SpiderMonkey and kjs ignore null and undefined in contrast
-  // to the specification.  12.6.4 mandates a call to ToObject.
-  frame_->EmitPop(r0);
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
-  exit.Branch(eq);
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(r0, ip);
-  exit.Branch(eq);
-
-  // Stack layout in body:
-  // [iteration counter (Smi)]
-  // [length of array]
-  // [FixedArray]
-  // [Map or 0]
-  // [Object]
-
-  // Check if enumerable is already a JSObject
-  __ tst(r0, Operand(kSmiTagMask));
-  primitive.Branch(eq);
-  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
-  jsobject.Branch(hs);
-
-  primitive.Bind();
-  frame_->EmitPush(r0);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // r0: value to be iterated over
-  frame_->EmitPush(r0);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ mov(r1, Operand(r0));
-  loop.Bind();
-  // Check that there are no elements.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-  __ cmp(r2, r4);
-  call_runtime.Branch(ne);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in r3 for the subsequent
-  // prototype load.
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
-  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
-  __ cmp(r2, ip);
-  call_runtime.Branch(eq);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
-  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
-  __ tst(r2, Operand(kSmiTagMask));
-  call_runtime.Branch(eq);
-  // For all objects but the receiver, check that the cache is empty.
-  // r4: empty fixed array root.
-  __ cmp(r1, r0);
-  check_prototype.Branch(eq);
-  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(r2, r4);
-  call_runtime.Branch(ne);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(r1, ip);
-  loop.Branch(ne);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a map from the runtime call, we can do a fast
-  // modification check. Otherwise, we got a fixed array, and we have
-  // to do a slow check.
-  // r0: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ mov(r2, Operand(r0));
-  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
-  __ cmp(r1, ip);
-  fixed_array.Branch(ne);
-
-  use_cache.Bind();
-  // Get enum cache
-  // r0: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ mov(r1, Operand(r0));
-  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
-  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
-  __ ldr(r2,
-         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(r0);  // map
-  frame_->EmitPush(r2);  // enum cache bridge cache
-  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
-  frame_->EmitPush(r0);
-  __ mov(r0, Operand(Smi::FromInt(0)));
-  frame_->EmitPush(r0);
-  entry.Jump();
-
-  fixed_array.Bind();
-  __ mov(r1, Operand(Smi::FromInt(0)));
-  frame_->EmitPush(r1);  // insert 0 in place of Map
-  frame_->EmitPush(r0);
-
-  // Push the length of the array and the initial index onto the stack.
-  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
-  frame_->EmitPush(r0);
-  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
-  frame_->EmitPush(r0);
-
-  // Condition.
-  entry.Bind();
-  // sp[0] : index
-  // sp[1] : array/enum cache length
-  // sp[2] : array or enum cache
-  // sp[3] : 0 or map
-  // sp[4] : enumerable
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->SetExpectedHeight();
-  node->continue_target()->SetExpectedHeight();
-
-  // Load the current count to r0, load the length to r1.
-  __ Ldrd(r0, r1, frame_->ElementAt(0));
-  __ cmp(r0, r1);  // compare to the array length
-  node->break_target()->Branch(hs);
-
-  // Get the i'th entry of the array.
-  __ ldr(r2, frame_->ElementAt(2));
-  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-  // Get Map or 0.
-  __ ldr(r2, frame_->ElementAt(3));
-  // Check if this (still) matches the map of the enumerable.
-  // If not, we have to filter the key.
-  __ ldr(r1, frame_->ElementAt(4));
-  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(r2));
-  end_del_check.Branch(eq);
-
-  // Convert the entry to a string (or null if it isn't a property anymore).
-  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(r0);
-  frame_->EmitPush(r3);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
-  __ mov(r3, Operand(r0), SetCC);
-  // If the property has been removed while iterating, we just skip it.
-  node->continue_target()->Branch(eq);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.  r3: i'th entry of the enum cache (or string thereof)
-  frame_->EmitPush(r3);  // push entry
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    Reference each(this, node->each());
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        // Loading a reference may leave the frame in an unspilled state.
-        frame_->SpillAll();  // Sync stack to memory.
-        // Get the value (under the reference on the stack) from memory.
-        __ ldr(r0, frame_->ElementAt(each.size()));
-        frame_->EmitPush(r0);
-        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
-        frame_->Drop(2);  // The result of the set and the extra pushed value.
-      } else {
-        // If the reference was to a slot we rely on the convenient property
-        // that it doesn't matter whether a value (e.g., r0 pushed above) is
-        // right on top of or right underneath a zero-sized reference.
-        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
-        frame_->Drop(1);  // Drop the result of the set operation.
-      }
-    }
-  }
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    Visit(node->body());
-  }
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(r0);
-  __ add(r0, r0, Operand(Smi::FromInt(1)));
-  frame_->EmitPush(r0);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-  ASSERT(frame_->height() == original_height);
-}
-
-
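To keep the five-slot stack layout in VisitForInStatement straight, here is the same state as a struct, with the per-iteration steps spelled out (a sketch, not V8 code):

    struct ForInState {          // mirrors sp[0]..sp[4] in the code above
      int32_t index;             // sp[0]: smi iteration counter
      int32_t length;            // sp[1]: smi length of the array/enum cache
      void*   cache;             // sp[2]: FixedArray or enum cache
      void*   map_or_zero;       // sp[3]: receiver map when the enum cache
                                 //        is valid, smi 0 otherwise
      void*   enumerable;        // sp[4]: the object being iterated over
    };
    // Per iteration: stop when index >= length; load cache[index]; if the
    // receiver's current map still equals map_or_zero the key is used as-is,
    // otherwise FILTER_KEY re-checks (and may skip) it; then index += 1 (smi).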
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
-  frame_->EmitPush(r0);
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->catch_block()->statements());
-  }
-  if (frame_ != NULL) {
-    exit.Jump();
-  }
-
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the labels for all escapes from the try block, including
-  // returns. During shadowing, the original label is hidden as the
-  // LabelShadow and operations on the original actually affect the
-  // shadowing label.
-  //
-  // We should probably try to unify the escaping labels and the return
-  // label.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->try_block()->statements());
-  }
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original labels are unshadowed and the
-  // LabelShadows represent the formerly shadowing labels.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(r1);  // r0 can contain the return value.
-    __ mov(r3, Operand(handler_address));
-    __ str(r1, MemOperand(r3));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing labels that have been
-  // jumped to.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from the try chain.
-      shadows[i]->Bind();
-      // Because we can be jumping here (to spilled code) from unspilled
-      // code, we need to reestablish a spilled frame at this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (eg, for...in) may have left stuff on the stack.
-      __ mov(r3, Operand(handler_address));
-      __ ldr(sp, MemOperand(r3));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(r1);  // r0 can contain the return value.
-      __ str(r1, MemOperand(r3));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
-        frame_->PrepareForReturn();
-      }
-      shadows[i]->other_target()->Jump();
-    }
-  }
-
-  exit.Bind();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: Used to keep track of reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(r0);  // save exception object on the stack
-  // In case of thrown exceptions, this is where we continue.
-  __ mov(r2, Operand(Smi::FromInt(THROWING)));
-  finally_block.Jump();
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the labels for all escapes from the try block, including
-  // returns.  Shadowing hides the original label as the LabelShadow and
-  // operations on the original actually affect the shadowing label.
-  //
-  // We should probably try to unify the escaping labels and the return
-  // label.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->try_block()->statements());
-  }
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original labels are unshadowed and the
-  // LabelShadows represent the formerly shadowing labels.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(r1);
-    __ mov(r3, Operand(handler_address));
-    __ str(r1, MemOperand(r3));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in r2, then jump around the unlink blocks if any.
-    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-    frame_->EmitPush(r0);
-    __ mov(r2, Operand(Smi::FromInt(FALLING)));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // in (a non-refcounted reference to) r0.  We must preserve it
-      // until it is pushed.
-      //
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      shadows[i]->Bind();
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (eg, for...in) may have left stuff on the
-      // stack.
-      __ mov(r3, Operand(handler_address));
-      __ ldr(sp, MemOperand(r3));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.  The next
-      // handler address is currently on top of the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(r1);
-      __ str(r1, MemOperand(r3));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this label shadowed the function return, materialize the
-        // return value on the stack.
-        frame_->EmitPush(r0);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-        frame_->EmitPush(r0);
-      }
-      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(r2);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->finally_block()->statements());
-  }
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(r2);
-    frame_->EmitPop(r0);
-  }
-
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      JumpTarget* original = shadows[i]->other_target();
-      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
-      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
-        JumpTarget skip;
-        skip.Branch(ne);
-        frame_->PrepareForReturn();
-        original->Jump();
-        skip.Bind();
-      } else {
-        original->Branch(eq);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
-    exit.Branch(ne);
-
-    // Rethrow exception.
-    frame_->EmitPush(r0);
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
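The try-finally code above threads a small state machine through r2; a compact restatement of the dispatch (illustration only, not V8 code):

    enum FinallyState { FALLING, THROWING, JUMPING };

    // How the state left in r2 is consumed once the finally body completes.
    void DispatchAfterFinally(int state) {
      switch (state) {
        case FALLING:   /* fall through to code after the statement */  break;
        case THROWING:  /* re-throw the exception saved in r0 */        break;
        default:        /* JUMPING + i: resume the i'th shadowed jump,
                           where index 0 is the shadowed function return */
                                                                        break;
      }
    }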
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  frame_->DebugBreak();
-#endif
-  // Ignore the return value.
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // Use the fast-case closure allocation code that allocates in new
-  // space for nested functions that don't need their literals cloned.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame_->EmitPush(Operand(function_info));
-    frame_->SpillAll();
-    frame_->CallStub(&stub, 1);
-    frame_->EmitPush(r0);
-  } else {
-    // Create a new closure.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(function_info));
-    frame_->EmitPush(Operand(pretenure
-                             ? FACTORY->true_value()
-                             : FACTORY->false_value()));
-    frame_->CallRuntime(Runtime::kNewClosure, 3);
-    frame_->EmitPush(r0);
-  }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ FunctionLiteral");
-
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    ASSERT(frame_->height() == original_height);
-    return;
-  }
-  InstantiateFunction(function_info, node->pretenure());
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  InstantiateFunction(node->shared_function_info(), false);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then;
-  JumpTarget else_;
-  LoadCondition(node->condition(), &then, &else_, true);
-  if (has_valid_frame()) {
-    Branch(false, &else_);
-  }
-  if (has_valid_frame() || then.is_linked()) {
-    then.Bind();
-    Load(node->then_expression());
-  }
-  if (else_.is_linked()) {
-    JumpTarget exit;
-    if (has_valid_frame()) exit.Jump();
-    else_.Bind();
-    Load(node->else_expression());
-    if (exit.is_linked()) exit.Bind();
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // JumpTargets do not yet support merging frames so the frame must be
-    // spilled when jumping to these targets.
-    JumpTarget slow;
-    JumpTarget done;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(slot->var()->name()));
-
-    if (typeof_state == INSIDE_TYPEOF) {
-      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind();
-    frame_->EmitPush(r0);
-
-  } else {
-    Register scratch = VirtualFrame::scratch0();
-    TypeInfo info = type_info(slot);
-    frame_->EmitPush(SlotOperand(slot, scratch), info);
-
-    if (slot->var()->mode() == Variable::CONST) {
-      // Const slots may contain 'the hole' value (the constant hasn't been
-      // initialized yet) which needs to be converted into the 'undefined'
-      // value.
-      Comment cmnt(masm_, "[ Unhole const");
-      Register tos = frame_->PopToRegister();
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(tos, ip);
-      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
-      frame_->EmitPush(tos);
-    }
-  }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  VirtualFrame::RegisterAllocationScope scope(this);
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Get the loaded value into a register, but leave it on the stack.
-  Register tos = frame_->Peek();
-
-  // If the loaded value is the sentinel that indicates that we
-  // haven't loaded the arguments object yet, we need to do it now.
-  JumpTarget exit;
-  __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
-  __ cmp(tos, ip);
-  exit.Branch(ne);
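-  // The sentinel is still on top of the stack; drop it and materialize
-  // the real arguments object in its place.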
-  frame_->Drop();
-  StoreArgumentsObject(false);
-  exit.Bind();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  ASSERT(slot != NULL);
-  VirtualFrame::RegisterAllocationScope scope(this);
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(slot->var()->name()));
-
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores the attributes
-      // (e.g. READ_ONLY) of the context slot so that we can initialize
-      // const properties (introduced via eval("const foo = (some
-      // expr);")). Also, uses the current function context instead of
-      // the top context.
-      //
-      // Note that we must declare foo upon entry of eval(), via a
-      // context slot declaration, but we cannot initialize it at the
-      // same time, because the const declaration may be at the end of
-      // the eval code (sigh...) and the const variable may have been
-      // used before (where its value is 'undefined'). Thus, we can only
-      // do the initialization when we actually encounter the expression
-      // and when the expression operands are defined and valid, and
-      // thus we need the split into two operations: declaration of the
-      // context slot followed by initialization.
-      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-      frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling assignment expressions.
-    frame_->EmitPush(r0);
-
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-    Register scratch = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-
-    // The frame must be spilled when branching to this target.
-    JumpTarget exit;
-
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is
-      // executed, the code is identical to a normal store (see below).
-      Comment cmnt(masm_, "[ Init const");
-      __ ldr(scratch, SlotOperand(slot, scratch));
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(scratch, ip);
-      exit.Branch(ne);
-    }
-
-    // We must execute the store.  Storing a variable must keep the
-    // (new) value on the stack. This is necessary for compiling
-    // assignment expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because of const declarations which will
-    // initialize consts to 'the hole' value and by doing so, end up
-    // calling this code.  r2 may be loaded with context; used below in
-    // RecordWrite.
-    Register tos = frame_->Peek();
-    __ str(tos, SlotOperand(slot, scratch));
-    if (slot->type() == Slot::CONTEXT) {
-      // Skip write barrier if the written value is a smi.
-      __ tst(tos, Operand(kSmiTagMask));
-      // We don't use tos any more after here.
-      exit.Branch(eq);
-      // scratch is loaded with context when calling SlotOperand above.
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      // We need an extra register.  Until we have a way to do that in the
-      // virtual frame we will cheat and ask for a free TOS register.
-      Register scratch3 = frame_->GetTOSRegister();
-      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
-    }
-    // If we definitely did not jump over the assignment, we do not need
-    // to bind the exit label.  Doing so can defeat peephole
-    // optimization.
-    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
-      exit.Bind();
-    }
-  }
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                                      TypeofState typeof_state,
-                                                      JumpTarget* slow) {
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
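-  // Scopes known statically not to call eval are skipped; for the rest
-  // we emit a runtime check that the context's EXTENSION slot is NULL.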
-  Register tmp = frame_->scratch0();
-  Register tmp2 = frame_->scratch1();
-  Register context = cp;
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        frame_->SpillAll();
-        // Check that extension is NULL.
-        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
-        __ tst(tmp2, tmp2);
-        slow->Branch(ne);
-      }
-      // Load next context in chain.
-      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-      context = tmp;
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s->is_eval_scope()) {
-    frame_->SpillAll();
-    Label next, fast;
-    __ Move(tmp, context);
-    __ bind(&next);
-    // Terminate at global context.
-    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
-    __ cmp(tmp2, ip);
-    __ b(eq, &fast);
-    // Check that extension is NULL.
-    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
-    __ tst(tmp2, tmp2);
-    slow->Branch(ne);
-    // Load next context in chain.
-    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
-    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-    __ b(&next);
-    __ bind(&fast);
-  }
-
-  // Load the global object.
-  LoadGlobal();
-  // Set up the name register and call the load IC.
-  frame_->CallLoadIC(slot->var()->name(),
-                     typeof_state == INSIDE_TYPEOF
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT);
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
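-  // On the fast path the loaded value ends up in r0 and control jumps
-  // to |done|; otherwise control falls through or branches to |slow|.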
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    frame_->SpillAll();
-    done->Jump();
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    frame_->SpillAll();
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      __ ldr(r0,
-             ContextSlotOperandCheckExtensions(potential_slot,
-                                               r1,
-                                               r2,
-                                               slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-        __ cmp(r0, ip);
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      }
-      done->Jump();
-    } else if (rewrite != NULL) {
-      // Generate fast case for argument loads.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          __ ldr(r0,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                   r1,
-                                                   r2,
-                                                   slow));
-          frame_->EmitPush(r0);
-          __ mov(r1, Operand(key_literal->handle()));
-          frame_->EmitPush(r1);
-          EmitKeyedLoad();
-          done->Jump();
-        }
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ VariableProxy");
-
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Literal");
-  Register reg = frame_->GetTOSRegister();
-  bool is_smi = node->handle()->IsSmi();
-  __ mov(reg, Operand(node->handle()));
-  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ RexExp Literal");
-
-  Register tmp = VirtualFrame::scratch0();
-  // Free up a TOS register that can be used to push the literal.
-  Register literal = frame_->GetTOSRegister();
-
-  // Retrieve the literal array and check the allocated entry.
-
-  // Load the function of this activation.
-  __ ldr(tmp, frame_->Function());
-
-  // Load the literals array of the function.
-  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
-
-  // Load the literal at the ast saved index.
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ ldr(literal, FieldMemOperand(tmp, literal_offset));
-
-  JumpTarget materialized;
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(literal, ip);
-  // This branch locks the virtual frame at the materialized label to
-  // match the one we have here, where the literal register is not on the
-  // stack and nothing is spilled.
-  materialized.Branch(ne);
-
-  // If the entry is undefined we call the runtime system to compute
-  // the literal.
-  // literal array  (0)
-  frame_->EmitPush(tmp);
-  // literal index  (1)
-  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
-  // RegExp pattern (2)
-  frame_->EmitPush(Operand(node->pattern()));
-  // RegExp flags   (3)
-  frame_->EmitPush(Operand(node->flags()));
-  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  __ Move(literal, r0);
-
-  materialized.Bind();
-
-  frame_->EmitPush(literal);
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-  frame_->EmitPush(Operand(Smi::FromInt(size)));
-  frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
-  // r0 is newly allocated space.
-
-  // Reuse literal variable with (possibly) a new register, still holding
-  // the materialized boilerplate.
-  literal = frame_->PopToRegister(r0);
-
-  __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
-
-  // Push the clone.
-  frame_->EmitPush(r0);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  Register literal = frame_->GetTOSRegister();
-  // Load the function of this activation.
-  __ ldr(literal, frame_->Function());
-  // Literal array.
-  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
-  frame_->EmitPush(literal);
-  // Literal index.
-  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
-  // Constant properties.
-  frame_->EmitPush(Operand(node->constant_properties()));
-  // Should the object literal have fast elements?
-  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
-  if (node->depth() > 1) {
-    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->EmitPush(r0);  // save the result
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    // At the start of each iteration, the top of stack contains
-    // the newly created object literal.
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    Literal* key = property->key();
-    Expression* value = property->value();
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through
-      case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsSymbol()) {
-          Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-              Builtins::kStoreIC_Initialize));
-          Load(value);
-          if (property->emit_store()) {
-            frame_->PopToR0();
-            // Fetch the object literal.
-            frame_->SpillAllButCopyTOSToR1();
-            __ mov(r2, Operand(key->handle()));
-            frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-          } else {
-            frame_->Drop();
-          }
-          break;
-        }
-        // else fall through
-      case ObjectLiteral::Property::PROTOTYPE: {
-        frame_->Dup();
-        Load(key);
-        Load(value);
-        if (property->emit_store()) {
-          frame_->EmitPush(Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-          frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        frame_->Dup();
-        Load(key);
-        frame_->EmitPush(Operand(Smi::FromInt(1)));
-        Load(value);
-        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        frame_->Dup();
-        Load(key);
-        frame_->EmitPush(Operand(Smi::FromInt(0)));
-        Load(value);
-        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        break;
-      }
-    }
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  Register tos = frame_->GetTOSRegister();
-  // Load the function of this activation.
-  __ ldr(tos, frame_->Function());
-  // Load the literals array of the function.
-  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
-  frame_->EmitPush(tos);
-  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
-  frame_->EmitPush(Operand(node->constant_elements()));
-  int length = node->values()->length();
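-  // Pick the cheapest construction strategy: the copy-on-write stub for
-  // COW boilerplates, the runtime for deep or oversized literals, and
-  // the shallow-clone stub otherwise.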
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    frame_->CallStub(&stub, 3);
-    __ IncrementCounter(masm_->isolate()->counters()->cow_arrays_created_stub(),
-                        1, r1, r2);
-  } else if (node->depth() > 1) {
-    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    frame_->CallStub(&stub, 3);
-  }
-  frame_->EmitPush(r0);  // save the result
-  // r0: created object literal
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < node->values()->length(); i++) {
-    Expression* value = node->values()->at(i);
-
-    // If value is a literal the property value is already set in the
-    // boilerplate object.
-    if (value->AsLiteral() != NULL) continue;
-    // If value is a materialized literal the property value is already set
-    // in the boilerplate object if it is simple.
-    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
-
-    // The property must be set by generated code.
-    Load(value);
-    frame_->PopToR0();
-    // Fetch the object literal.
-    frame_->SpillAllButCopyTOSToR1();
-
-    // Get the elements array.
-    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ str(r0, FieldMemOperand(r1, offset));
-
-    // Update the write barrier for the array address.
-    __ RecordWrite(r1, Operand(offset), r3, r2);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->EmitPush(r0);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-
-    // Perform the binary operation.
-    Literal* literal = node->value()->AsLiteral();
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(),
-                   literal->handle(),
-                   false,
-                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (literal != NULL) {
-        ASSERT(!literal->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
-                             inline_smi);
-    }
-  } else {
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so this will never be an assignment to a variable;
-    // hence there must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      Load(prop->obj());
-    } else {
-      frame_->Dup();
-    }
-    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame_->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      Load(prop->obj());
-    } else if (var != NULL) {
-      LoadGlobal();
-    } else {
-      frame_->Dup();
-    }
-    EmitNamedLoad(name, var != NULL);
-
-    // Perform the binary operation.
-    Literal* literal = node->value()->AsLiteral();
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(),
-                   literal->handle(),
-                   false,
-                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (literal != NULL) {
-        ASSERT(!literal->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
-                             inline_smi);
-    }
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    // Load the receiver and swap with the value.
-    Load(prop->obj());
-    Register t0 = frame_->PopToRegister();
-    Register t1 = frame_->PopToRegister(t0);
-    frame_->EmitPush(t0);
-    frame_->EmitPush(t1);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  EmitNamedStore(name, is_contextual);
-  frame_->EmitPush(r0);
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      Load(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment. Swap
-      // the receiver and the value of the assignment expression.
-      Register t0 = frame_->PopToRegister();
-      Register t1 = frame_->PopToRegister(t0);
-      frame_->EmitPush(t0);
-      frame_->EmitPush(t1);
-    }
-    frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  WriteBarrierCharacter wb_info;
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-  //
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame_->Dup2();
-    EmitKeyedLoad();
-    frame_->EmitPush(r0);
-
-    // Perform the binary operation.
-    Literal* literal = node->value()->AsLiteral();
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(),
-                   literal->handle(),
-                   false,
-                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (literal != NULL) {
-        ASSERT(!literal->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
-                             inline_smi);
-    }
-    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-    wb_info = node->value()->AsLiteral() != NULL ?
-        NEVER_NEWSPACE :
-        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  EmitKeyedStore(prop->key()->type(), wb_info);
-  frame_->EmitPush(r0);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Register t0 = frame_->PopToRegister();
-    Register t1 = frame_->PopToRegister(t0);
-    frame_->EmitPush(t1);
-    frame_->EmitPush(t0);
-    frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  VirtualFrame::RegisterAllocationScope scope(this);
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Assignment");
-
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call does not return, but the code generator still
-    // emits code after it and expects a certain frame height.
-    frame_->EmitPush(r0);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Throw");
-
-  Load(node->exception());
-  CodeForSourcePosition(node->position());
-  frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->EmitPush(r0);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Property");
-
-  { Reference property(this, node);
-    property.GetValue();
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Call");
-
-  Expression* function = node->expression();
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Standard function call.
-  // Check if the function is a variable or a property.
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved in cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare stack for call to resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-    }
-
-    VirtualFrame::SpilledScope spilled_scope(frame_);
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                        NOT_INSIDE_TYPEOF,
-                                        &slow);
-      frame_->EmitPush(r0);
-      if (arg_count > 0) {
-        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-        frame_->EmitPush(r1);
-      } else {
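-        // No arguments: push an arbitrary register as a placeholder for
-        // the missing first argument.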
-        frame_->EmitPush(r2);
-      }
-      __ ldr(r1, frame_->Receiver());
-      frame_->EmitPush(r1);
-
-      // Push the strict mode flag.
-      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
-      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump();
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
-    frame_->EmitPush(r1);
-    if (arg_count > 0) {
-      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-      frame_->EmitPush(r1);
-    } else {
-      frame_->EmitPush(r2);
-    }
-    __ ldr(r1, frame_->Receiver());
-    frame_->EmitPush(r1);
-
-    // Push the strict mode flag.
-    frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
-    // Resolve the call.
-    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind();
-
-    // Touch up the stack with the right values for the function and the
-    // receiver.
-    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    frame_->CallStub(&call_function, arg_count + 1);
-
-    __ ldr(cp, frame_->Context());
-    // Remove the function from the stack.
-    frame_->Drop();
-    frame_->EmitPush(r0);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-    }
-
-    VirtualFrame::SpilledScope spilled_scope(frame_);
-    // Set up the name register and call the IC initialization code.
-    __ mov(r2, Operand(var->name()));
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub =
-        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-    CodeForSourcePosition(node->position());
-    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
-                           arg_count + 1);
-    __ ldr(cp, frame_->Context());
-    frame_->EmitPush(r0);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Load the function.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(var->name()));
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // r0: slot value; r1: receiver
-
-    // Load the receiver.
-    frame_->EmitPush(r0);  // function
-    frame_->EmitPush(r1);  // receiver
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind();
-      frame_->EmitPush(r0);  // function
-      LoadGlobalReceiver(VirtualFrame::scratch0());  // receiver
-      call.Bind();
-    }
-
-    // Call the function. At this point, everything is spilled but the
-    // function and receiver are in r0 and r1.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-    frame_->EmitPush(r0);
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
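-        // e.g. 'f.apply(receiver, arguments)' in a function where the
-        // arguments object has not been materialized yet.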
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        Load(property->obj());  // Receiver.
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-        }
-
-        VirtualFrame::SpilledScope spilled_scope(frame_);
-        // Set the name register and call the IC initialization code.
-        __ mov(r2, Operand(name));
-        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-        Handle<Code> stub =
-            ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-        CodeForSourcePosition(node->position());
-        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-        __ ldr(cp, frame_->Context());
-        frame_->EmitPush(r0);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the receiver and name of the function.
-      Load(property->obj());
-      Load(property->key());
-
-      if (property->is_synthetic()) {
-        EmitKeyedLoad();
-        // Put the function below the receiver.
-        // Use the global receiver.
-        frame_->EmitPush(r0);  // Function.
-        LoadGlobalReceiver(VirtualFrame::scratch0());
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-        frame_->EmitPush(r0);
-      } else {
-        // Swap the name of the function and the receiver on the stack to follow
-        // the calling convention for call ICs.
-        Register key = frame_->PopToRegister();
-        Register receiver = frame_->PopToRegister(key);
-        frame_->EmitPush(key);
-        frame_->EmitPush(receiver);
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-        }
-
-        // Load the key into r2 and call the IC initialization code.
-        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-        Handle<Code> stub =
-            ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
-                                                              in_loop);
-        CodeForSourcePosition(node->position());
-        frame_->SpillAll();
-        __ ldr(r2, frame_->ElementAt(arg_count + 1));
-        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-        frame_->Drop();  // Drop the key still on the stack.
-        __ ldr(cp, frame_->Context());
-        frame_->EmitPush(r0);
-      }
-    }
-
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver(VirtualFrame::scratch0());
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-    frame_->EmitPush(r0);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Spill everything from here to simplify the implementation.
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  // Load the argument count into r0 and the function into r1 as per
-  // calling convention.
-  __ mov(r0, Operand(arg_count));
-  __ ldr(r1, frame_->ElementAt(arg_count));
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
-  frame_->EmitPush(r0);
-
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  Register scratch = VirtualFrame::scratch0();
-  JumpTarget null, function, leave, non_function_constructor;
-
-  // Load the object into register.
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register tos = frame_->PopToRegister();
-
-  // If the object is a smi, we return null.
-  __ tst(tos, Operand(kSmiTagMask));
-  null.Branch(eq);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-  __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
-  null.Branch(lt);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
-  function.Branch(eq);
-
-  // Check if the constructor in the map is a function.
-  __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
-  __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
-  non_function_constructor.Branch(ne);
-
-  // The tos register now contains the constructor function. Grab the
-  // instance class name from there.
-  __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(tos,
-         FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->EmitPush(tos);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  __ mov(tos, Operand(FACTORY->function_class_symbol()));
-  frame_->EmitPush(tos);
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  __ mov(tos, Operand(FACTORY->Object_symbol()));
-  frame_->EmitPush(tos);
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  __ LoadRoot(tos, Heap::kNullValueRootIndex);
-  frame_->EmitPush(tos);
-
-  // All done.
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  Register scratch = VirtualFrame::scratch0();
-  JumpTarget leave;
-
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register tos = frame_->PopToRegister();  // tos contains object.
-  // if (object->IsSmi()) return the object.
-  __ tst(tos, Operand(kSmiTagMask));
-  leave.Branch(eq);
-  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
-  leave.Branch(ne);
-  // Load the value.
-  __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
-  leave.Bind();
-  frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  JumpTarget leave;
-
-  ASSERT(args->length() == 2);
-  Load(args->at(0));    // Load the object.
-  Load(args->at(1));    // Load the value.
-  Register value = frame_->PopToRegister();
-  Register object = frame_->PopToRegister(value);
-  // if (object->IsSmi()) return object.
-  __ tst(object, Operand(kSmiTagMask));
-  leave.Branch(eq);
-  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
-  leave.Branch(ne);
-  // Store the value.
-  __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
-  // Update the write barrier.
-  __ RecordWrite(object,
-                 Operand(JSValue::kValueOffset - kHeapObjectTag),
-                 scratch1,
-                 scratch2);
-  // Leave.
-  leave.Bind();
-  frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register reg = frame_->PopToRegister();
-  __ tst(reg, Operand(kSmiTagMask));
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register reg = frame_->PopToRegister();
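-  // Test the smi tag bit and the sign bit in one go; the value is a
-  // non-negative smi only if both are clear.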
-  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
-  cc_reg_ = eq;
-}
-
-
-// Generates the Math.pow method.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-
-  if (!CpuFeatures::IsSupported(VFP3)) {
-    frame_->CallRuntime(Runtime::kMath_pow, 2);
-    frame_->EmitPush(r0);
-  } else {
-    CpuFeatures::Scope scope(VFP3);
-    JumpTarget runtime, done;
-    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
-
-    Register scratch1 = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-
-    // Get base and exponent to registers.
-    Register exponent = frame_->PopToRegister();
-    Register base = frame_->PopToRegister(exponent);
-    Register heap_number_map = no_reg;
-
-    // Set the frame for the runtime jump target. The code below jumps to the
-    // jump target label so the frame needs to be established before that.
-    ASSERT(runtime.entry_frame() == NULL);
-    runtime.set_entry_frame(frame_);
-
-    __ JumpIfNotSmi(exponent, &exponent_nonsmi);
-    __ JumpIfNotSmi(base, &base_nonsmi);
-
-    heap_number_map = r6;
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Exponent is a smi and base is a smi. Get the smi value into vfp register
-    // d1.
-    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
-    __ b(&powi);
-
-    __ bind(&base_nonsmi);
-    // Exponent is a smi and base is a non-smi. Get the double value from
-    // the base into vfp register d1.
-    __ ObjectToDoubleVFPRegister(base, d1,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label());
-
-    __ bind(&powi);
-
-    // Load 1.0 into d0.
-    __ vmov(d0, 1.0);
-
-    // Get the absolute untagged value of the exponent and use that for the
-    // calculation.
-    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
-    // Negate if negative.
-    __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
-    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.
-
-    // Run through all the bits in the exponent. The result is calculated
-    // in d0, and d1 holds base^(2^i) for the i-th bit.
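-    // e.g. for |exponent| == 5 (binary 101) the loop multiplies d0 by
-    // base^1 and base^4, yielding base^5.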
-    Label more_bits;
-    __ bind(&more_bits);
-    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
-    __ vmul(d0, d0, d1, cs);  // Multiply by base^(2^i) if bit i is set.
-    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
-    __ b(ne, &more_bits);
-
-    // If exponent is positive we are done.
-    __ cmp(exponent, Operand(0, RelocInfo::NONE));
-    __ b(ge, &allocate_return);
-
-    // If exponent is negative result is 1/result (d2 already holds 1.0 in that
-    // case). However if d0 has reached infinity this will not provide the
-    // correct result, so call runtime if that is the case.
-    __ mov(scratch2, Operand(0x7FF00000));
-    __ mov(scratch1, Operand(0, RelocInfo::NONE));
-    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
-    __ VFPCompareAndSetFlags(d0, d1);
-    runtime.Branch(eq);  // d0 reached infinity.
-    __ vdiv(d0, d2, d0);
-    __ b(&allocate_return);
-
-    __ bind(&exponent_nonsmi);
-    // Special handling of raising to the power of -0.5 and 0.5. First
-    // check that the value is a heap number and that its lower mantissa
-    // bits are zero (as they are for both values).
-    heap_number_map = r6;
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
-    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
-    __ cmp(scratch1, heap_number_map);
-    runtime.Branch(ne);
-    __ tst(scratch2, scratch2);
-    runtime.Branch(ne);
-
-    // Load the higher bits (which contains the floating point exponent).
-    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
-
-    // Compare exponent with -0.5.
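-    // 0xbfe00000 is the upper word of the IEEE-754 double -0.5 (its
-    // lower word is zero, which was checked above).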
-    __ cmp(scratch1, Operand(0xbfe00000));
-    __ b(ne, &not_minus_half);
-
-    // Get the double value from the base into vfp register d0.
-    __ ObjectToDoubleVFPRegister(base, d0,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label(),
-                                 AVOID_NANS_AND_INFINITIES);
-
-    // Convert -0 into +0 by adding +0.
-    __ vmov(d2, 0.0);
-    __ vadd(d0, d2, d0);
-    // Load 1.0 into d2.
-    __ vmov(d2, 1.0);
-
-    // Calculate the reciprocal of the square root.
-    __ vsqrt(d0, d0);
-    __ vdiv(d0, d2, d0);
-
-    __ b(&allocate_return);
-
-    __ bind(&not_minus_half);
-    // Compare exponent with 0.5.
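-    // 0x3fe00000 is the upper word of the IEEE-754 double 0.5.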
-    __ cmp(scratch1, Operand(0x3fe00000));
-    runtime.Branch(ne);
-
-    // Get the double value from the base into vfp register d0.
-    __ ObjectToDoubleVFPRegister(base, d0,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label(),
-                                 AVOID_NANS_AND_INFINITIES);
-    // Convert -0 into +0 by adding +0.
-    __ vmov(d2, 0.0);
-    __ vadd(d0, d2, d0);
-    __ vsqrt(d0, d0);
-
-    __ bind(&allocate_return);
-    Register scratch3 = r5;
-    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
-                                   heap_number_map, runtime.entry_label());
-    __ mov(base, scratch3);
-    done.Jump();
-
-    runtime.Bind();
-
-    // Push back the arguments again for the runtime call.
-    frame_->EmitPush(base);
-    frame_->EmitPush(exponent);
-    frame_->CallRuntime(Runtime::kMath_pow, 2);
-    __ Move(base, r0);
-
-    done.Bind();
-    frame_->EmitPush(base);
-  }
-}
-
-
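A minimal, hypothetical C++ sketch of the square-and-multiply loop the deleted GenerateMathPow stub ran in d0/d1 (the standalone form and the name PowiSketch are ours, not V8's):

    // result plays the role of d0; square plays the role of d1 = base^(2^i).
    double PowiSketch(double base, int exponent) {
      unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                   : static_cast<unsigned>(exponent);
      double result = 1.0;
      double square = base;
      while (bits != 0) {
        if (bits & 1u) result *= square;  // multiply in base^(2^i) if bit set
        square *= square;                 // advance to base^(2^(i+1))
        bits >>= 1;
      }
      // A negative exponent takes the reciprocal, as the stub did via d2/d0
      // (modulo the stub's extra overflow-to-infinity runtime bailout).
      return exponent < 0 ? 1.0 / result : result;
    }
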
-// Generates the Math.sqrt method.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-
-  if (!CpuFeatures::IsSupported(VFP3)) {
-    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
-    frame_->EmitPush(r0);
-  } else {
-    CpuFeatures::Scope scope(VFP3);
-    JumpTarget runtime, done;
-
-    Register scratch1 = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-
-    // Get the value from the frame.
-    Register tos = frame_->PopToRegister();
-
-    // Set the frame for the runtime jump target. The code below jumps to the
-    // jump target label so the frame needs to be established before that.
-    ASSERT(runtime.entry_frame() == NULL);
-    runtime.set_entry_frame(frame_);
-
-    Register heap_number_map = r6;
-    Register new_heap_number = r5;
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Get the double value from the heap number into vfp register d0.
-    __ ObjectToDoubleVFPRegister(tos, d0,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label());
-
-    // Calculate the square root of d0 and place result in a heap number object.
-    __ vsqrt(d0, d0);
-    __ AllocateHeapNumberWithValue(new_heap_number,
-                                   d0,
-                                   scratch1, scratch2,
-                                   heap_number_map,
-                                   runtime.entry_label());
-    __ mov(tos, Operand(new_heap_number));
-    done.Jump();
-
-    runtime.Bind();
-    // Push back the argument again for the runtime call.
-    frame_->EmitPush(tos);
-    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
-    __ Move(tos, r0);
-
-    done.Bind();
-    frame_->EmitPush(tos);
-  }
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ LoadRoot(result_, Heap::kNanValueRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Register index = frame_->PopToRegister();
-  Register object = frame_->PopToRegister(index);
-
-  // We need two extra registers.
-  Register scratch = VirtualFrame::scratch0();
-  Register result = VirtualFrame::scratch1();
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object,
-                                   index,
-                                   scratch,
-                                   result);
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Register result = frame_->GetTOSRegister();
-  Register code = frame_->PopToRegister(result);
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code, result);
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ mov(result_, Operand(Smi::FromInt(0)));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Register index = frame_->PopToRegister();
-  Register object = frame_->PopToRegister(index);
-
-  // We need three extra registers.
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  // Use r6 without notifying the virtual frame.
-  Register result = r6;
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object,
-                               index,
-                               scratch1,
-                               scratch2,
-                               result);
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  JumpTarget answer;
-  // We need the CC bits to come out as not_equal in the case where the
-  // object is a smi.  This can't be done with the usual test opcode so
-  // we use XOR to get the right CC bits.
-  Register possible_array = frame_->PopToRegister();
-  Register scratch = VirtualFrame::scratch0();
-  __ and_(scratch, possible_array, Operand(kSmiTagMask));
-  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
-  answer.Branch(ne);
-  // It is a heap object - get the map. Check if the object is a JS array.
-  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
-  answer.Bind();
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  JumpTarget answer;
-  // We need the CC bits to come out as not_equal in the case where the
-  // object is a smi.  This can't be done with the usual test opcode so
-  // we use XOR to get the right CC bits.
-  Register possible_regexp = frame_->PopToRegister();
-  Register scratch = VirtualFrame::scratch0();
-  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
-  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
-  answer.Branch(ne);
-  // It is a heap object - get the map. Check if the object is a regexp.
-  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
-  answer.Bind();
-  cc_reg_ = eq;
-}
-
-
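Both GenerateIsArray and GenerateIsRegExp above rely on the same and/eor tag trick; a small sketch, assuming V8's smi encoding (kSmiTag == 0, kSmiTagMask == 1):

    #include <cstdint>
    inline bool IsSmiWord(intptr_t value) {
      const intptr_t kSmiTagMask = 1;  // assumption: tag bit 0 means smi
      // Non-zero (the stub's "ne") exactly when the tag bit is clear,
      // i.e. when the word encodes a smi rather than a heap object.
      return ((value & kSmiTagMask) ^ kSmiTagMask) != 0;
    }
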
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register possible_object = frame_->PopToRegister();
-  __ tst(possible_object, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(possible_object, ip);
-  true_target()->Branch(eq);
-
-  Register map_reg = VirtualFrame::scratch0();
-  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
-  false_target()->Branch(ne);
-
-  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
-  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
-  false_target()->Branch(lt);
-  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
-  cc_reg_ = le;
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  // typeof(arg) === 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-  __ tst(value, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-  // Check that this is an object.
-  __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
-  __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
-  cc_reg_ = ge;
-}
-
-
-// Deferred code that checks whether a String wrapper object is safe to use
-// with the default valueOf. It is called after the bit caching this
-// information in the map has been checked, with the object's map in the
-// map_result_ register. On return map_result_ contains 1 for true and 0 for
-// false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
-      __ cmp(map_result_, ip);
-      __ Assert(eq, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
-    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-    __ cmp(scratch1_, ip);
-    __ b(eq, &false_result);
-
-    // Look for valueOf symbol in the descriptor array, and indicate false if
-    // found. The type is not checked, so if it is a transition it is a false
-    // negative.
-    __ ldr(map_result_,
-           FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch2_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kPointerSize == 4);
-    __ add(scratch1_,
-           map_result_,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ add(scratch1_,
-           scratch1_,
-           Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-    // Calculate location of the first key name.
-    __ add(map_result_,
-           map_result_,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag +
-                   DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array. If one of these is the
-    // symbol valueOf the result is false.
-    Label entry, loop;
-    // The use of ip to store the valueOf symbol assumes that it is not
-    // otherwise used in the loop below.
-    __ mov(ip, Operand(FACTORY->value_of_symbol()));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ ldr(scratch2_, MemOperand(map_result_, 0));
-    __ cmp(scratch2_, ip);
-    __ b(eq, &false_result);
-    __ add(map_result_, map_result_, Operand(kPointerSize));
-    __ bind(&entry);
-    __ cmp(map_result_, Operand(scratch1_));
-    __ b(ne, &loop);
-
-    // Reload map as register map_result_ was used as temporary above.
-    __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object, check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
-    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
-    __ tst(scratch1_, Operand(kSmiTagMask));
-    __ b(eq, &false_result);
-    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
-    __ ldr(scratch2_,
-           ContextOperand(cp, Context::GLOBAL_INDEX));
-    __ ldr(scratch2_,
-           FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ ldr(scratch2_,
-           ContextOperand(
-               scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ cmp(scratch1_, scratch2_);
-    __ b(ne, &false_result);
-
-    // Set the bit in the map to indicate that it has been checked safe for
-    // default valueOf and set true result.
-    __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
-    __ orr(scratch1_,
-           scratch1_,
-           Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
-    __ mov(map_result_, Operand(1));
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ mov(map_result_, Operand(0, RelocInfo::NONE));
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
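The core of the deferred check above is a linear scan of the map's descriptor keys; a hypothetical sketch with keys modeled as plain strings:

    #include <string>
    #include <vector>
    bool HasOwnValueOf(const std::vector<std::string>& descriptor_keys) {
      for (const std::string& key : descriptor_keys) {
        if (key == "valueOf") return true;  // the stub's false_result path
      }
      return false;
    }
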
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register obj = frame_->PopToRegister();  // Pop the string wrapper.
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj);
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Register map_result = VirtualFrame::scratch0();
-  __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
-  __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
-  __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-  true_target()->Branch(ne);
-
-  // We need an additional two scratch registers for the deferred code.
-  Register scratch1 = VirtualFrame::scratch1();
-  // Use r6 without notifying the virtual frame.
-  Register scratch2 = r6;
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj, map_result, scratch1, scratch2);
-  deferred->Branch(eq);
-  deferred->BindExit();
-  __ tst(map_result, Operand(map_result));
-  cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register possible_function = frame_->PopToRegister();
-  __ tst(possible_function, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-  Register map_reg = VirtualFrame::scratch0();
-  Register scratch = VirtualFrame::scratch1();
-  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register possible_undetectable = frame_->PopToRegister();
-  __ tst(possible_undetectable, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-  Register scratch = VirtualFrame::scratch0();
-  __ ldr(scratch,
-         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
-  cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Register scratch0 = VirtualFrame::scratch0();
-  Register scratch1 = VirtualFrame::scratch1();
-  // Get the frame pointer for the calling frame.
-  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  __ ldr(scratch1,
-         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ ldr(scratch0,
-         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
-
-  // Check the marker in the calling frame.
-  __ ldr(scratch1,
-         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Register tos = frame_->GetTOSRegister();
-  Register scratch0 = VirtualFrame::scratch0();
-  Register scratch1 = VirtualFrame::scratch1();
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ ldr(scratch0,
-         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(scratch1,
-         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Get the number of formal parameters.
-  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ ldr(tos,
-         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
-         eq);
-
-  frame_->EmitPush(tos);
-}
-
-
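What GenerateArgumentsLength computes, modeled with a hypothetical struct in place of V8's real frame layout:

    struct CallerFrameSketch {
      bool is_arguments_adaptor;  // stands in for the context-slot marker test
      int adaptor_length;         // the length slot read on the "eq" path
    };
    int ArgumentsLengthSketch(const CallerFrameSketch& caller,
                              int formal_parameter_count) {
      return caller.is_arguments_adaptor ? caller.adaptor_length
                                         : formal_parameter_count;
    }
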
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // Satisfy contract with ArgumentsAccessStub:
-  // Load the key into r1 and the formal parameters count into r0.
-  Load(args->at(0));
-  frame_->PopToR1();
-  frame_->SpillAll();
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  frame_->CallStub(&stub, 0);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 0);
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-
-  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ mov(r4, Operand(r0));
-
-  __ bind(&heapnumber_allocated);
-
-  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
-  // by computing (see the sketch after this function):
-  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
-  if (CpuFeatures::IsSupported(VFP3)) {
-    __ PrepareCallCFunction(1, r0);
-    __ mov(r0, Operand(ExternalReference::isolate_address()));
-    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
-    CpuFeatures::Scope scope(VFP3);
-    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
-    // Create this constant using mov/orr to avoid PC relative load.
-    __ mov(r1, Operand(0x41000000));
-    __ orr(r1, r1, Operand(0x300000));
-    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
-    __ vmov(d7, r0, r1);
-    // Move 0x4130000000000000 to VFP.
-    __ mov(r0, Operand(0, RelocInfo::NONE));
-    __ vmov(d8, r0, r1);
-    // Subtract and store the result in the heap number.
-    __ vsub(d7, d7, d8);
-    __ sub(r0, r4, Operand(kHeapObjectTag));
-    __ vstr(d7, r0, HeapNumber::kValueOffset);
-    frame_->EmitPush(r4);
-  } else {
-    __ PrepareCallCFunction(2, r0);
-    __ mov(r0, Operand(r4));
-    __ mov(r1, Operand(ExternalReference::isolate_address()));
-    __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
-    frame_->EmitPush(r0);
-  }
-}
-
-
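A host-side illustration of the VFP bit trick above (sketch only; the helper name is invented):

    #include <cstdint>
    #include <cstring>
    double RandomBitsToDouble(uint32_t random_bits) {
      // 0x41300000:<random bits> is 1.<20 zeros><32 random bits> x 2^20,
      // i.e. 2^20 + random_bits / 2^32 -- the value built in d7.
      uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
      double with_bias;
      std::memcpy(&with_bias, &bits, sizeof with_bias);
      return with_bias - 1048576.0;  // subtract 1.0 x 2^20 (the d8 vsub)
    }
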
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 2);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 3);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 2);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(4, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-  RegExpExecStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 4);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-  RegExpConstructResultStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 3);
-  frame_->EmitPush(r0);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
-  __ Push(cache_, key_);
-  __ CallRuntime(Runtime::kGetFromCache, 2);
-  __ Move(dst_, r0);
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      Isolate::Current()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-    return;
-  }
-
-  Load(args->at(1));
-
-  frame_->PopToR1();
-  frame_->SpillAll();
-  Register key = r1;  // Just popped to r1.
-  Register result = r0;  // Free, as frame has just been spilled.
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-
-  __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(scratch1,
-         FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
-  __ ldr(scratch1,
-         ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ ldr(scratch1,
-         FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
-
-  DeferredSearchCache* deferred =
-      new DeferredSearchCache(result, scratch1, key);
-
-  const int kFingerOffset =
-      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
-  // result now holds finger offset as a smi.
-  __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // scratch2 now points to the start of fixed array elements.
-  __ ldr(result,
-         MemOperand(
-             scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
-  // Note side effect of PreIndex: scratch2 now points to the key of the pair.
-  __ cmp(key, result);
-  deferred->Branch(ne);
-
-  __ ldr(result, MemOperand(scratch2, kPointerSize));
-
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
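The inline fast path above probes exactly one cache entry, the one the "finger" points at; a hypothetical sketch with int keys and values:

    #include <utility>
    #include <vector>
    bool ProbeFingerSketch(const std::vector<std::pair<int, int> >& pairs,
                           int finger, int key, int* value_out) {
      if (pairs[finger].first != key) return false;  // DeferredSearchCache path
      *value_out = pairs[finger].second;             // inline hit
      return true;
    }
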
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and jump to the runtime.
-  Load(args->at(0));
-
-  NumberToStringStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 1);
-  frame_->EmitPush(r0);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  Register index2 = r2;
-  Register index1 = r1;
-  Register object = r0;
-  Register tmp1 = r3;
-  Register tmp2 = r4;
-
-  frame_->EmitPop(index2);
-  frame_->EmitPop(index1);
-  frame_->EmitPop(object);
-
-  DeferredSwapElements* deferred =
-      new DeferredSwapElements(object, index1, index2);
-
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
-  deferred->Branch(ne);
-  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
-  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(ne);
-
-  // Check the object's elements are in fast case and writable.
-  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(tmp2, ip);
-  deferred->Branch(ne);
-
-  // Smi-tagging is equivalent to multiplying by 2.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  // Check that both indices are smis.
-  __ mov(tmp2, index1);
-  __ orr(tmp2, tmp2, index2);
-  __ tst(tmp2, Operand(kSmiTagMask));
-  deferred->Branch(ne);
-
-  // Check that both indices are valid.
-  __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
-  __ cmp(tmp2, index1);
-  __ cmp(tmp2, index2, hi);
-  deferred->Branch(ls);
-
-  // Bring the offsets into the fixed array in tmp1 into index1 and
-  // index2.
-  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-  // Swap elements.
-  Register tmp3 = object;
-  object = no_reg;
-  __ ldr(tmp3, MemOperand(tmp1, index1));
-  __ ldr(tmp2, MemOperand(tmp1, index2));
-  __ str(tmp3, MemOperand(tmp1, index2));
-  __ str(tmp2, MemOperand(tmp1, index1));
-
-  Label done;
-  __ InNewSpace(tmp1, tmp2, eq, &done);
-  // Possible optimization: check that both values are smis
-  // (OR them together and test against the smi mask).
-
-  __ mov(tmp2, tmp1);
-  __ add(index1, index1, tmp1);
-  __ add(index2, index2, tmp1);
-  __ RecordWriteHelper(tmp1, index1, tmp3);
-  __ RecordWriteHelper(tmp2, index2, tmp3);
-  __ bind(&done);
-
-  deferred->BindExit();
-  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(tmp1);
-}
-
-
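GenerateSwapElements folds the two index tag checks into one via orr/tst; a sketch under the same smi-encoding assumption as before:

    #include <cstdint>
    inline bool BothSmisSketch(intptr_t a, intptr_t b) {
      const intptr_t kSmiTagMask = 1;  // assumption: tag bit 0 means smi
      // OR-ing leaves the tag bit set if either word is a heap object,
      // so a single test covers both operands.
      return ((a | b) & kSmiTagMask) == 0;
    }
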
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // for receiver and function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  frame_->CallJSFunction(n_args);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  if (CpuFeatures::IsSupported(VFP3)) {
-    TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                                 TranscendentalCacheStub::TAGGED);
-    frame_->SpillAllButCopyTOSToR0();
-    frame_->CallStub(&stub, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kMath_sin, 1);
-  }
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  if (CpuFeatures::IsSupported(VFP3)) {
-    TranscendentalCacheStub stub(TranscendentalCache::COS,
-                                 TranscendentalCacheStub::TAGGED);
-    frame_->SpillAllButCopyTOSToR0();
-    frame_->CallStub(&stub, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kMath_cos, 1);
-  }
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  if (CpuFeatures::IsSupported(VFP3)) {
-    TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                                 TranscendentalCacheStub::TAGGED);
-    frame_->SpillAllButCopyTOSToR0();
-    frame_->CallStub(&stub, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kMath_log, 1);
-  }
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Register lhs = frame_->PopToRegister();
-  Register rhs = frame_->PopToRegister(lhs);
-  __ cmp(lhs, rhs);
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Register right = frame_->PopToRegister();
-  Register left = frame_->PopToRegister(right);
-  Register tmp = frame_->scratch0();
-  Register tmp2 = frame_->scratch1();
-
-  // Jumps to done must have the eq flag set if the test is successful
-  // and clear if the test has failed.
-  Label done;
-
-  // Fail if either is a non-HeapObject.
-  __ cmp(left, Operand(right));
-  __ b(eq, &done);
-  __ and_(tmp, left, Operand(right));
-  __ eor(tmp, tmp, Operand(kSmiTagMask));
-  __ tst(tmp, Operand(kSmiTagMask));
-  __ b(ne, &done);
-  __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
-  __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
-  __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
-  __ b(ne, &done);
-  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
-  __ cmp(tmp, Operand(tmp2));
-  __ b(ne, &done);
-  __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
-  __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
-  __ cmp(tmp, tmp2);
-  __ bind(&done);
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-  Register tmp = frame_->scratch0();
-  __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
-  __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-
-  __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
-  __ IndexFromHash(value, value);
-  frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-  __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  if (CheckForInlineRuntimeCall(node)) {
-    ASSERT((has_cc() && frame_->height() == original_height) ||
-           (!has_cc() && frame_->height() == original_height + 1));
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Prepare stack for calling JS runtime function.
-    // Push the builtins object found in the current global object.
-    Register scratch = VirtualFrame::scratch0();
-    __ ldr(scratch, GlobalObjectOperand());
-    Register builtins = frame_->GetTOSRegister();
-    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
-    frame_->EmitPush(builtins);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    __ mov(r2, Operand(node->name()));
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub =
-        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-    __ ldr(cp, frame_->Context());
-    frame_->EmitPush(r0);
-  } else {
-    // Call the C runtime function.
-    frame_->CallRuntime(function, arg_count);
-    frame_->EmitPush(r0);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    LoadCondition(node->expression(), false_target(), true_target(), true);
-    // LoadCondition may (and usually does) leave a test and branch to
-    // be emitted by the caller.  In that case, negate the condition.
-    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
-      frame_->EmitPush(r0);
-
-    } else if (variable != NULL) {
-      // Deleting an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->EmitPush(Operand(variable->name()));
-        frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
-        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
-        frame_->EmitPush(r0);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Delete from the context holding the named variable.
-        frame_->EmitPush(cp);
-        frame_->EmitPush(Operand(variable->name()));
-        frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->EmitPush(r0);
-
-      } else {
-        // Default: Result of deleting non-global, not dynamically
-        // introduced variables is false.
-        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
-      }
-
-    } else {
-      // Default: Result of deleting expressions is true.
-      Load(node->expression());  // may have side-effects
-      frame_->Drop();
-      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->EmitPush(r0);  // r0 has result
-
-  } else {
-    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-    UnaryOverwriteMode overwrite =
-        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-
-    bool no_negative_zero = node->expression()->no_negative_zero();
-    Load(node->expression());
-    switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
-      case Token::SUB: {
-        frame_->PopToR0();
-        GenericUnaryOpStub stub(
-            Token::SUB,
-            overwrite,
-            NO_UNARY_FLAGS,
-            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-        frame_->CallStub(&stub, 0);
-        frame_->EmitPush(r0);  // r0 has result
-        break;
-      }
-
-      case Token::BIT_NOT: {
-        Register tos = frame_->PopToRegister();
-        JumpTarget not_smi_label;
-        JumpTarget continue_label;
-        // Smi check.
-        __ tst(tos, Operand(kSmiTagMask));
-        not_smi_label.Branch(ne);
-
-        __ mvn(tos, Operand(tos));
-        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
-        frame_->EmitPush(tos);
-        // The fast case is the first to jump to the continue label, so it gets
-        // to decide the virtual frame layout.
-        continue_label.Jump();
-
-        not_smi_label.Bind();
-        frame_->SpillAll();
-        __ Move(r0, tos);
-        GenericUnaryOpStub stub(Token::BIT_NOT,
-                                overwrite,
-                                NO_UNARY_SMI_CODE_IN_STUB);
-        frame_->CallStub(&stub, 0);
-        frame_->EmitPush(r0);
-
-        continue_label.Bind();
-        break;
-      }
-
-      case Token::VOID:
-        frame_->Drop();
-        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-        break;
-
-      case Token::ADD: {
-        Register tos = frame_->Peek();
-        // Smi check.
-        JumpTarget continue_label;
-        __ tst(tos, Operand(kSmiTagMask));
-        continue_label.Branch(eq);
-
-        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-        frame_->EmitPush(r0);
-
-        continue_label.Bind();
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  }
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
-  DeferredCountOperation(Register value,
-                         bool is_increment,
-                         bool is_postfix,
-                         int target_size)
-      : value_(value),
-        is_increment_(is_increment),
-        is_postfix_(is_postfix),
-        target_size_(target_size) {}
-
-  virtual void Generate() {
-    VirtualFrame copied_frame(*frame_state()->frame());
-
-    Label slow;
-    // Check for smi operand.
-    __ tst(value_, Operand(kSmiTagMask));
-    __ b(ne, &slow);
-
-    // Revert optimistic increment/decrement.
-    if (is_increment_) {
-      __ sub(value_, value_, Operand(Smi::FromInt(1)));
-    } else {
-      __ add(value_, value_, Operand(Smi::FromInt(1)));
-    }
-
-    // Slow case: Convert to number.  At this point the
-    // value to be incremented is in the value register.
-    __ bind(&slow);
-
-    // Convert the operand to a number.
-    copied_frame.EmitPush(value_);
-
-    copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-
-    if (is_postfix_) {
-      // Postfix: store to result (on the stack).
-      __ str(r0,  MemOperand(sp, target_size_ * kPointerSize));
-    }
-
-    copied_frame.EmitPush(r0);
-    copied_frame.EmitPush(Operand(Smi::FromInt(1)));
-
-    if (is_increment_) {
-      copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
-    } else {
-      copied_frame.CallRuntime(Runtime::kNumberSub, 2);
-    }
-
-    __ Move(value_, r0);
-
-    copied_frame.MergeTo(frame_state()->frame());
-  }
-
- private:
-  Register value_;
-  bool is_increment_;
-  bool is_postfix_;
-  int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CountOperation");
-  VirtualFrame::RegisterAllocationScope scope(this);
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-  bool is_slot = (var != NULL && var->mode() == Variable::VAR);
-
-  if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
-    // The type info declares that this variable is always a Smi.  That
-    // means it is a Smi both before and after the increment/decrement.
-    // Let's make use of that to generate a very minimal count operation.
-    Reference target(this, node->expression(), !is_const);
-    ASSERT(!target.is_illegal());
-    target.GetValue();  // Pushes the value.
-    Register value = frame_->PopToRegister();
-    if (is_postfix) frame_->EmitPush(value);
-    if (is_increment) {
-      __ add(value, value, Operand(Smi::FromInt(1)));
-    } else {
-      __ sub(value, value, Operand(Smi::FromInt(1)));
-    }
-    frame_->EmitPush(value);
-    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
-    if (is_postfix) frame_->Pop();
-    ASSERT_EQ(original_height + 1, frame_->height());
-    return;
-  }
-
-  // If it's a postfix expression and its result is not ignored and the
-  // reference is non-trivial, then push a placeholder on the stack now
-  // to hold the result of the expression.
-  bool placeholder_pushed = false;
-  if (!is_slot && is_postfix) {
-    frame_->EmitPush(Operand(Smi::FromInt(0)));
-    placeholder_pushed = true;
-  }
-
-  // A constant reference is never stored to, so it is not a compound
-  // assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
-      ASSERT_EQ(original_height + 1, frame_->height());
-      return;
-    }
-
-    // This pushes 0, 1 or 2 words on the stack to be used later when
-    // updating the target.  It also pushes the current value of the target.
-    target.GetValue();
-
-    bool value_is_known_smi = frame_->KnownSmiAt(0);
-    Register value = frame_->PopToRegister();
-
-    // Postfix: Store the old value as the result.
-    if (placeholder_pushed) {
-      frame_->SetElementAt(value, target.size());
-    } else if (is_postfix) {
-      frame_->EmitPush(value);
-      __ mov(VirtualFrame::scratch0(), value);
-      value = VirtualFrame::scratch0();
-    }
-
-    // We can't use any type information here: the virtual frame from the
-    // deferred code may have lost information, and a virtual frame with
-    // less specific type knowledge cannot be merged into one whose more
-    // specific knowledge has already been used to generate code.
-    frame_->ForgetTypeInfo();
-
-    // The constructor here will capture the current virtual frame and use it to
-    // merge to after the deferred code has run.  No virtual frame changes are
-    // allowed from here until the 'BindExit' below.
-    DeferredCode* deferred =
-        new DeferredCountOperation(value,
-                                   is_increment,
-                                   is_postfix,
-                                   target.size());
-    if (!value_is_known_smi) {
-      // Check for smi operand.
-      __ tst(value, Operand(kSmiTagMask));
-
-      deferred->Branch(ne);
-    }
-
-    // Perform optimistic increment/decrement.
-    if (is_increment) {
-      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
-    } else {
-      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
-    }
-
-    // If increment/decrement overflows, go to deferred code.
-    deferred->Branch(vs);
-
-    deferred->BindExit();
-
-    // Store the new value in the target if not const.
-    // At this point the answer is in the value register.
-    frame_->EmitPush(value);
-    // Set the target with the result, leaving the result on
-    // top of the stack.  Removes the target from the stack if
-    // it has a non-zero size.
-    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
-  }
-
-  // Postfix: Discard the new value and use the old.
-  if (is_postfix) frame_->Pop();
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
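The optimistic add-then-check above can be sketched on 32-bit, where a smi n is encoded as n << 1 and Smi::FromInt(1) is the word 2 (uses the GCC/Clang overflow builtin; illustration only):

    #include <cstdint>
    bool OptimisticIncrementSketch(int32_t tagged_smi, int32_t* result) {
      // A false return corresponds to the "vs" branch into deferred code.
      return !__builtin_add_overflow(tagged_smi, int32_t{2}, result);
    }
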
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
-
-  // NOTE: If the left hand side produces a materialized value (not in
-  // the CC register), we force the right hand side to do the
-  // same. This is necessary because we may have to branch to the exit
-  // after evaluating the left hand side (due to the shortcut
-  // semantics), but the compiler must (statically) know if the result
-  // of compiling the binary operation is materialized or not.
-  if (node->op() == Token::AND) {
-    JumpTarget is_true;
-    LoadCondition(node->left(), &is_true, false_target(), false);
-    if (has_valid_frame() && !has_cc()) {
-      // The left-hand side result is on top of the virtual frame.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      frame_->Dup();
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      ToBoolean(&pop_and_continue, &exit);
-      Branch(false, &exit);
-
-      // Pop the result of evaluating the first part.
-      pop_and_continue.Bind();
-      frame_->Pop();
-
-      // Evaluate right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    } else if (has_cc() || is_true.is_linked()) {
-      // The left-hand side is either (a) partially compiled to
-      // control flow with a final branch left to emit or (b) fully
-      // compiled to control flow and possibly true.
-      if (has_cc()) {
-        Branch(false, false_target());
-      }
-      is_true.Bind();
-      LoadCondition(node->right(), true_target(), false_target(), false);
-    } else {
-      // Nothing to do.
-      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    JumpTarget is_false;
-    LoadCondition(node->left(), true_target(), &is_false, false);
-    if (has_valid_frame() && !has_cc()) {
-      // The left-hand side result is on top of the virtual frame.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      frame_->Dup();
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      ToBoolean(&exit, &pop_and_continue);
-      Branch(true, &exit);
-
-      // Pop the result of evaluating the first part.
-      pop_and_continue.Bind();
-      frame_->Pop();
-
-      // Evaluate right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    } else if (has_cc() || is_false.is_linked()) {
-      // The left-hand side is either (a) partially compiled to
-      // control flow with a final branch left to emit or (b) fully
-      // compiled to control flow and possibly false.
-      if (has_cc()) {
-        Branch(true, true_target());
-      }
-      is_false.Bind();
-      LoadCondition(node->right(), true_target(), false_target(), false);
-    } else {
-      // Nothing to do.
-      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
-    }
-  }
-}
-
-
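The operand-preserving semantics the code above implements, modeled with doubles purely for illustration (0 and NaN taken as falsy):

    #include <functional>
    inline bool ToBooleanSketch(double v) { return v == v && v != 0.0; }
    double LogicalAndSketch(double left,
                            const std::function<double()>& load_right) {
      // '&&' yields one of the operand values untouched, never a coerced
      // boolean; '||' is the mirror image with the test inverted.
      return ToBooleanSketch(left) ? load_right() : left;
    }
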
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else {
-    // Optimize for the case where (at least) one of the expressions
-    // is a literal small integer.
-    Literal* lliteral = node->left()->AsLiteral();
-    Literal* rliteral = node->right()->AsLiteral();
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    bool overwrite_left = node->left()->ResultOverwriteAllowed();
-    bool overwrite_right = node->right()->ResultOverwriteAllowed();
-
-    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
-      VirtualFrame::RegisterAllocationScope scope(this);
-      Load(node->left());
-      if (frame_->KnownSmiAt(0)) overwrite_left = false;
-      SmiOperation(node->op(),
-                   rliteral->handle(),
-                   false,
-                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
-    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
-      VirtualFrame::RegisterAllocationScope scope(this);
-      Load(node->right());
-      if (frame_->KnownSmiAt(0)) overwrite_right = false;
-      SmiOperation(node->op(),
-                   lliteral->handle(),
-                   true,
-                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (lliteral != NULL) {
-        ASSERT(!lliteral->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      if (rliteral != NULL) {
-        ASSERT(!rliteral->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      VirtualFrame::RegisterAllocationScope scope(this);
-      OverwriteMode overwrite_mode = NO_OVERWRITE;
-      if (overwrite_left) {
-        overwrite_mode = OVERWRITE_LEFT;
-      } else if (overwrite_right) {
-        overwrite_mode = OVERWRITE_RIGHT;
-      }
-      Load(node->left());
-      Load(node->right());
-      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
-    }
-  }
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  frame_->EmitPush(MemOperand(frame_->Function()));
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
-    // Load the operand, move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Register tos = frame_->PopToRegister();
-
-    Register scratch = VirtualFrame::scratch0();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      true_target()->Branch(eq);
-      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-      __ cmp(tos, ip);
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-
-      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-
-      // It can be an undetectable string object.
-      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
-      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
-      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-      false_target()->Branch(eq);
-
-      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
-      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
-      cc_reg_ = lt;
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(tos, ip);
-      true_target()->Branch(eq);
-      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(tos, ip);
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      __ cmp(tos, ip);
-      true_target()->Branch(eq);
-
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-
-      // It can be an undetectable object.
-      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
-      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
-      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-      Register map_reg = scratch;
-      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
-      true_target()->Branch(eq);
-      // Regular expressions are callable so typeof == 'function'.
-      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->object_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-
-      __ LoadRoot(ip, Heap::kNullValueRootIndex);
-      __ cmp(tos, ip);
-      true_target()->Branch(eq);
-
-      Register map_reg = scratch;
-      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
-      false_target()->Branch(eq);
-
-      // It can be an undetectable object.
-      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
-      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
-      false_target()->Branch(eq);
-
-      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
-      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
-      false_target()->Branch(lt);
-      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
-      cc_reg_ = le;
-
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      false_target()->Jump();
-    }
-    ASSERT(!has_valid_frame() ||
-           (has_cc() && frame_->height() == original_height));
-    return;
-  }
-
-  switch (op) {
-    case Token::EQ:
-      Comparison(eq, left, right, false);
-      break;
-
-    case Token::LT:
-      Comparison(lt, left, right);
-      break;
-
-    case Token::GT:
-      Comparison(gt, left, right);
-      break;
-
-    case Token::LTE:
-      Comparison(le, left, right);
-      break;
-
-    case Token::GTE:
-      Comparison(ge, left, right);
-      break;
-
-    case Token::EQ_STRICT:
-      Comparison(eq, left, right, true);
-      break;
-
-    case Token::IN: {
-      Load(left);
-      Load(right);
-      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
-      frame_->EmitPush(r0);
-      break;
-    }
-
-    case Token::INSTANCEOF: {
-      Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      frame_->CallStub(&stub, 2);
-      // At this point if instanceof succeeded then r0 == 0.
-      __ tst(r0, Operand(r0));
-      cc_reg_ = eq;
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-  ASSERT((has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Register tos = frame_->PopToRegister();
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(tos, ip);
-
-  // The 'null' value is only equal to 'undefined' if using non-strict
-  // comparisons.
-  if (!node->is_strict()) {
-    true_target()->Branch(eq);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(tos, Operand(ip));
-    true_target()->Branch(eq);
-
-    __ tst(tos, Operand(kSmiTagMask));
-    false_target()->Branch(eq);
-
-    // It can be an undetectable object.
-    __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-    __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
-    __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
-    __ cmp(tos, Operand(1 << Map::kIsUndetectable));
-  }
-
-  cc_reg_ = eq;
-  ASSERT(has_cc() && frame_->height() == original_height);
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name,
-                                          bool is_contextual)
-      : receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-// The convention here is that on entry the receiver is in a register that
-// is not used by the stack.  On exit the answer is found in that same
-// register and the stack has the same height.
-void DeferredReferenceGetNamedValue::Generate() {
-#ifdef DEBUG
-  int expected_height = frame_state()->frame()->height();
-#endif
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
-  __ DecrementCounter(masm_->isolate()->counters()->named_load_inline(),
-                      1, scratch1, scratch2);
-  __ IncrementCounter(masm_->isolate()->counters()->named_load_inline_miss(),
-                      1, scratch1, scratch2);
-
-  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
-  __ Move(r0, receiver_);
-  __ mov(r2, Operand(name_));
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        Builtins::kLoadIC_Initialize));
-    RelocInfo::Mode mode = is_contextual_
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    __ Call(ic, mode);
-    // We must mark the code just after the call with the correct marker.
-    MacroAssembler::NopMarkerTypes code_marker;
-    if (is_contextual_) {
-      code_marker = is_dont_delete_
-                   ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
-                   : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
-    } else {
-      code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
-    }
-    __ MarkCode(code_marker);
-
-    // At this point the answer is in r0.  We move it to the expected register
-    // if necessary.
-    __ Move(receiver_, r0);
-
-    // Now go back to the frame that we entered with.  This will not overwrite
-    // the receiver register since that register was not in use when we came
-    // in.  The instructions emitted by this merge are skipped over by the
-    // inline load patching mechanism when looking for the branch instruction
-    // that tells it where the code to patch is.
-    copied_frame.MergeTo(frame_state()->frame());
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-  ASSERT_EQ(expected_height, frame_state()->frame()->height());
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceGetKeyedValue(Register key, Register receiver)
-      : key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register key_;
-  Register receiver_;
-};
-
-
-// Takes the key and receiver in r0 and r1, or vice versa.  Returns the
-// result in r0.
-void DeferredReferenceGetKeyedValue::Generate() {
-  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
-         (key_.is(r1) && receiver_.is(r0)));
-
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  __ DecrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
-                      1, scratch1, scratch2);
-  __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline_miss(),
-                      1, scratch1, scratch2);
-
-  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
-  // convention.
-  if (key_.is(r1)) {
-    __ Swap(r0, r1, ip);
-  }
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop instruction to indicate that the
-    // keyed load has been inlined.
-    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
-    // Now go back to the frame that we entered with.  This will not overwrite
-    // the receiver or key registers since they were not in use when we came
-    // in.  The instructions emitted by this merge are skipped over by the
-    // inline load patching mechanism when looking for the branch instruction
-    // that tells it where the code to patch is.
-    copied_frame.MergeTo(frame_state()->frame());
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  __ DecrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
-                      1, scratch1, scratch2);
-  __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline_miss(),
-                      1, scratch1, scratch2);
-
-  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
-  // calling convention.
-  if (value_.is(r1)) {
-    __ Swap(r0, r1, ip);
-  }
-  ASSERT(receiver_.is(r2));
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call keyed store IC. It has the arguments value, key and receiver in r0,
-    // r1 and r2.
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        (strict_mode_ == kStrictMode)
-        ? Builtins::kKeyedStoreIC_Initialize_Strict
-        : Builtins::kKeyedStoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop instruction to indicate that the
-    // keyed store has been inlined.
-    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceSetNamedValue(Register value,
-                                 Register receiver,
-                                 Handle<String> name,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        receiver_(receiver),
-        name_(name),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetNamedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register receiver_;
-  Handle<String> name_;
-  StrictModeFlag strict_mode_;
-};
-
-
-// Takes value in r0, receiver in r1 and returns the result (the
-// value) in r0.
-void DeferredReferenceSetNamedValue::Generate() {
-  // Record the entry frame and spill.
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  // Ensure value in r0, receiver in r1 to match store ic calling
-  // convention.
-  ASSERT(value_.is(r0) && receiver_.is(r1));
-  __ mov(r2, Operand(name_));
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call the named store IC. It takes the value in r0, the receiver in r1
-    // and the property name in r2.
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        (strict_mode_ == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                      : Builtins::kStoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop instruction to indicate that the
-    // named store has been inlined.
-    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
-    // Go back to the frame we entered with. The instructions
-    // generated by this merge are skipped over by the inline store
-    // patching mechanism when looking for the branch instruction that
-    // tells it where the code to patch is.
-    copied_frame.MergeTo(frame_state()->frame());
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-}
-
-
-// Consumes the top of stack (the receiver) and pushes the result instead.
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  bool contextual_load_in_builtin =
-      is_contextual &&
-      (ISOLATE->bootstrapper()->IsActive() ||
-      (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
-  if (scope()->is_global_scope() ||
-      loop_nesting() == 0 ||
-      contextual_load_in_builtin) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    // Set up the name register and call the load IC.
-    frame_->CallLoadIC(name,
-                       is_contextual
-                           ? RelocInfo::CODE_TARGET_CONTEXT
-                           : RelocInfo::CODE_TARGET);
-    frame_->EmitPush(r0);  // Push answer.
-  } else {
-    // Inline the in-object property case.
-    Comment cmnt(masm(), is_contextual
-                             ? "[ Inlined contextual property load"
-                             : "[ Inlined named property load");
-
-    // Counter will be decremented in the deferred code. Placed here to avoid
-    // having it in the instruction stream below where patching will occur.
-    if (is_contextual) {
-      __ IncrementCounter(
-          masm_->isolate()->counters()->named_load_global_inline(),
-          1, frame_->scratch0(), frame_->scratch1());
-    } else {
-      __ IncrementCounter(masm_->isolate()->counters()->named_load_inline(),
-                          1, frame_->scratch0(), frame_->scratch1());
-    }
-
-    // The following instructions are the inlined load of an in-object
-    // property.  Parts of this code are patched, so the exact instructions
-    // generated need to be fixed.  Therefore the constant pool is blocked
-    // while generating this code.
-
-    // Load the receiver from the stack.
-    Register receiver = frame_->PopToRegister();
-
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
-
-    bool is_dont_delete = false;
-    if (is_contextual) {
-      if (!info_->closure().is_null()) {
-        // When doing lazy compilation we can check if the global cell
-        // already exists and use its "don't delete" status as a hint.
-        AssertNoAllocation no_gc;
-        v8::internal::GlobalObject* global_object =
-            info_->closure()->context()->global();
-        LookupResult lookup;
-        global_object->LocalLookupRealNamedProperty(*name, &lookup);
-        if (lookup.IsProperty() && lookup.type() == NORMAL) {
-          ASSERT(lookup.holder() == global_object);
-          ASSERT(global_object->property_dictionary()->ValueAt(
-              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
-          is_dont_delete = lookup.IsDontDelete();
-        }
-      }
-      if (is_dont_delete) {
-        __ IncrementCounter(
-            masm_->isolate()->counters()->dont_delete_hint_hit(),
-            1, frame_->scratch0(), frame_->scratch1());
-      }
-    }
-
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      if (!is_contextual) {
-        // Check that the receiver is a heap object.
-        __ tst(receiver, Operand(kSmiTagMask));
-        deferred->Branch(eq);
-      }
-
-      // Check for the_hole_value if necessary.
-      // Below we rely on the number of instructions generated, and we can't
-      // cope with the Check macro which does not generate a fixed number of
-      // instructions.
-      Label skip, check_the_hole, cont;
-      if (FLAG_debug_code && is_contextual && is_dont_delete) {
-        __ b(&skip);
-        __ bind(&check_the_hole);
-        __ Check(ne, "DontDelete cells can't contain the hole");
-        __ b(&cont);
-        __ bind(&skip);
-      }
-
-#ifdef DEBUG
-      int InlinedNamedLoadInstructions = 5;
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-
-      Register scratch = VirtualFrame::scratch0();
-      Register scratch2 = VirtualFrame::scratch1();
-
-      // Check the map. The null map used below is patched by the inline cache
-      // code.  Therefore we can't use a LoadRoot call.
-      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ mov(scratch2, Operand(FACTORY->null_value()));
-      __ cmp(scratch, scratch2);
-      deferred->Branch(ne);
-
-      if (is_contextual) {
-#ifdef DEBUG
-        InlinedNamedLoadInstructions += 1;
-#endif
-        // Load the (initially invalid) cell and get its value.
-        masm()->mov(receiver, Operand(FACTORY->null_value()));
-        __ ldr(receiver,
-               FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
-
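-        // The deferred code uses this flag to pick the matching
-        // inlined-access marker after its IC call.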
-        deferred->set_is_dont_delete(is_dont_delete);
-
-        if (!is_dont_delete) {
-#ifdef DEBUG
-          InlinedNamedLoadInstructions += 3;
-#endif
-          __ cmp(receiver, Operand(FACTORY->the_hole_value()));
-          deferred->Branch(eq);
-        } else if (FLAG_debug_code) {
-#ifdef DEBUG
-          InlinedNamedLoadInstructions += 3;
-#endif
-          __ cmp(receiver, Operand(FACTORY->the_hole_value()));
-          __ b(&check_the_hole, eq);
-          __ bind(&cont);
-        }
-      } else {
-        // Initially use an invalid index. The index will be patched by the
-        // inline cache code.
-        __ ldr(receiver, MemOperand(receiver, 0));
-      }
-
-      // Make sure that the expected number of instructions is generated.
-      // If the code above is updated, the offsets in ic-arm.cc
-      // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
-      // to be updated.
-      ASSERT_EQ(InlinedNamedLoadInstructions,
-                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-
-    deferred->BindExit();
-    // At this point the receiver register has the result, either from the
-    // deferred code or from the inlined code.
-    frame_->EmitPush(receiver);
-  }
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-
-    // Get the value and receiver from the stack.
-    frame()->PopToR0();
-    Register value = r0;
-    frame()->PopToR1();
-    Register receiver = r1;
-
-    DeferredReferenceSetNamedValue* deferred =
-        new DeferredReferenceSetNamedValue(
-          value, receiver, name, strict_mode_flag());
-
-    // Check that the receiver is a heap object.
-    __ tst(receiver, Operand(kSmiTagMask));
-    deferred->Branch(eq);
-
-    // The following instructions are part of the inlined
-    // in-object property store code which can be patched. Therefore
-    // the exact number of instructions generated must be fixed, so
-    // the constant pool is blocked while generating this code.
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      Register scratch0 = VirtualFrame::scratch0();
-      Register scratch1 = VirtualFrame::scratch1();
-
-      // Check the map. Initially use an invalid map to force a
-      // failure. The map check will be patched in the runtime system.
-      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-#ifdef DEBUG
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-      __ mov(scratch0, Operand(FACTORY->null_value()));
-      __ cmp(scratch0, scratch1);
-      deferred->Branch(ne);
-
-      int offset = 0;
-      __ str(value, MemOperand(receiver, offset));
-
-      // Update the write barrier and record its size. We do not use
-      // the RecordWrite macro here because we want the offset
-      // addition instruction first to make it easy to patch.
-      Label record_write_start, record_write_done;
-      __ bind(&record_write_start);
-      // Add offset into the object.
-      __ add(scratch0, receiver, Operand(offset));
-      // Test that the object is not in the new space.  We cannot set
-      // region marks for new space pages.
-      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
-      // Record the actual write.
-      __ RecordWriteHelper(receiver, scratch0, scratch1);
-      __ bind(&record_write_done);
-      // Clobber all input registers when running with the debug-code flag
-      // turned on to provoke errors.
-      if (FLAG_debug_code) {
-        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
-        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
-      }
-      // Check that this is the first inlined write barrier or that
-      // this inlined write barrier has the same size as all the other
-      // inlined write barriers.
-      ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
-             (Isolate::Current()->inlined_write_barrier_size() ==
-              masm()->InstructionsGeneratedSince(&record_write_start)));
-      Isolate::Current()->set_inlined_write_barrier_size(
-          masm()->InstructionsGeneratedSince(&record_write_start));
-
-      // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
-                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-    deferred->BindExit();
-  }
-  ASSERT_EQ(expected_height, frame()->height());
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
-  if (loop_nesting() == 0) {
-    Comment cmnt(masm_, "[ Load from keyed property");
-    frame_->CallKeyedLoadIC();
-  } else {
-    // Inline the keyed load.
-    Comment cmnt(masm_, "[ Inlined load from keyed property");
-
-    // Counter will be decremented in the deferred code. Placed here to avoid
-    // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
-                        1, frame_->scratch0(), frame_->scratch1());
-
-    // Load the key and receiver from the stack.
-    bool key_is_known_smi = frame_->KnownSmiAt(0);
-    Register key = frame_->PopToRegister();
-    Register receiver = frame_->PopToRegister(key);
-
-    // The deferred code expects key and receiver in registers.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(key, receiver);
-
-    // Check that the receiver is a heap object.
-    __ tst(receiver, Operand(kSmiTagMask));
-    deferred->Branch(eq);
-
-    // The following instructions are part of the inlined keyed load
-    // property code which can be patched. Therefore the exact number of
-    // instructions generated needs to be fixed, so the constant pool is
-    // blocked while generating this code.
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      Register scratch1 = VirtualFrame::scratch0();
-      Register scratch2 = VirtualFrame::scratch1();
-      // Check the map. The null map used below is patched by the inline cache
-      // code.
-      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-      // Check that the key is a smi.
-      if (!key_is_known_smi) {
-        __ tst(key, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-      }
-
-#ifdef DEBUG
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-      __ mov(scratch2, Operand(FACTORY->null_value()));
-      __ cmp(scratch1, scratch2);
-      deferred->Branch(ne);
-
-      // Get the elements array from the receiver.
-      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-      __ AssertFastElements(scratch1);
-
-      // Check that key is within bounds. Use unsigned comparison to handle
-      // negative keys.
-      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
-      __ cmp(scratch2, key);
-      deferred->Branch(ls);  // Unsigned less equal.
-
-      // Load and check that the result is not the hole (key is a smi).
-      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
-      __ add(scratch1,
-             scratch1,
-             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
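-      // The key is a tagged smi, so the shift below compensates for the smi
-      // tag when computing the byte offset into the elements array.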
-      __ ldr(scratch1,
-             MemOperand(scratch1, key, LSL,
-                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-      __ cmp(scratch1, scratch2);
-      deferred->Branch(eq);
-
-      __ mov(r0, scratch1);
-      // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
-                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-
-    deferred->BindExit();
-  }
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
-                                   WriteBarrierCharacter wb_info) {
-  // Generate an inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    // Inline the keyed store.
-    Comment cmnt(masm_, "[ Inlined store to keyed property");
-
-    Register scratch1 = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-    Register scratch3 = r3;
-
-    // Counter will be decremented in the deferred code. Placed here to avoid
-    // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
-                        1, scratch1, scratch2);
-
-    // Load the value, key and receiver from the stack.
-    bool value_is_harmless = frame_->KnownSmiAt(0);
-    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
-    bool key_is_smi = frame_->KnownSmiAt(1);
-    Register value = frame_->PopToRegister();
-    Register key = frame_->PopToRegister(value);
-    VirtualFrame::SpilledScope spilled(frame_);
-    Register receiver = r2;
-    frame_->EmitPop(receiver);
-
-#ifdef DEBUG
-    bool we_remembered_the_write_barrier = value_is_harmless;
-#endif
-
-    // The deferred code expects value, key and receiver in registers.
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(
-          value, key, receiver, strict_mode_flag());
-
-    // Check that the value is a smi. As this inlined code does not set the
-    // write barrier it is only possible to store smi values.
-    if (!value_is_harmless) {
-      // If the value is not likely to be a Smi then let's test the fixed array
-      // for new space instead.  See below.
-      if (wb_info == LIKELY_SMI) {
-        __ tst(value, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-#ifdef DEBUG
-        we_remembered_the_write_barrier = true;
-#endif
-      }
-    }
-
-    if (!key_is_smi) {
-      // Check that the key is a smi.
-      __ tst(key, Operand(kSmiTagMask));
-      deferred->Branch(ne);
-    }
-
-    // Check that the receiver is a heap object.
-    __ tst(receiver, Operand(kSmiTagMask));
-    deferred->Branch(eq);
-
-    // Check that the receiver is a JSArray.
-    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
-    deferred->Branch(ne);
-
-    // Get the elements array from the receiver.
-    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-    if (!value_is_harmless && wb_info != LIKELY_SMI) {
-      Label ok;
-      __ and_(scratch2,
-              scratch1,
-              Operand(ExternalReference::new_space_mask(isolate())));
-      __ cmp(scratch2, Operand(ExternalReference::new_space_start(isolate())));
-      __ tst(value, Operand(kSmiTagMask), ne);
-      deferred->Branch(ne);
-#ifdef DEBUG
-      we_remembered_the_write_barrier = true;
-#endif
-    }
-    // Check that the elements array is not a dictionary.
-    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
-
-    // The following instructions are part of the inlined keyed store
-    // property code which can be patched. Therefore the exact number of
-    // instructions generated needs to be fixed, so the constant pool is
-    // blocked while generating this code.
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-#ifdef DEBUG
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-
-      // Read the fixed array map from the constant pool (not from the root
-      // array) so that the value can be patched.  When debugging, we patch this
-      // comparison to always fail so that we will hit the IC call in the
-      // deferred code which will allow the debugger to break for fast case
-      // stores.
-      __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
-      __ cmp(scratch2, scratch3);
-      deferred->Branch(ne);
-
-      // Check that the key is within bounds.  Both the key and the length of
-      // the JSArray are smis (because the fixed array check above ensures the
-      // elements are in fast case). Use unsigned comparison to handle negative
-      // keys.
-      __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
-      __ cmp(scratch3, key);
-      deferred->Branch(ls);  // Unsigned less equal.
-
-      // Store the value.
-      __ add(scratch1, scratch1,
-             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ str(value,
-             MemOperand(scratch1, key, LSL,
-                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-
-      // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
-                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-
-    ASSERT(we_remembered_the_write_barrier);
-
-    deferred->BindExit();
-  } else {
-    frame()->CallKeyedStoreIC(strict_mode_flag());
-  }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // A global variable reference is treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
-  }
-}
-
-
-void Reference::DupIfPersist() {
-  if (persist_after_get_) {
-    switch (type_) {
-      case KEYED:
-        cgen_->frame()->Dup2();
-        break;
-      case NAMED:
-        cgen_->frame()->Dup();
-        // Fall through.
-      case UNLOADED:
-      case ILLEGAL:
-      case SLOT:
-        // Do nothing.
-        ;
-    }
-  } else {
-    set_unloaded();
-  }
-}
-
-
-void Reference::GetValue() {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
-  MacroAssembler* masm = cgen_->masm();
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      DupIfPersist();
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      Handle<String> name = GetName();
-      DupIfPersist();
-      cgen_->EmitNamedLoad(name, is_global);
-      break;
-    }
-
-    case KEYED: {
-      ASSERT(property != NULL);
-      DupIfPersist();
-      cgen_->EmitKeyedLoad();
-      cgen_->frame()->EmitPush(r0);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
-  ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
-  MacroAssembler* masm = cgen_->masm();
-  VirtualFrame* frame = cgen_->frame();
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      cgen_->EmitNamedStore(GetName(), false);
-      frame->EmitPush(r0);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression_->AsProperty();
-      ASSERT(property != NULL);
-      cgen_->CodeForSourcePosition(property->position());
-      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
-      frame->EmitPush(r0);
-      set_unloaded();
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int len = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s_%s",
-               op_name,
-               overwrite_name,
-               specialized_on_rhs_ ? "_ConstantRhs" : "",
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 9b1f103..01aa805 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,162 +37,8 @@
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important; see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
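-    // Relies on the Type enum values above: SLOT keeps nothing on the
-    // stack, NAMED keeps the receiver and KEYED keeps receiver and key.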
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
-  // This is called in preparation for something that consumes the reference
-  // on the stack.  If we need the reference after the get, dup it now;
-  // otherwise mark it as unloaded.
-  inline void DupIfPersist();
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair).  It is threaded through the
-// call stack.  Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  virtual ~CodeGenState();
-
-  virtual JumpTarget* true_target() const { return NULL; }
-  virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
-  inline CodeGenerator* owner() { return owner_; }
-  inline CodeGenState* previous() const { return previous_; }
-
- private:
-  CodeGenerator* owner_;
-  CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own pair of branch labels.
-  ConditionCodeGenState(CodeGenerator* owner,
-                        JumpTarget* true_target,
-                        JumpTarget* false_target);
-
-  virtual JumpTarget* true_target() const { return true_target_; }
-  virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
-  TypeInfoCodeGenState(CodeGenerator* owner,
-                       Slot* slot_number,
-                       TypeInfo info);
-  ~TypeInfoCodeGenState();
-
-  virtual JumpTarget* true_target() const { return previous()->true_target(); }
-  virtual JumpTarget* false_target() const {
-    return previous()->false_target();
-  }
-
- private:
-  Slot* slot_;
-  TypeInfo old_type_info_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
 
 // -------------------------------------------------------------------------
 // CodeGenerator
@@ -225,45 +71,6 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  TypeInfo type_info(Slot* slot) {
-    int index = NumberOfSlot(slot);
-    if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
-    return (*type_info_)[index];
-  }
-
-  TypeInfo set_type_info(Slot* slot, TypeInfo info) {
-    int index = NumberOfSlot(slot);
-    ASSERT(index >= kInvalidSlotNumber);
-    if (index != kInvalidSlotNumber) {
-      TypeInfo previous_value = (*type_info_)[index];
-      (*type_info_)[index] = info;
-      return previous_value;
-    }
-    return TypeInfo::Unknown();
-  }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     return FLAG_debug_code ? 32 : 13;
@@ -275,317 +82,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  static const int kInvalidSlotNumber = -1;
-
-  int NumberOfSlot(Slot* slot);
-
-  // State
-  bool has_cc() const { return cc_reg_ != al; }
-  JumpTarget* true_target() const { return state_->true_target(); }
-  JumpTarget* false_target() const { return state_->false_target(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which cannot be done more than once).  The return value should
-  // be in r0.
-  void GenerateReturnSequence();
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  void StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  MemOperand SlotOperand(Slot* slot, Register tmp);
-
-  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
-                                               Register tmp,
-                                               Register tmp2,
-                                               JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* x,
-                     JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool force_cc);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver(Register scratch);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
-  // Store the value on top of the stack to a slot.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Load a named property, returning it in r0. The receiver is passed on the
-  // stack, and remains there.
-  void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // Store to a named property. If the store is contextual, value is passed on
-  // the frame and consumed. Otherwise, receiver and value are passed on the
-  // frame and consumed. The result is returned in r0.
-  void EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a keyed property, leaving it in r0.  The receiver and key are
-  // passed on the stack, and remain there.
-  void EmitKeyedLoad();
-
-  // Store a keyed property. Key and receiver are on the stack and the value is
-  // in r0. Result is returned in r0.
-  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
-  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(Token::Value op,
-                              OverwriteMode overwrite_mode,
-                              GenerateInlineSmi inline_smi,
-                              int known_rhs =
-                                  GenericBinaryOpStub::kUnknownIntValue);
-  void Comparison(Condition cc,
-                  Expression* left,
-                  Expression* right,
-                  bool strict = false);
-
-  void SmiOperation(Token::Value op,
-                    Handle<Object> value,
-                    bool reversed,
-                    OverwriteMode mode);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  // Control flow
-  void Branch(bool if_true, JumpTarget* target);
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  static Handle<Code> ComputeLazyCompile(int argc);
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source position code is being generated
-  // for.  Source positions are collected by the assembler and emitted with
-  // the relocation information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.
-  bool HasValidEntryRegisters();
-#endif
-
-  List<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  Condition cc_reg_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  Vector<TypeInfo>* type_info_;
-
-  // Jump targets
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (i.e., jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class InlineRuntimeFunctionsTable;
-  friend class LCodeGen;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index a1823a1..823c6ff 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -28,12 +28,9 @@
 #ifndef V8_ARM_CONSTANTS_ARM_H_
 #define V8_ARM_CONSTANTS_ARM_H_
 
-// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
-// are not running on real ARM hardware.  One reason for this is that the
-// old ABI uses fp registers in the calling convention and the simulator does
-// not simulate fp registers or coroutine instructions.
-#if defined(__ARM_EABI__) || !defined(__arm__)
-# define USE_ARM_EABI 1
+// ARM EABI is required.
+#if defined(__arm__) && !defined(__ARM_EABI__)
+#error ARM EABI support is required.
 #endif
 
 // This means that interwork-compatible jump instructions are generated.  We
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 5bd2029..51cfeb6 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -75,62 +75,33 @@
   register uint32_t end asm("a2") =
       reinterpret_cast<uint32_t>(start) + size;
   register uint32_t flg asm("a3") = 0;
-  #ifdef __ARM_EABI__
-    #if defined (__arm__) && !defined(__thumb__)
-      // __arm__ may be defined in thumb mode.
-      register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
-      asm volatile(
-          "svc 0x0"
-          : "=r" (beg)
-          : "0" (beg), "r" (end), "r" (flg), "r" (scno));
-    #else
-      // r7 is reserved by the EABI in thumb mode.
-      asm volatile(
-      "@   Enter ARM Mode  \n\t"
-          "adr r3, 1f      \n\t"
-          "bx  r3          \n\t"
-          ".ALIGN 4        \n\t"
-          ".ARM            \n"
-      "1:  push {r7}       \n\t"
-          "mov r7, %4      \n\t"
-          "svc 0x0         \n\t"
-          "pop {r7}        \n\t"
-      "@   Enter THUMB Mode\n\t"
-          "adr r3, 2f+1    \n\t"
-          "bx  r3          \n\t"
-          ".THUMB          \n"
-      "2:                  \n\t"
-          : "=r" (beg)
-          : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
-          : "r3");
-    #endif
+  #if defined (__arm__) && !defined(__thumb__)
+    // __arm__ may be defined in thumb mode.
+    register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+    asm volatile(
+        "svc 0x0"
+        : "=r" (beg)
+        : "0" (beg), "r" (end), "r" (flg), "r" (scno));
   #else
-    #if defined (__arm__) && !defined(__thumb__)
-      // __arm__ may be defined in thumb mode.
-      asm volatile(
-          "svc %1"
-          : "=r" (beg)
-          : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
-    #else
-      // Do not use the value of __ARM_NR_cacheflush in the inline assembly
-      // below, because the thumb mode value would be used, which would be
-      // wrong, since we switch to ARM mode before executing the svc instruction
-      asm volatile(
-      "@   Enter ARM Mode  \n\t"
-          "adr r3, 1f      \n\t"
-          "bx  r3          \n\t"
-          ".ALIGN 4        \n\t"
-          ".ARM            \n"
-      "1:  svc 0x9f0002    \n"
-      "@   Enter THUMB Mode\n\t"
-          "adr r3, 2f+1    \n\t"
-          "bx  r3          \n\t"
-          ".THUMB          \n"
-      "2:                  \n\t"
-          : "=r" (beg)
-          : "0" (beg), "r" (end), "r" (flg)
-          : "r3");
-    #endif
+    // r7 is reserved by the EABI in thumb mode.
+    asm volatile(
+    "@   Enter ARM Mode  \n\t"
+        "adr r3, 1f      \n\t"
+        "bx  r3          \n\t"
+        ".ALIGN 4        \n\t"
+        ".ARM            \n"
+    "1:  push {r7}       \n\t"
+        "mov r7, %4      \n\t"
+        "svc 0x0         \n\t"
+        "pop {r7}        \n\t"
+    "@   Enter THUMB Mode\n\t"
+        "adr r3, 2f+1    \n\t"
+        "bx  r3          \n\t"
+        ".THUMB          \n"
+    "2:                  \n\t"
+        : "=r" (beg)
+        : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
+        : "r3");
   #endif
 #endif
 }
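
What remains of FlushICache after this simplification is a single EABI path: the Linux cacheflush system call with begin/end/flags arguments and the syscall number in r7 (taking a detour through ARM mode when built as Thumb, because the EABI reserves r7 there). For reference, the same effect expressed through libc, assuming an ARM Linux target where <asm/unistd.h> provides __ARM_NR_cacheflush; this helper is purely illustrative and not part of the patch:

    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>      // syscall()
    #include <asm/unistd.h>  // assumed to define __ARM_NR_cacheflush on ARM Linux

    void FlushICacheViaSyscall(void* start, size_t size) {
      uintptr_t beg = reinterpret_cast<uintptr_t>(start);
      uintptr_t end = beg + size;
      // EABI convention for this syscall: r0 = begin, r1 = end,
      // r2 = flags (0 here), r7 = syscall number.
      syscall(__ARM_NR_cacheflush, beg, end, 0);
    }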
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e6ad98c..07a2272 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_ARM)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 namespace v8 {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 3267951..871b453 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -245,7 +245,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       Label ok;
       __ LoadRoot(ip, Heap::kStackLimitRootIndex);
       __ cmp(sp, Operand(ip));
@@ -431,8 +431,7 @@
     if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-    if (false_label_ != fall_through_) __ b(false_label_);
-      __ b(false_label_);
+      if (false_label_ != fall_through_) __ b(false_label_);
     } else {
       if (true_label_ != fall_through_) __ b(true_label_);
     }
@@ -824,7 +823,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -851,7 +850,7 @@
       __ cmp(r1, r0);
       __ b(ne, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ b(clause->body_target()->entry_label());
+      __ b(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -862,7 +861,7 @@
     __ cmp(r0, Operand(0));
     __ b(ne, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ b(clause->body_target()->entry_label());
+    __ b(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -872,14 +871,14 @@
   if (default_clause == NULL) {
     __ b(nested_statement.break_target());
   } else {
-    __ b(default_clause->body_target()->entry_label());
+    __ b(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
@@ -2595,9 +2594,9 @@
 
   // Set the bit in the map to indicate that it has been checked safe for
   // default valueOf and set true result.
-  __ ldrb(r2, FieldMemOperand(r4, Map::kBitField2Offset));
+  __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
   __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-  __ strb(r2, FieldMemOperand(r4, Map::kBitField2Offset));
+  __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
   __ jmp(if_true);
 
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -3885,7 +3884,7 @@
   if (assign_type == VARIABLE) {
     PrepareForBailout(expr->expression(), TOS_REG);
   } else {
-    PrepareForBailout(expr->increment(), TOS_REG);
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
   }
 
   // Call ToNumber only if operand is not a smi.
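
The body-target changes above all follow from CaseClause::body_target() now returning a plain assembler Label instead of a JumpTarget carrying virtual-frame state. The resulting pattern is the ordinary label discipline, sketched here with the names used in this diff:

    // Emitting the dispatch:
    Label* body = clause->body_target();  // previously body_target()->entry_label()
    body->Unuse();                        // reset before reuse
    __ b(body);                           // branch; the label may still be unbound

    // Later, emitting the case body:
    __ bind(body);                        // bind once; no frame merging is needed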
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index dc4f761..8acf7c2 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #include "assembler-arm.h"
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "disasm.h"
 #include "ic-inl.h"
 #include "runtime.h"
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
deleted file mode 100644
index df370c4..0000000
--- a/src/arm/jump-target-arm.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (entry_frame_set_) {
-    if (entry_label_.is_bound()) {
-      // If we already bound and generated code at the destination then it
-      // is too late to ask for less optimistic type assumptions.
-      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
-    }
-    // There is already a frame expectation at the target.
-    cgen()->frame()->MergeTo(&entry_frame_);
-    cgen()->DeleteFrame();
-  } else {
-    // Clone the current frame to use as the expected one at the target.
-    set_entry_frame(cgen()->frame());
-    // Zap the fall-through frame since the jump was unconditional.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-  if (entry_label_.is_bound()) {
-    // You can't jump backwards to an already bound label unless you admitted
-    // up front that this was a bidirectional jump target.  Bidirectional jump
-    // targets will zap their type info when bound in case some later virtual
-    // frame with less precise type info branches to them.
-    ASSERT(direction_ != FORWARD_ONLY);
-  }
-  __ jmp(&entry_label_);
-}
-
-
-void JumpTarget::DoBranch(Condition cond, Hint ignored) {
-  ASSERT(cgen()->has_valid_frame());
-
-  if (entry_frame_set_) {
-    if (entry_label_.is_bound()) {
-      // If we already bound and generated code at the destination then it
-      // is too late to ask for less optimistic type assumptions.
-      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
-    }
-    // We have an expected frame to merge to on the backward edge.
-    cgen()->frame()->MergeTo(&entry_frame_, cond);
-  } else {
-    // Clone the current frame to use as the expected one at the target.
-    set_entry_frame(cgen()->frame());
-  }
-  if (entry_label_.is_bound()) {
-    // You can't branch backwards to an already bound label unless you admitted
-    // up front that this was a bidirectional jump target.  Bidirectional jump
-    // targets will zap their type info when bound in case some later virtual
-    // frame with less precise type info branches to them.
-    ASSERT(direction_ != FORWARD_ONLY);
-  }
-  __ b(cond, &entry_label_);
-  if (cond == al) {
-    cgen()->DeleteFrame();
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  // Calls are always 'forward' so we use a copy of the current frame (plus
-  // one for a return address) as the expected frame.
-  ASSERT(!entry_frame_set_);
-  VirtualFrame target_frame = *cgen()->frame();
-  target_frame.Adjust(1);
-  set_entry_frame(&target_frame);
-
-  __ bl(&entry_label_);
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  if (cgen()->has_valid_frame()) {
-    if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
-    // If there is a current frame we can use it on the fall through.
-    if (!entry_frame_set_) {
-      entry_frame_ = *cgen()->frame();
-      entry_frame_set_ = true;
-    } else {
-      cgen()->frame()->MergeTo(&entry_frame_);
-      // On fall through we may have to merge both ways.
-      if (direction_ != FORWARD_ONLY) {
-        // This will not need to adjust the virtual frame entries that are
-        // register allocated since that was done above and they now match.
-        // But it does need to adjust the entry_frame_ of this jump target
-        // to make it potentially less optimistic.  Later code can branch back
-        // to this jump target and we need to assert that that code does not
-        // have weaker assumptions about types.
-        entry_frame_.MergeTo(cgen()->frame());
-      }
-    }
-  } else {
-    // If there is no current frame we must have an entry frame which we can
-    // copy.
-    ASSERT(entry_frame_set_);
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index a5216ad..3f1d15b 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1114,9 +1114,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
@@ -1329,7 +1329,7 @@
     return DoArithmeticD(Token::DIV, instr);
   } else if (instr->representation().IsInteger32()) {
     // TODO(1042) The fixed register allocation
-    // is needed because we call GenericBinaryOpStub from
+    // is needed because we call TypeRecordingBinaryOpStub from
     // the generated code, which requires registers r0
     // and r1 to be used. We should remove that
     // when we provide a native implementation.
@@ -1840,21 +1840,20 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
-  // TODO(danno): Add support for other external array types.
-  if (instr->array_type() != kExternalPixelArray) {
-    Abort("unsupported load for external array type.");
-    return NULL;
-  }
-
-  ASSERT(instr->representation().IsInteger32());
+  ExternalArrayType array_type = instr->array_type();
+  Representation representation(instr->representation());
+  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
+         (representation.IsDouble() && array_type == kExternalFloatArray));
   ASSERT(instr->key()->representation().IsInteger32());
-  LOperand* external_pointer =
-      UseRegisterAtStart(instr->external_pointer());
-  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  LOperand* key = UseRegister(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer,
-                                            key);
-  return DefineAsRegister(result);
+      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+  LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load might overflow and cause a deopt; make sure it
+  // has an environment.
+  return (array_type == kExternalUnsignedIntArray) ?
+      AssignEnvironment(load_instr) : load_instr;
 }
 
 
@@ -1889,23 +1888,24 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
-  // TODO(danno): Add support for other external array types.
-  if (instr->array_type() != kExternalPixelArray) {
-    Abort("unsupported store for external array type.");
-    return NULL;
-  }
-
-  ASSERT(instr->value()->representation().IsInteger32());
+  Representation representation(instr->value()->representation());
+  ExternalArrayType array_type = instr->array_type();
+  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
+         (representation.IsDouble() && array_type == kExternalFloatArray));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* value = UseTempRegister(instr->value());  // changed by clamp.
+  bool val_is_temp_register = array_type == kExternalPixelArray ||
+      array_type == kExternalFloatArray;
+  LOperand* val = val_is_temp_register
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
   LOperand* key = UseRegister(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
-                                                value);
+                                                val);
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index f406f95..6da7c86 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1309,6 +1309,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1639,6 +1640,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1678,6 +1680,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
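
The new strict_mode() accessors forward to the hydrogen instruction rather than reading the compilation-wide flag, presumably because with inlining a single compiled unit can mix strict and non-strict functions, so strictness has to be a per-instruction property. The code generator then selects the IC accordingly, as in the lithium-codegen-arm.cc hunks that follow:

    Handle<Code> ic = instr->strict_mode()  // per-instruction, not info_->is_strict()
        ? isolate()->builtins()->StoreIC_Initialize_Strict()
        : isolate()->builtins()->StoreIC_Initialize();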
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index b214169..6af30c4 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -484,11 +484,19 @@
 void LCodeGen::CallCode(Handle<Code> code,
                         RelocInfo::Mode mode,
                         LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode);
 }
 
 
@@ -501,11 +509,21 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode) {
   // Create the environment to bailout to. If the call has side effects
   // execution has to continue after the call otherwise execution can continue
   // from a previous bailout point repeating the call.
@@ -517,8 +535,16 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        0,
+        deoptimization_environment->deoptimization_index());
+  }
 }
 
 
@@ -650,6 +676,8 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(expected_safepoint_kind_ == kind);
+
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
@@ -1015,7 +1043,7 @@
   Register left = ToRegister(instr->InputAt(0));
   Register right = ToRegister(instr->InputAt(1));
 
-  __ PushSafepointRegistersAndDoubles();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
   // Move left to r1 and right to r0 for the stub call.
   if (left.is(r1)) {
     __ Move(r0, right);
@@ -1037,7 +1065,6 @@
                                          Safepoint::kNoDeoptimizationIndex);
   // Overwrite the stored value of r0 with the result of the stub.
   __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-  __ PopSafepointRegistersAndDoubles();
 }
 
 
@@ -1460,11 +1487,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ PopSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
 
 
@@ -2065,7 +2089,7 @@
       flags | InstanceofStub::kReturnTrueFalseObject);
   InstanceofStub stub(flags);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
   // Get the temp register reserved by the instruction. This needs to be r4 as
  // its slot in the pushed safepoint register area is used to communicate the
@@ -2080,12 +2104,13 @@
   __ BlockConstPoolFor(kAdditionalDelta);
   __ mov(temp, Operand(delta * kPointerSize));
   __ StoreToSafepointRegisterSlot(temp, temp);
-  CallCode(stub.GetCode(),  RelocInfo::CODE_TARGET, instr);
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   // Put the result value into the result register slot and
   // restore all registers.
   __ StoreToSafepointRegisterSlot(result, result);
-
-  __ PopSafepointRegisters();
 }
 
 
@@ -2217,7 +2242,9 @@
   ASSERT(ToRegister(instr->value()).is(r0));
 
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
 }
 
@@ -2383,12 +2410,14 @@
     __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
     __ cmp(scratch, ip);
     __ b(eq, &done);
-    __ LoadRoot(ip, Heap::kExternalPixelArrayMapRootIndex);
-    __ cmp(scratch, ip);
-    __ b(eq, &done);
     __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
     __ cmp(scratch, ip);
-    __ Check(eq, "Check for fast elements failed.");
+    __ b(eq, &done);
+    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
+    __ cmp(scratch, Operand(kExternalArrayTypeCount));
+    __ Check(cc, "Check for fast elements failed.");
     __ bind(&done);
   }
 }
@@ -2441,14 +2470,51 @@
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
-  ASSERT(instr->array_type() == kExternalPixelArray);
-
   Register external_pointer = ToRegister(instr->external_pointer());
   Register key = ToRegister(instr->key());
-  Register result = ToRegister(instr->result());
-
-  // Load the result.
-  __ ldrb(result, MemOperand(external_pointer, key));
+  ExternalArrayType array_type = instr->array_type();
+  if (array_type == kExternalFloatArray) {
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      DwVfpRegister result(ToDoubleRegister(instr->result()));
+      __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
+      __ vldr(result, scratch0(), 0);
+    } else {
+      Register result(ToRegister(instr->result()));
+      __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+    }
+  } else {
+    Register result(ToRegister(instr->result()));
+    switch (array_type) {
+      case kExternalByteArray:
+        __ ldrsb(result, MemOperand(external_pointer, key));
+        break;
+      case kExternalUnsignedByteArray:
+      case kExternalPixelArray:
+        __ ldrb(result, MemOperand(external_pointer, key));
+        break;
+      case kExternalShortArray:
+        __ ldrsh(result, MemOperand(external_pointer, key, LSL, 1));
+        break;
+      case kExternalUnsignedShortArray:
+        __ ldrh(result, MemOperand(external_pointer, key, LSL, 1));
+        break;
+      case kExternalIntArray:
+        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+        break;
+      case kExternalUnsignedIntArray:
+        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+        __ cmp(result, Operand(0x80000000));
+        // TODO(danno): we could be more clever here, perhaps having a special
+        // version of the stub that detects if the overflow case actually
+        // happens, and generates code that returns a double rather than an int.
+        DeoptimizeIf(cs, instr->environment());
+        break;
+      case kExternalFloatArray:
+        UNREACHABLE();
+        break;
+    }
+  }
 }
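
The deoptimization on kExternalUnsignedIntArray exists because the untagged result register holds an int32: a uint32 value with the top bit set has no int32 representation, so rather than silently producing a negative number the load bails out. In effect (a sketch, not code from this patch; Load and Deoptimize are hypothetical helpers):

    uint32_t raw = Load<uint32_t>(external_pointer, key);
    if (raw & 0x80000000u) {
      Deoptimize();  // value does not fit in a signed 32-bit integer
    }
    int32_t result = static_cast<int32_t>(raw);

This is also why DoLoadKeyedSpecializedArrayElement in lithium-arm.cc attaches an environment to the unsigned int case only.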
 
 
@@ -2639,7 +2705,7 @@
   __ Call(ip);
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 
   // Restore context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2677,44 +2743,43 @@
 
   // Input is negative. Reverse its sign.
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
-  // Registers were saved at the safepoint, so we can use
-  // many scratch registers.
-  Register tmp1 = input.is(r1) ? r0 : r1;
-  Register tmp2 = input.is(r2) ? r0 : r2;
-  Register tmp3 = input.is(r3) ? r0 : r3;
-  Register tmp4 = input.is(r4) ? r0 : r4;
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(r1) ? r0 : r1;
+    Register tmp2 = input.is(r2) ? r0 : r2;
+    Register tmp3 = input.is(r3) ? r0 : r3;
+    Register tmp4 = input.is(r4) ? r0 : r4;
 
-  // exponent: floating point exponent value.
+    // exponent: floating point exponent value.
 
-  Label allocated, slow;
-  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
-  __ b(&allocated);
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ b(&allocated);
 
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
 
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  // Set the pointer to the new heap number in tmp.
-  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
-  // Restore input_reg after call to runtime.
-  __ LoadFromSafepointRegisterSlot(input, input);
-  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Set the pointer to the new heap number in tmp.
+    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
 
-  __ bind(&allocated);
-  // exponent: floating point exponent value.
-  // tmp1: allocated heap number.
-  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
-  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
-  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
-  __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
+    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
 
-  __ StoreToSafepointRegisterSlot(tmp1, input);
-  __ PopSafepointRegisters();
+    __ StoreToSafepointRegisterSlot(tmp1, input);
+  }
 
   __ bind(&done);
 }
@@ -3071,7 +3136,7 @@
 
   // Name is always in r2.
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3112,15 +3177,45 @@
 
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
-  ASSERT(instr->array_type() == kExternalPixelArray);
 
   Register external_pointer = ToRegister(instr->external_pointer());
   Register key = ToRegister(instr->key());
-  Register value = ToRegister(instr->value());
-
-  // Clamp the value to [0..255].
-  __ Usat(value, 8, Operand(value));
-  __ strb(value, MemOperand(external_pointer, key, LSL, 0));
+  ExternalArrayType array_type = instr->array_type();
+  if (array_type == kExternalFloatArray) {
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      DwVfpRegister value(ToDoubleRegister(instr->value()));
+      __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
+      __ vstr(value, scratch0(), 0);
+    } else {
+      Register value(ToRegister(instr->value()));
+      __ str(value, MemOperand(external_pointer, key, LSL, 2));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    switch (array_type) {
+      case kExternalPixelArray:
+        // Clamp the value to [0..255].
+        __ Usat(value, 8, Operand(value));
+        __ strb(value, MemOperand(external_pointer, key));
+        break;
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ strb(value, MemOperand(external_pointer, key));
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ strh(value, MemOperand(external_pointer, key, LSL, 1));
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray:
+        __ str(value, MemOperand(external_pointer, key, LSL, 2));
+        break;
+      case kExternalFloatArray:
+        UNREACHABLE();
+        break;
+    }
+  }
 }
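
The switch above is element-size dispatch: the key is scaled by the element width through the LSL amount in the addressing mode (0 for byte-sized types, 1 for halfwords, 2 for words and single floats). The same rule as a standalone sketch, reusing the array-type names from this diff:

    size_t ElementOffset(ExternalArrayType array_type, uint32_t key) {
      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
        case kExternalPixelArray:
          return key;       // LSL #0: one byte per element
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          return key << 1;  // LSL #1: two bytes per element
        default:
          return key << 2;  // LSL #2: four bytes (int/uint/float)
      }
    }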
 
 
@@ -3129,7 +3224,7 @@
   ASSERT(ToRegister(instr->key()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3252,7 +3347,7 @@
   // contained in the register pointer map.
   __ mov(result, Operand(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3265,15 +3360,12 @@
     __ SmiTag(index);
     __ push(index);
   }
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(r0);
   }
   __ SmiUntag(r0);
   __ StoreToSafepointRegisterSlot(r0, result);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3316,14 +3408,11 @@
   // contained in the register pointer map.
   __ mov(result, Operand(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ SmiTag(char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(r0, result);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3379,7 +3468,7 @@
   SwVfpRegister flt_scratch = s0;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
   // There was overflow, so bits 30 and 31 of the original integer
   // disagree. Try to allocate a heap number in new space and store
@@ -3404,9 +3493,7 @@
   // integer value.
   __ mov(ip, Operand(0));
   __ StoreToSafepointRegisterSlot(ip, reg);
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   if (!reg.is(r0)) __ mov(reg, r0);
 
   // Done. Put the value in dbl_scratch into the value of the allocated heap
@@ -3415,7 +3502,6 @@
   __ sub(ip, reg, Operand(kHeapObjectTag));
   __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
   __ StoreToSafepointRegisterSlot(reg, reg);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3455,12 +3541,9 @@
   Register reg = ToRegister(instr->result());
   __ mov(reg, Operand(0));
 
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   __ StoreToSafepointRegisterSlot(r0, reg);
-  __ PopSafepointRegisters();
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index caa85d2..8a4ea27 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -57,7 +57,8 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -137,7 +138,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -172,12 +173,24 @@
   bool GenerateDeferredCode();
   bool GenerateSafepointTable();
 
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
   void CallCode(Handle<Code> code,
                 RelocInfo::Mode mode,
                 LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
                    LInstruction* instr);
+
   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
                    LInstruction* instr) {
@@ -185,6 +198,10 @@
     CallRuntime(function, num_arguments, instr);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -193,7 +210,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode);
+
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -292,6 +311,48 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    PushSafepointRegistersScope(LCodeGen* codegen,
+                                Safepoint::Kind kind)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = kind;
+
+      switch (codegen_->expected_safepoint_kind_) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PushSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PushSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    ~PushSafepointRegistersScope() {
+      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+      ASSERT((kind & Safepoint::kWithRegisters) != 0);
+      switch (kind) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PopSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PopSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
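
PushSafepointRegistersScope replaces the manual PushSafepointRegisters / PopSafepointRegisters pairs deleted throughout lithium-codegen-arm.cc with an RAII scope, so no code path can leave the registers pushed, and expected_safepoint_kind_ gives RecordSafepoint an assertable record of what is currently on the stack. Usage follows the pattern visible in the .cc hunks above, for example:

    {
      PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
      CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
      __ StoreToSafepointRegisterSlot(r0, reg);
    }  // destructor pops the safepoint registers and resets the expected kind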
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index e3bb879..6a095d3 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -32,7 +32,7 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 
@@ -839,11 +839,7 @@
 }
 
 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-#if !defined(USE_ARM_EABI)
-  UNREACHABLE();
-#else
   vmov(dst, r0, r1);
-#endif
 }
 
 
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
deleted file mode 100644
index 945cdeb..0000000
--- a/src/arm/register-allocator-arm-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
-}
-
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.  The mapping is:
-//
-// r0 <-> 0
-// r1 <-> 1
-// r2 <-> 2
-// r3 <-> 3
-// r4 <-> 4
-// r5 <-> 5
-// r6 <-> 6
-// r7 <-> 7
-// r9 <-> 8
-// r10 <-> 9
-// ip <-> 10
-// lr <-> 11
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // r0
-    1,   // r1
-    2,   // r2
-    3,   // r3
-    4,   // r4
-    5,   // r5
-    6,   // r6
-    7,   // r7
-    -1,  // cp
-    8,   // r9
-    9,   // r10
-    -1,  // fp
-    10,  // ip
-    -1,  // sp
-    11,  // lr
-    -1   // pc
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] =
-      { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
deleted file mode 100644
index 3b35574..0000000
--- a/src/arm/register-allocator-arm.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  UNIMPLEMENTED();
-}
-
-
-void Result::ToRegister(Register target) {
-  UNIMPLEMENTED();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // No byte registers on ARM.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/register-allocator-arm.h b/src/arm/register-allocator-arm.h
deleted file mode 100644
index fdbc88f..0000000
--- a/src/arm/register-allocator-arm.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  // No registers are currently managed by the register allocator on ARM.
-  static const int kNumRegisters = 0;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 7174088..ccd79d3 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -938,12 +938,7 @@
   // 2*sreg and 2*sreg+1.
   char buffer[2 * sizeof(vfp_register[0])];
   memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
-#ifndef BIG_ENDIAN_FLOATING_POINT
   memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-#else
-  memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
-  memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
-#endif
 }
 
 
@@ -980,12 +975,7 @@
   // Read the bits from the unsigned integer vfp_register[] array
   // into the double precision floating point value and return it.
   char buffer[2 * sizeof(vfp_register[0])];
-#ifdef BIG_ENDIAN_FLOATING_POINT
-  memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
-  memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
-#else
   memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#endif
   memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
   return(dm_val);
 }
@@ -1618,7 +1608,7 @@
       address += 2;
     }
   }
-  ASSERT_EQ(((intptr_t)address) - operand_size, end_address);
+  ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
 }
 
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index a71a4c5..47d675b 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
diff --git a/src/arm/virtual-frame-arm-inl.h b/src/arm/virtual-frame-arm-inl.h
deleted file mode 100644
index 6a7902a..0000000
--- a/src/arm/virtual-frame-arm-inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
-#define V8_VIRTUAL_FRAME_ARM_INL_H_
-
-#include "assembler-arm.h"
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
-// file if such a thing existed.
-MemOperand VirtualFrame::ParameterAt(int index) {
-  // Index -1 corresponds to the receiver.
-  ASSERT(-1 <= index);  // -1 is the receiver.
-  ASSERT(index <= parameter_count());
-  return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
-}
-
-  // The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
-  return ParameterAt(-1);
-}
-
-
-void VirtualFrame::Forget(int count) {
-  SpillAll();
-  LowerHeight(count);
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_ARM_INL_H_
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
deleted file mode 100644
index a852d6e..0000000
--- a/src/arm/virtual-frame-arm.cc
+++ /dev/null
@@ -1,843 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToR1R0() {
-  // Shuffle things around so the top of stack is in r0 and r1.
-  MergeTOSTo(R0_R1_TOS);
-  // Pop the two registers off the stack so they are detached from the frame.
-  LowerHeight(2);
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR1() {
-  // Shuffle things around so the top of stack is only in r1.
-  MergeTOSTo(R1_TOS);
-  // Pop the register off the stack so it is detached from the frame.
-  LowerHeight(1);
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR0() {
-  // Shuffle things around so the top of stack only in r0.
-  MergeTOSTo(R0_TOS);
-  // Pop the register off the stack so it is detached from the frame.
-  LowerHeight(1);
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
-  if (Equals(expected)) return;
-  ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
-         expected->tos_known_smi_map_);
-  ASSERT(expected->IsCompatibleWith(this));
-  MergeTOSTo(expected->top_of_stack_state_, cond);
-  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
-  if (Equals(expected)) return;
-  tos_known_smi_map_ &= expected->tos_known_smi_map_;
-  MergeTOSTo(expected->top_of_stack_state_, cond);
-  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTOSTo(
-    VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
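-  // Encode the (current, expected) TOS state pair as one value so a single
-  // switch can dispatch on every possible transition.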
-#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
-  switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
-    case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
-      __ pop(r0, cond);
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
-      __ pop(r1, cond);
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
-      __ push(r0, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, R0_TOS):
-      break;
-    case CASE_NUMBER(R0_TOS, R1_TOS):
-      __ mov(r1, r0, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, R0_R1_TOS):
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, R1_R0_TOS):
-      __ mov(r1, r0, LeaveCC, cond);
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
-      __ push(r1, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, R0_TOS):
-      __ mov(r0, r1, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, R1_TOS):
-      break;
-    case CASE_NUMBER(R1_TOS, R0_R1_TOS):
-      __ mov(r0, r1, LeaveCC, cond);
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, R1_R0_TOS):
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
-      __ Push(r1, r0, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R0_TOS):
-      __ push(r1, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R1_TOS):
-      __ push(r1, cond);
-      __ mov(r1, r0, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
-      __ Swap(r0, r1, ip, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
-      __ Push(r0, r1, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R0_TOS):
-      __ push(r0, cond);
-      __ mov(r0, r1, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R1_TOS):
-      __ push(r0, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
-      __ Swap(r0, r1, ip, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
-      break;
-    default:
-      UNREACHABLE();
-#undef CASE_NUMBER
-  }
-  // A conditional merge will be followed by a conditional branch and the
-  // fall-through code will have an unchanged virtual frame state.  If the
-  // merge is unconditional ('al'ways) then it might be followed by a fall
-  // through.  We need to update the virtual frame state to match the code we
-  // are falling into.  The final case is an unconditional merge followed by an
-  // unconditional branch, in which case it doesn't matter what we do to the
-  // virtual frame state, because the virtual frame will be invalidated.
-  if (cond == al) {
-    top_of_stack_state_ = expected_top_of_stack_state;
-  }
-}
-
-
-void VirtualFrame::Enter() {
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  // Verify that r1 contains a JS function.  The following code relies
-  // on r2 being available for use.
-  if (FLAG_debug_code) {
-    Label map_check, done;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(ne, &map_check);
-    __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
-    __ bind(&map_check);
-    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-    __ b(eq, &done);
-    __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
-    __ bind(&done);
-  }
-#endif  // DEBUG
-
-  // We are about to push four values to the frame.
-  Adjust(4);
-  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-  // Adjust FP to point to saved FP.
-  __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Drop the execution stack down to the frame pointer and restore the caller
-  // frame pointer and return address.
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    Adjust(count);
-    // Initialize stack slots with 'undefined' value.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-    if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-      for (int i = 0; i < count; i++) {
-        __ push(ip);
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      __ mov(r1, Operand(count));
-      __ bind(&alloc_locals_loop);
-      __ push(ip);
-      __ sub(r1, r1, Operand(1), SetCC);
-      __ b(ne, &alloc_locals_loop);
-    }
-  } else {
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-  }
-  // Check the stack for overflow or a break request.
-  masm()->cmp(sp, Operand(r2));
-  StackCheckStub stub;
-  // Call the stub if lower.
-  masm()->mov(ip,
-              Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                      RelocInfo::CODE_TARGET),
-              LeaveCC,
-              lo);
-  masm()->Call(ip, lo);
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  // Grow the expression stack by handler size less one (the return
-  // address in lr is already counted by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
-  // InvokeFunction requires function in r1.
-  PopToR1();
-  SpillAll();
-
-  // +1 for receiver.
-  Forget(arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(r1, count, CALL_FUNCTION);
-  // Restore the context.
-  __ ldr(cp, Context());
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  SpillAll();
-  Forget(arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  SpillAll();
-  Forget(arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                 InvokeJSFlags flags,
-                                 int arg_count) {
-  Forget(arg_count);
-  __ InvokeBuiltin(id, flags);
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  PopToR0();
-  SpillAll();
-  __ mov(r2, Operand(name));
-  CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name,
-                               bool is_contextual,
-                               StrictModeFlag strict_mode) {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-  PopToR0();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    SpillAll();
-    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    EmitPop(r1);
-    SpillAll();
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ mov(r2, Operand(name));
-  CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  PopToR1R0();
-  SpillAll();
-  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  PopToR1R0();
-  SpillAll();
-  EmitPop(r2);
-  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  int dropped_args) {
-  switch (code->kind()) {
-    case Code::CALL_IC:
-    case Code::KEYED_CALL_IC:
-    case Code::FUNCTION:
-      break;
-    case Code::KEYED_LOAD_IC:
-    case Code::LOAD_IC:
-    case Code::KEYED_STORE_IC:
-    case Code::STORE_IC:
-      ASSERT(dropped_args == 0);
-      break;
-    case Code::BUILTIN:
-      ASSERT(*code == Isolate::Current()->builtins()->builtin(
-          Builtins::kJSConstructCall));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  Forget(dropped_args);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
-}
-
-
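-// Lookup tables indexed by TopOfStack state.  The columns correspond to: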
-//    NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
-const bool VirtualFrame::kR0InUse[TOS_STATES] =
-    { false,            true,   false,  true,      true };
-const bool VirtualFrame::kR1InUse[TOS_STATES] =
-    { false,            false,  true,   true,      true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
-    { 0,                1,      1,      2,         2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
-    { r0,               r0,     r1,     r1,        r0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
-    { r0,               r0,     r1,     r0,        r1 };
-const Register VirtualFrame::kAllocatedRegisters[
-    VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
-// Popping is done by the transition implied by kStateAfterPop.  Of course if
-// there are no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
-    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush.  Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
-    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  // Discard elements from the virtual frame and free any registers.
-  int num_virtual_elements = kVirtualElements[top_of_stack_state_];
-  while (num_virtual_elements > 0) {
-    Pop();
-    num_virtual_elements--;
-    count--;
-    if (count == 0) return;
-  }
-  if (count == 0) return;
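-  // The remaining elements live in memory; discard them by bumping sp.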
-  __ add(sp, sp, Operand(count * kPointerSize));
-  LowerHeight(count);
-}
-
-
-void VirtualFrame::Pop() {
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    __ add(sp, sp, Operand(kPointerSize));
-  } else {
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-  }
-  LowerHeight(1);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    __ pop(reg);
-  } else {
-    __ mov(reg, kTopRegister[top_of_stack_state_]);
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-  }
-  LowerHeight(1);
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR0() {
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r0, MemOperand(sp, 0));
-      break;
-    case R0_TOS:
-      __ push(r0);
-      break;
-    case R1_TOS:
-      __ push(r1);
-      __ mov(r0, r1);
-      break;
-    case R0_R1_TOS:
-      __ Push(r1, r0);
-      break;
-    case R1_R0_TOS:
-      __ Push(r0, r1);
-      __ mov(r0, r1);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1() {
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r1, MemOperand(sp, 0));
-      break;
-    case R0_TOS:
-      __ push(r0);
-      __ mov(r1, r0);
-      break;
-    case R1_TOS:
-      __ push(r1);
-      break;
-    case R0_R1_TOS:
-      __ Push(r1, r0);
-      __ mov(r1, r0);
-      break;
-    case R1_R0_TOS:
-      __ Push(r0, r1);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1R0() {
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r1, MemOperand(sp, 0));
-      __ ldr(r0, MemOperand(sp, kPointerSize));
-      break;
-    case R0_TOS:
-      __ push(r0);
-      __ mov(r1, r0);
-      __ ldr(r0, MemOperand(sp, kPointerSize));
-      break;
-    case R1_TOS:
-      __ push(r1);
-      __ ldr(r0, MemOperand(sp, kPointerSize));
-      break;
-    case R0_R1_TOS:
-      __ Push(r1, r0);
-      __ Swap(r0, r1, ip);
-      break;
-    case R1_R0_TOS:
-      __ Push(r0, r1);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-Register VirtualFrame::Peek() {
-  AssertIsNotSpilled();
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
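-    // The top element is in memory: pop it into a TOS register.  The frame
-    // height is unchanged; only the element's location changes.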
-    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-    Register answer = kTopRegister[top_of_stack_state_];
-    __ pop(answer);
-    return answer;
-  } else {
-    return kTopRegister[top_of_stack_state_];
-  }
-}
-
-
-Register VirtualFrame::Peek2() {
-  AssertIsNotSpilled();
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-    case R0_TOS:
-    case R0_R1_TOS:
-      MergeTOSTo(R0_R1_TOS);
-      return r1;
-    case R1_TOS:
-    case R1_R0_TOS:
-      MergeTOSTo(R1_R0_TOS);
-      return r0;
-    default:
-      UNREACHABLE();
-      return no_reg;
-  }
-}
-
-
-void VirtualFrame::Dup() {
-  if (SpilledScope::is_spilled()) {
-    __ ldr(ip, MemOperand(sp, 0));
-    __ push(ip);
-  } else {
-    switch (top_of_stack_state_) {
-      case NO_TOS_REGISTERS:
-        __ ldr(r0, MemOperand(sp, 0));
-        top_of_stack_state_ = R0_TOS;
-        break;
-      case R0_TOS:
-        __ mov(r1, r0);
-        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_TOS:
-        __ mov(r0, r1);
-        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R0_R1_TOS:
-        __ push(r1);
-        __ mov(r1, r0);
-        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_R0_TOS:
-        __ push(r0);
-        __ mov(r0, r1);
-        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-  RaiseHeight(1, tos_known_smi_map_ & 1);
-}
-
-
-void VirtualFrame::Dup2() {
-  if (SpilledScope::is_spilled()) {
-    __ ldr(ip, MemOperand(sp, kPointerSize));
-    __ push(ip);
-    __ ldr(ip, MemOperand(sp, kPointerSize));
-    __ push(ip);
-  } else {
-    switch (top_of_stack_state_) {
-      case NO_TOS_REGISTERS:
-        __ ldr(r0, MemOperand(sp, 0));
-        __ ldr(r1, MemOperand(sp, kPointerSize));
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R0_TOS:
-        __ push(r0);
-        __ ldr(r1, MemOperand(sp, kPointerSize));
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_TOS:
-        __ push(r1);
-        __ ldr(r0, MemOperand(sp, kPointerSize));
-        top_of_stack_state_ = R1_R0_TOS;
-        break;
-      case R0_R1_TOS:
-        __ Push(r1, r0);
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_R0_TOS:
-        __ Push(r0, r1);
-        top_of_stack_state_ = R1_R0_TOS;
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-  RaiseHeight(2, tos_known_smi_map_ & 3);
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
-  ASSERT(but_not_to_this_one.is(r0) ||
-         but_not_to_this_one.is(r1) ||
-         but_not_to_this_one.is(no_reg));
-  LowerHeight(1);
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    if (but_not_to_this_one.is(r0)) {
-      __ pop(r1);
-      return r1;
-    } else {
-      __ pop(r0);
-      return r0;
-    }
-  } else {
-    Register answer = kTopRegister[top_of_stack_state_];
-    ASSERT(!answer.is(but_not_to_this_one));
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-    return answer;
-  }
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
-  if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
-    __ push(kBottomRegister[top_of_stack_state_]);
-    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-  }
-  ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  RaiseHeight(1, info.IsSmi() ? 1 : 0);
-  if (reg.is(cp)) {
-    // If we are pushing cp then we are about to make a call and things have to
-    // be pushed to the physical stack.  There's nothing to be gained by moving
-    // to a TOS register and then pushing that; we might as well push to the
-    // physical stack immediately.
-    MergeTOSTo(NO_TOS_REGISTERS);
-    __ push(reg);
-    return;
-  }
-  if (SpilledScope::is_spilled()) {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    __ push(reg);
-    return;
-  }
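-  // With no TOS registers in use, r0 or r1 can be adopted directly.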
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    if (reg.is(r0)) {
-      top_of_stack_state_ = R0_TOS;
-      return;
-    }
-    if (reg.is(r1)) {
-      top_of_stack_state_ = R1_TOS;
-      return;
-    }
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  Register dest = kTopRegister[top_of_stack_state_];
-  __ Move(dest, reg);
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
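-  // The stored value is not known to be a smi, so clear its known-smi bit.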
-  if (this_far_down < kTOSKnownSmiMapSize) {
-    tos_known_smi_map_ &= ~(1 << this_far_down);
-  }
-  if (this_far_down == 0) {
-    Pop();
-    Register dest = GetTOSRegister();
-    if (dest.is(reg)) {
-      // We already popped one item off the top of the stack.  If the only
-      // free register is the one we were asked to push then we have been
-      // asked to push a register that was already in use, which cannot
-      // happen.  It therefore follows that there are two free TOS registers:
-      ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-      dest = dest.is(r0) ? r1 : r0;
-    }
-    __ mov(dest, reg);
-    EmitPush(dest);
-  } else if (this_far_down == 1) {
-    int virtual_elements = kVirtualElements[top_of_stack_state_];
-    if (virtual_elements < 2) {
-      __ str(reg, ElementAt(this_far_down));
-    } else {
-      ASSERT(virtual_elements == 2);
-      ASSERT(!reg.is(r0));
-      ASSERT(!reg.is(r1));
-      Register dest = kBottomRegister[top_of_stack_state_];
-      __ mov(dest, reg);
-    }
-  } else {
-    ASSERT(this_far_down >= 2);
-    ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
-    __ str(reg, ElementAt(this_far_down));
-  }
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
-  if (SpilledScope::is_spilled()) return r0;
-
-  EnsureOneFreeTOSRegister();
-  return kTopRegister[kStateAfterPush[top_of_stack_state_]];
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  RaiseHeight(1, info.IsSmi() ? 1 : 0);
-  if (SpilledScope::is_spilled()) {
-    __ mov(r0, operand);
-    __ push(r0);
-    return;
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  __ mov(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
-  RaiseHeight(1, info.IsSmi() ? 1 : 0);
-  if (SpilledScope::is_spilled()) {
-    __ ldr(r0, operand);
-    __ push(r0);
-    return;
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  __ ldr(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
-  RaiseHeight(1, 0);
-  if (SpilledScope::is_spilled()) {
-    __ LoadRoot(r0, index);
-    __ push(r0);
-    return;
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  __ LoadRoot(kTopRegister[top_of_stack_state_], index);
-}
-
-
-void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
-  ASSERT(SpilledScope::is_spilled());
-  Adjust(count);
-  __ stm(db_w, sp, src_regs);
-}
-
-
-void VirtualFrame::SpillAll() {
-  switch (top_of_stack_state_) {
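-    // Push the bottom (deeper) register first so that the top element ends
-    // up at the stack pointer.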
-    case R1_R0_TOS:
-      masm()->push(r0);
-      // Fall through.
-    case R1_TOS:
-      masm()->push(r1);
-      top_of_stack_state_ = NO_TOS_REGISTERS;
-      break;
-    case R0_R1_TOS:
-      masm()->push(r1);
-      // Fall through.
-    case R0_TOS:
-      masm()->push(r0);
-      top_of_stack_state_ = NO_TOS_REGISTERS;
-      // Fall through.
-    case NO_TOS_REGISTERS:
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  ASSERT(register_allocation_map_ == 0);  // Not yet implemented.
-}
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
deleted file mode 100644
index 6d67e70..0000000
--- a/src/arm/virtual-frame-arm.h
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
-#define V8_ARM_VIRTUAL_FRAME_ARM_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  class RegisterAllocationScope;
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, and keeps it spilled.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    explicit SpilledScope(VirtualFrame* frame)
-      : old_is_spilled_(
-          Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-      if (frame != NULL) {
-        if (!old_is_spilled_) {
-          frame->SpillAll();
-        } else {
-          frame->AssertIsSpilled();
-        }
-      }
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
-    }
-    ~SpilledScope() {
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
-          old_is_spilled_);
-    }
-    static bool is_spilled() {
-      return Isolate::Current()->is_virtual_frame_in_spilled_scope();
-    }
-
-   private:
-    int old_is_spilled_;
-
-    SpilledScope() { }
-
-    friend class RegisterAllocationScope;
-  };
-
-  class RegisterAllocationScope BASE_EMBEDDED {
-   public:
-    // A utility class to introduce a scope where the virtual frame
-    // is not spilled, i.e. where register allocation occurs.  Eventually
-    // when RegisterAllocationScope is ubiquitous it can be removed
-    // along with the (by then unused) SpilledScope class.
-    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
-    inline ~RegisterAllocationScope();
-
-   private:
-    CodeGenerator* cgen_;
-    bool old_is_spilled_;
-
-    RegisterAllocationScope() { }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct an invalid virtual frame, used by JumpTargets.
-  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  inline CodeGenerator* cgen() const;
-  inline MacroAssembler* masm();
-
-  // The number of elements on the virtual frame.
-  int element_count() const { return element_count_; }
-
-  // The height of the virtual expression stack.
-  inline int height() const;
-
-  bool is_used(int num) {
-    switch (num) {
-      case 0: {  // r0.
-        return kR0InUse[top_of_stack_state_];
-      }
-      case 1: {  // r1.
-        return kR1InUse[top_of_stack_state_];
-      }
-      case 2:
-      case 3:
-      case 4:
-      case 5:
-      case 6: {  // r2 to r6.
-        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
-        ASSERT(num >= kFirstAllocatedRegister);
-        if ((register_allocation_map_ &
-             (1 << (num - kFirstAllocatedRegister))) == 0) {
-          return false;
-        } else {
-          return true;
-        }
-      }
-      default: {
-        ASSERT(num < kFirstAllocatedRegister ||
-               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
-        return false;
-      }
-    }
-  }
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call).  No code is emitted except to bring the
-  // frame to a spilled state.
-  void Forget(int count);
-
-  // Spill all values from the frame to memory.
-  void SpillAll();
-
-  void AssertIsSpilled() const {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    ASSERT(register_allocation_map_ == 0);
-  }
-
-  void AssertIsNotSpilled() {
-    ASSERT(!SpilledScope::is_spilled());
-  }
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    UNIMPLEMENTED();
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references).  Unimplemented.
-  Register SpillAnyRegister();
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected, Condition cond = al);
-  void MergeTo(const VirtualFrame* expected, Condition cond = al);
-
-  // Checks whether this frame can be branched to by the other frame.
-  bool IsCompatibleWith(const VirtualFrame* other) const {
-    return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
-  }
-
-  inline void ForgetTypeInfo() {
-    tos_known_smi_map_ = 0;
-  }
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by getting the elements of the
-  // virtual frame into the state expected at the shared return site.  This
-  // avoids generating unnecessary merge code when jumping there.  No spill
-  // code is emitted.  The value to return should be in r0.
-  inline void PrepareForReturn();
-
-  // Number of local variables at or above which we use a loop for allocating.
-  static const int kLocalVarBound = 5;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // The current top of the expression stack as an assembly operand.
-  MemOperand Top() {
-    AssertIsSpilled();
-    return MemOperand(sp, 0);
-  }
-
-  // An element of the expression stack as an assembly operand.
-  MemOperand ElementAt(int index) {
-    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
-    ASSERT(adjusted_index >= 0);
-    return MemOperand(sp, adjusted_index * kPointerSize);
-  }
-
-  bool KnownSmiAt(int index) {
-    if (index >= kTOSKnownSmiMapSize) return false;
-    return (tos_known_smi_map_ & (1 << index)) != 0;
-  }
-
-  // A frame-allocated local as an assembly operand.
-  inline MemOperand LocalAt(int index);
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // The function frame slot.
-  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
-  // The context frame slot.
-  MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
-  // A parameter as an assembly operand.
-  inline MemOperand ParameterAt(int index);
-
-  // The receiver frame slot.
-  inline MemOperand Receiver();
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline void CallStub(CodeStub* stub, int arg_count);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  void CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  void CallRuntime(const Runtime::Function* f, int arg_count);
-  void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flag,
-                     int arg_count);
-
-  // Call load IC. Receiver is on the stack and is consumed. Result is returned
-  // in r0.
-  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
-  // Call store IC. If the load is contextual, value is found on top of the
-  // frame. If not, value and receiver are on the frame. Both are consumed.
-  // Result is returned in r0.
-  void CallStoreIC(Handle<String> name, bool is_contextual,
-                   StrictModeFlag strict_mode);
-
-  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
-  // Result is returned in r0.
-  void CallKeyedLoadIC();
-
-  // Call keyed store IC. Value, key and receiver are on the stack. All three
-  // are consumed. Result is returned in r0.
-  void CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call into an IC stub given the number of arguments it removes
-  // from the stack.  Register arguments to the IC stub are implicit,
-  // and depend on the type of IC stub.
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      int dropped_args);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Pop an element from the top of the expression stack.  Discards
-  // the result.
-  void Pop();
-
-  // Pop an element from the top of the expression stack.  The register
-  // will be one normally used for top-of-stack register allocation,
-  // so you can't hold on to it if you push on the stack.
-  Register PopToRegister(Register but_not_to_this_one = no_reg);
-
-  // Look at the top of the stack.  The register returned is aliased and
-  // must be copied to a scratch register before modification.
-  Register Peek();
-
-  // Look at the value beneath the top of the stack.  The register returned is
-  // aliased and must be copied to a scratch register before modification.
-  Register Peek2();
-
-  // Duplicate the top of stack.
-  void Dup();
-
-  // Duplicate the two elements on top of stack.
-  void Dup2();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in r0.
-  void SpillAllButCopyTOSToR0();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in r1.
-  void SpillAllButCopyTOSToR1();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in r1
-  // and the next value on the stack in r0.
-  void SpillAllButCopyTOSToR1R0();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-
-  // Takes the top two elements and puts them in r0 (top element) and r1
-  // (second element).
-  void PopToR1R0();
-
-  // Takes the top element and puts it in r1.
-  void PopToR1();
-
-  // Takes the top element and puts it in r0.
-  void PopToR0();
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPushRoot(Heap::RootListIndex index);
-
-  // Overwrite the nth thing on the stack.  If the nth position is in a
-  // register then this turns into a mov, otherwise an str.  Afterwards
-  // you can still use the register even if it is a register that can be
-  // used for TOS (r0 or r1).
-  void SetElementAt(Register reg, int this_far_down);
-
-  // Get a register which is free and which must be immediately used to
-  // push on the top of the stack.
-  Register GetTOSRegister();
-
-  // Push multiple registers on the stack and the virtual frame.
-  // Registers are selected by setting bits in src_regs and
-  // are pushed in decreasing order: r15 .. r0.
-  void EmitPushMultiple(int count, int src_regs);
-
-  static Register scratch0() { return r7; }
-  static Register scratch1() { return r9; }
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  // 5 states for the top of stack, which can be in memory or in r0 and r1.
-  enum TopOfStack {
-    NO_TOS_REGISTERS,
-    R0_TOS,
-    R1_TOS,
-    R1_R0_TOS,
-    R0_R1_TOS,
-    TOS_STATES
-  };
-
-  static const int kMaxTOSRegisters = 2;
-
-  static const bool kR0InUse[TOS_STATES];
-  static const bool kR1InUse[TOS_STATES];
-  static const int kVirtualElements[TOS_STATES];
-  static const TopOfStack kStateAfterPop[TOS_STATES];
-  static const TopOfStack kStateAfterPush[TOS_STATES];
-  static const Register kTopRegister[TOS_STATES];
-  static const Register kBottomRegister[TOS_STATES];
-
-  // We allocate up to 5 locals in registers.
-  static const int kNumberOfAllocatedRegisters = 5;
-  // r2 to r6 are allocated to locals.
-  static const int kFirstAllocatedRegister = 2;
-
-  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
-  static Register AllocatedRegister(int r) {
-    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
-    return kAllocatedRegisters[r];
-  }
-
-  // The number of elements on the stack frame.
-  int element_count_;
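-  // Packed state: which TOS registers hold top-of-stack elements, which
-  // allocated registers are in use, and which top slots are known smis.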
-  TopOfStack top_of_stack_state_:3;
-  int register_allocation_map_:kNumberOfAllocatedRegisters;
-  static const int kTOSKnownSmiMapSize = 4;
-  unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the sp register).  For now since everything is in memory it is given
-  // by the number of elements on the not-very-virtual stack frame.
-  int stack_pointer() { return element_count_ - 1; }
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count() const;
-  inline int local_count() const;
-
-  // The index of the element that is at the processor's frame pointer
-  // (the fp register).  The parameters, receiver, function, and context
-  // are below the frame pointer.
-  inline int frame_pointer() const;
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame.  It is immediately
-  // below the frame pointer.
-  inline int context_index();
-
-  // The index of the function slot in the frame.  It is below the frame
-  // pointer and context slot.
-  inline int function_index();
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lies the return address.
-  inline int local0_index() const;
-
-  // The index of the base of the expression stack.
-  inline int expression_base_index() const;
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  inline int fp_relative(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // If all top-of-stack registers are in use then the lowest one is pushed
-  // onto the physical stack and made free.
-  void EnsureOneFreeTOSRegister();
-
-  // Emit instructions to get the top of stack state from where we are to where
-  // we want to be.
-  void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
-
-  inline bool Equals(const VirtualFrame* other);
-
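-  // Drop count elements from the frame, shifting the known-smi map to match.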
-  inline void LowerHeight(int count) {
-    element_count_ -= count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = 0;
-    } else {
-      tos_known_smi_map_ >>= count;
-    }
-  }
-
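-  // Add count elements to the frame, shifting in their known-smi bits.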
-  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
-    ASSERT(count >= 32 || known_smi_map < (1u << count));
-    element_count_ += count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = known_smi_map;
-    } else {
-      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
-    }
-  }
-
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_VIRTUAL_FRAME_ARM_H_
diff --git a/src/assembler.cc b/src/assembler.cc
index ff48772..ca30e19 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -492,7 +492,8 @@
            target_address());
   } else if (IsPosition(rmode_)) {
     PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
-  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
+  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
+             Isolate::Current()->deoptimizer_data() != NULL) {
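+    // Skip the lookup if the deoptimizer has not been initialized.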
     // Deoptimization bailouts are stored as runtime entries.
     int id = Deoptimizer::GetDeoptimizationId(
         target_address(), Deoptimizer::EAGER);
diff --git a/src/assembler.h b/src/assembler.h
index 62fe04d..e8cecc3 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
@@ -111,7 +111,6 @@
   friend class Assembler;
   friend class RegexpAssembler;
   friend class Displacement;
-  friend class ShadowTarget;
   friend class RegExpMacroAssemblerIrregexp;
 };
 
diff --git a/src/ast-inl.h b/src/ast-inl.h
index 6021fd9..d80684a 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,6 @@
 #include "v8.h"
 
 #include "ast.h"
-#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -62,7 +61,7 @@
 IterationStatement::IterationStatement(ZoneStringList* labels)
     : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
       body_(NULL),
-      continue_target_(JumpTarget::BIDIRECTIONAL),
+      continue_target_(),
       osr_entry_id_(GetNextId()) {
 }
 
diff --git a/src/ast.cc b/src/ast.cc
index 9a263a5..6ea8c27 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,10 +28,10 @@
 #include "v8.h"
 
 #include "ast.h"
-#include "jump-target-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -291,7 +291,7 @@
 }
 
 
-void TargetCollector::AddTarget(BreakTarget* target) {
+void TargetCollector::AddTarget(Label* target) {
   // Add the label to the collector, but discard duplicates.
   int length = targets_->length();
   for (int i = 0; i < length; i++) {
@@ -301,79 +301,6 @@
 }
 
 
-bool Expression::GuaranteedSmiResult() {
-  BinaryOperation* node = AsBinaryOperation();
-  if (node == NULL) return false;
-  Token::Value op = node->op();
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-    case Token::AND:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-    case Token::BIT_XOR:
-    case Token::SHL:
-      return false;
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_AND: {
-      Literal* left = node->left()->AsLiteral();
-      Literal* right = node->right()->AsLiteral();
-      if (left != NULL && left->handle()->IsSmi()) {
-        int value = Smi::cast(*left->handle())->value();
-        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
-          // Result of bitwise or is always a negative Smi.
-          return true;
-        }
-        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
-          // Result of bitwise and is always a positive Smi.
-          return true;
-        }
-      }
-      if (right != NULL && right->handle()->IsSmi()) {
-        int value = Smi::cast(*right->handle())->value();
-        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
-          // Result of bitwise or is always a negative Smi.
-          return true;
-        }
-        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
-          // Result of bitwise and is always a positive Smi.
-          return true;
-        }
-      }
-      return false;
-      break;
-    }
-    case Token::SAR:
-    case Token::SHR: {
-      Literal* right = node->right()->AsLiteral();
-       if (right != NULL && right->handle()->IsSmi()) {
-        int value = Smi::cast(*right->handle())->value();
-        if ((value & 0x1F) > 1 ||
-            (op == Token::SAR && (value & 0x1F) == 1)) {
-          return true;
-        }
-       }
-       return false;
-       break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return false;
-}
-
-
-void Expression::CopyAnalysisResultsFrom(Expression* other) {
-  bitfields_ = other->bitfields_;
-  type_ = other->type_;
-}
-
-
 bool UnaryOperation::ResultOverwriteAllowed() {
   switch (op_) {
     case Token::BIT_NOT:
@@ -416,7 +343,6 @@
   left_ = assignment->target();
   right_ = assignment->value();
   pos_ = assignment->position();
-  CopyAnalysisResultsFrom(assignment);
 }
 
 
@@ -549,7 +475,7 @@
   } else if (is_monomorphic_) {
     monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
     if (monomorphic_receiver_type_->has_external_array_elements()) {
-      SetExternalArrayType(oracle->GetKeyedLoadExternalArrayType(this));
+      set_external_array_type(oracle->GetKeyedLoadExternalArrayType(this));
     }
   }
 }
@@ -569,7 +495,19 @@
     // Record receiver type for monomorphic keyed loads.
     monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
     if (monomorphic_receiver_type_->has_external_array_elements()) {
-      SetExternalArrayType(oracle->GetKeyedStoreExternalArrayType(this));
+      set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
+    }
+  }
+}
+
+
+void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+  if (is_monomorphic_) {
+    // Record receiver type for monomorphic keyed loads.
+    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+    if (monomorphic_receiver_type_->has_external_array_elements()) {
+      set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
     }
   }
 }
diff --git a/src/ast.h b/src/ast.h
index d8bc18e..7f52c88 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -31,7 +31,6 @@
 #include "execution.h"
 #include "factory.h"
 #include "jsregexp.h"
-#include "jump-target.h"
 #include "runtime.h"
 #include "token.h"
 #include "variables.h"
@@ -88,7 +87,6 @@
   V(CallNew)                                    \
   V(CallRuntime)                                \
   V(UnaryOperation)                             \
-  V(IncrementOperation)                         \
   V(CountOperation)                             \
   V(BinaryOperation)                            \
   V(CompareOperation)                           \
@@ -134,6 +132,7 @@
 #undef DECLARE_TYPE_ENUM
 
   static const int kNoNumber = -1;
+  static const int kFunctionEntryId = 2;  // Using 0 could disguise errors.
 
   AstNode() : id_(GetNextId()) {
     Isolate* isolate = Isolate::Current();
@@ -220,7 +219,12 @@
     kTest
   };
 
-  Expression() : bitfields_(0) {}
+  Expression() {}
+
+  virtual int position() const {
+    UNREACHABLE();
+    return 0;
+  }
 
   virtual Expression* AsExpression()  { return this; }
 
@@ -266,70 +270,15 @@
     return Handle<Map>();
   }
 
-  // Static type information for this expression.
-  StaticType* type() { return &type_; }
-
-  // True if the expression is a loop condition.
-  bool is_loop_condition() const {
-    return LoopConditionField::decode(bitfields_);
+  ExternalArrayType external_array_type() const {
+    return external_array_type_;
   }
-  void set_is_loop_condition(bool flag) {
-    bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
-        LoopConditionField::encode(flag);
-  }
-
-  // The value of the expression is guaranteed to be a smi, because the
-  // top operation is a bit operation with a mask, or a shift.
-  bool GuaranteedSmiResult();
-
-  // AST analysis results.
-  void CopyAnalysisResultsFrom(Expression* other);
-
-  // True if the expression rooted at this node can be compiled by the
-  // side-effect free compiler.
-  bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
-  void set_side_effect_free(bool is_side_effect_free) {
-    bitfields_ &= ~SideEffectFreeField::mask();
-    bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
-  }
-
-  // Will the use of this expression treat -0 the same as 0 in all cases?
-  // If so, we can return 0 instead of -0 if we want to, to optimize code.
-  bool no_negative_zero() { return NoNegativeZeroField::decode(bitfields_); }
-  void set_no_negative_zero(bool no_negative_zero) {
-    bitfields_ &= ~NoNegativeZeroField::mask();
-    bitfields_ |= NoNegativeZeroField::encode(no_negative_zero);
-  }
-
-  // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
-  // be applied to the value of this expression?
-  // If so, we may be able to optimize the calculation of the value.
-  bool to_int32() { return ToInt32Field::decode(bitfields_); }
-  void set_to_int32(bool to_int32) {
-    bitfields_ &= ~ToInt32Field::mask();
-    bitfields_ |= ToInt32Field::encode(to_int32);
-  }
-
-  // How many bitwise logical or shift operators are used in this expression?
-  int num_bit_ops() { return NumBitOpsField::decode(bitfields_); }
-  void set_num_bit_ops(int num_bit_ops) {
-    bitfields_ &= ~NumBitOpsField::mask();
-    num_bit_ops = Min(num_bit_ops, kMaxNumBitOps);
-    bitfields_ |= NumBitOpsField::encode(num_bit_ops);
+  void set_external_array_type(ExternalArrayType array_type) {
+    external_array_type_ = array_type;
   }
 
  private:
-  static const int kMaxNumBitOps = (1 << 5) - 1;
-
-  uint32_t bitfields_;
-  StaticType type_;
-
-  // Using template BitField<type, start, size>.
-  class SideEffectFreeField : public BitField<bool, 0, 1> {};
-  class NoNegativeZeroField : public BitField<bool, 1, 1> {};
-  class ToInt32Field : public BitField<bool, 2, 1> {};
-  class NumBitOpsField : public BitField<int, 3, 5> {};
-  class LoopConditionField: public BitField<bool, 8, 1> {};
+  ExternalArrayType external_array_type_;
 };
 
 
@@ -360,7 +309,7 @@
   virtual BreakableStatement* AsBreakableStatement() { return this; }
 
   // Code generation
-  BreakTarget* break_target() { return &break_target_; }
+  Label* break_target() { return &break_target_; }
 
   // Testers.
   bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
@@ -375,7 +324,7 @@
  private:
   ZoneStringList* labels_;
   Type type_;
-  BreakTarget break_target_;
+  Label break_target_;
   int entry_id_;
   int exit_id_;
 };
@@ -446,7 +395,7 @@
   virtual int ContinueId() const = 0;
 
   // Code generation
-  BreakTarget* continue_target()  { return &continue_target_; }
+  Label* continue_target()  { return &continue_target_; }
 
  protected:
   explicit inline IterationStatement(ZoneStringList* labels);
@@ -457,7 +406,7 @@
 
  private:
   Statement* body_;
-  BreakTarget continue_target_;
+  Label continue_target_;
   int osr_entry_id_;
 };
 
@@ -693,10 +642,10 @@
     CHECK(!is_default());
     return label_;
   }
-  JumpTarget* body_target() { return &body_target_; }
+  Label* body_target() { return &body_target_; }
   ZoneList<Statement*>* statements() const { return statements_; }
 
-  int position() { return position_; }
+  int position() const { return position_; }
   void set_position(int pos) { position_ = pos; }
 
   int EntryId() { return entry_id_; }
@@ -708,7 +657,7 @@
 
  private:
   Expression* label_;
-  JumpTarget body_target_;
+  Label body_target_;
   ZoneList<Statement*>* statements_;
   int position_;
   enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
@@ -781,23 +730,23 @@
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  explicit TargetCollector(ZoneList<BreakTarget*>* targets)
+  explicit TargetCollector(ZoneList<Label*>* targets)
       : targets_(targets) {
   }
 
   // Adds a jump target to the collector. The collector stores a pointer, not
   // a copy, of the target to make binding work, so make sure not to pass in
   // references to something on the stack.
-  void AddTarget(BreakTarget* target);
+  void AddTarget(Label* target);
 
   // Virtual behaviour. TargetCollectors are never part of the AST.
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
   virtual TargetCollector* AsTargetCollector() { return this; }
 
-  ZoneList<BreakTarget*>* targets() { return targets_; }
+  ZoneList<Label*>* targets() { return targets_; }
 
  private:
-  ZoneList<BreakTarget*>* targets_;
+  ZoneList<Label*>* targets_;
 };
 
 
@@ -806,16 +755,16 @@
   explicit TryStatement(Block* try_block)
       : try_block_(try_block), escaping_targets_(NULL) { }
 
-  void set_escaping_targets(ZoneList<BreakTarget*>* targets) {
+  void set_escaping_targets(ZoneList<Label*>* targets) {
     escaping_targets_ = targets;
   }
 
   Block* try_block() const { return try_block_; }
-  ZoneList<BreakTarget*>* escaping_targets() const { return escaping_targets_; }
+  ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
 
  private:
   Block* try_block_;
-  ZoneList<BreakTarget*>* escaping_targets_;
+  ZoneList<Label*>* escaping_targets_;
 };
 
 
@@ -1246,7 +1195,7 @@
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
   bool is_synthetic() const { return type_ == SYNTHETIC; }
 
   bool IsStringLength() const { return is_string_length_; }
@@ -1260,11 +1209,6 @@
   }
   bool is_arguments_access() const { return is_arguments_access_; }
 
-  ExternalArrayType GetExternalArrayType() const { return array_type_; }
-  void SetExternalArrayType(ExternalArrayType array_type) {
-    array_type_ = array_type;
-  }
-
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
@@ -1288,7 +1232,6 @@
   bool is_function_prototype_ : 1;
   bool is_arguments_access_ : 1;
   Handle<Map> monomorphic_receiver_type_;
-  ExternalArrayType array_type_;
 };
 
 
@@ -1310,7 +1253,7 @@
 
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
-  int position() { return pos_; }
+  virtual int position() const { return pos_; }
 
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
@@ -1388,7 +1331,7 @@
 
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
-  int position() { return pos_; }
+  virtual int position() const { return pos_; }
 
  private:
   Expression* expression_;
@@ -1471,7 +1414,7 @@
   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
 
   // Bailout support.
   int RightId() const { return right_id_; }
@@ -1487,59 +1430,52 @@
 };
 
 
-class IncrementOperation: public Expression {
- public:
-  IncrementOperation(Token::Value op, Expression* expr)
-      : op_(op), expression_(expr) {
-    ASSERT(Token::IsCountOp(op));
-  }
-
-  DECLARE_NODE_TYPE(IncrementOperation)
-
-  Token::Value op() const { return op_; }
-  bool is_increment() { return op_ == Token::INC; }
-  Expression* expression() const { return expression_; }
-
- private:
-  Token::Value op_;
-  Expression* expression_;
-  int pos_;
-};
-
-
 class CountOperation: public Expression {
  public:
-  CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
-      : is_prefix_(is_prefix), increment_(increment), pos_(pos),
-        assignment_id_(GetNextId()) {
-  }
+  CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
+      : op_(op),
+        is_prefix_(is_prefix),
+        expression_(expr),
+        pos_(pos),
+        assignment_id_(GetNextId()),
+        count_id_(GetNextId()) { }
 
   DECLARE_NODE_TYPE(CountOperation)
 
   bool is_prefix() const { return is_prefix_; }
   bool is_postfix() const { return !is_prefix_; }
 
-  Token::Value op() const { return increment_->op(); }
+  Token::Value op() const { return op_; }
   Token::Value binary_op() {
     return (op() == Token::INC) ? Token::ADD : Token::SUB;
   }
 
-  Expression* expression() const { return increment_->expression(); }
-  IncrementOperation* increment() const { return increment_; }
-  int position() const { return pos_; }
+  Expression* expression() const { return expression_; }
+  virtual int position() const { return pos_; }
 
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
   virtual bool IsInlineable() const;
 
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    return monomorphic_receiver_type_;
+  }
+
   // Bailout support.
   int AssignmentId() const { return assignment_id_; }
+  int CountId() const { return count_id_; }
 
  private:
+  Token::Value op_;
   bool is_prefix_;
-  IncrementOperation* increment_;
+  bool is_monomorphic_;
+  Expression* expression_;
   int pos_;
   int assignment_id_;
+  int count_id_;
+  Handle<Map> monomorphic_receiver_type_;
 };
 
 
@@ -1558,7 +1494,7 @@
   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
 
   virtual bool IsInlineable() const;
 
@@ -1653,7 +1589,7 @@
   Token::Value op() const { return op_; }
   Expression* target() const { return target_; }
   Expression* value() const { return value_; }
-  int position() { return pos_; }
+  virtual int position() const { return pos_; }
   BinaryOperation* binary_operation() const { return binary_operation_; }
 
   // This check relies on the definition order of token in token.h.
@@ -1675,10 +1611,6 @@
   virtual Handle<Map> GetMonomorphicReceiverType() {
     return monomorphic_receiver_type_;
   }
-  ExternalArrayType GetExternalArrayType() const { return array_type_; }
-  void SetExternalArrayType(ExternalArrayType array_type) {
-    array_type_ = array_type;
-  }
 
   // Bailout support.
   int CompoundLoadId() const { return compound_load_id_; }
@@ -1699,7 +1631,6 @@
   bool is_monomorphic_;
   ZoneMapList* receiver_types_;
   Handle<Map> monomorphic_receiver_type_;
-  ExternalArrayType array_type_;
 };
 
 
@@ -1711,7 +1642,7 @@
   DECLARE_NODE_TYPE(Throw)
 
   Expression* exception() const { return exception_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
 
  private:
   Expression* exception_;
@@ -1731,8 +1662,7 @@
                   int num_parameters,
                   int start_position,
                   int end_position,
-                  bool is_expression,
-                  bool contains_loops)
+                  bool is_expression)
       : name_(name),
         scope_(scope),
         body_(body),
@@ -1745,7 +1675,6 @@
         start_position_(start_position),
         end_position_(end_position),
         is_expression_(is_expression),
-        contains_loops_(contains_loops),
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(HEAP->empty_string()),
         pretenure_(false) { }
@@ -1760,7 +1689,6 @@
   int start_position() const { return start_position_; }
   int end_position() const { return end_position_; }
   bool is_expression() const { return is_expression_; }
-  bool contains_loops() const { return contains_loops_; }
   bool strict_mode() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
@@ -1800,8 +1728,6 @@
   int start_position_;
   int end_position_;
   bool is_expression_;
-  bool contains_loops_;
-  bool strict_mode_;
   int function_token_position_;
   Handle<String> inferred_name_;
   bool pretenure_;
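
The ast.h hunks above fold the old IncrementOperation wrapper into CountOperation itself: the node now carries the token, the prefix flag, the operand and two bailout ids (one for the count, one for the write-back) directly. A minimal standalone sketch of the merged node shape, using toy stand-ins for the real V8 types:

    #include <cassert>

    // Toy stand-ins; only the merged CountOperation layout is of interest.
    enum class Token { INC, DEC, ADD, SUB };

    struct Expression { virtual ~Expression() {} };

    static int next_id = 0;
    static int GetNextId() { return next_id++; }

    class CountOperation : public Expression {
     public:
      CountOperation(Token op, bool is_prefix, Expression* expr, int pos)
          : op_(op), is_prefix_(is_prefix), expression_(expr), pos_(pos),
            assignment_id_(GetNextId()),  // bailout id for the write-back
            count_id_(GetNextId()) {      // bailout id for the count itself
        assert(op == Token::INC || op == Token::DEC);
      }
      Token binary_op() const {
        return op_ == Token::INC ? Token::ADD : Token::SUB;
      }
     private:
      Token op_;
      bool is_prefix_;
      Expression* expression_;
      int pos_;
      int assignment_id_;
      int count_id_;
    };

    int main() {
      Expression operand;
      CountOperation inc(Token::INC, /*is_prefix=*/true, &operand, 0);
      assert(inc.binary_op() == Token::ADD);
    }
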
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d408034..56ef072 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -37,7 +37,6 @@
 // as only the stubs up to and including Instanceof allow nested stub calls.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
-  V(GenericBinaryOp)                     \
   V(TypeRecordingBinaryOp)               \
   V(StringAdd)                           \
   V(SubString)                           \
@@ -50,7 +49,6 @@
   V(Instanceof)                          \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
-  V(IntegerMod)                          \
   V(StackCheck)                          \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
@@ -164,10 +162,10 @@
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
-  // GenericBinaryOpStub needs to override this.
+  // TypeRecordingBinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
-  // GenericBinaryOpStub needs to override this.
+  // TypeRecordingBinaryOpStub needs to override this.
   virtual InlineCacheState GetICState() {
     return UNINITIALIZED;
   }
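
The CODE_STUB_LIST_ALL_PLATFORMS macro edited above is an X-macro: each caller supplies its own V, and the list expands once per entry. A small self-contained sketch of how such a list can drive both an enum and a name table (illustrative stub names only):

    #include <cstdio>

    // One list, expanded twice with different definitions of V.
    #define STUB_LIST(V) \
      V(CallFunction)    \
      V(StringAdd)       \
      V(SubString)

    enum StubId {
    #define DEFINE_ID(name) k##name,
      STUB_LIST(DEFINE_ID)
    #undef DEFINE_ID
      kStubCount
    };

    static const char* kStubNames[] = {
    #define DEFINE_NAME(name) #name,
      STUB_LIST(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      std::printf("%d stubs, first is %s\n", kStubCount, kStubNames[kCallFunction]);
    }
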
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
deleted file mode 100644
index f7da54a..0000000
--- a/src/codegen-inl.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_CODEGEN_INL_H_
-#define V8_CODEGEN_INL_H_
-
-#include "codegen.h"
-#include "compiler.h"
-#include "register-allocator-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Handle<Script> CodeGenerator::script() { return info_->script(); }
-
-bool CodeGenerator::is_eval() { return info_->is_eval(); }
-
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-bool CodeGenerator::is_strict_mode() {
-  return info_->function()->strict_mode();
-}
-
-StrictModeFlag CodeGenerator::strict_mode_flag() {
-  return is_strict_mode() ? kStrictMode : kNonStrictMode;
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_CODEGEN_INL_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index d2e7f23..4bbe6ae 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,16 +28,14 @@
 #include "v8.h"
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "prettyprinter.h"
-#include "register-allocator-inl.h"
 #include "rewriter.h"
 #include "runtime.h"
 #include "scopeinfo.h"
 #include "stub-cache.h"
-#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -61,64 +59,6 @@
 #undef __
 
 
-void CodeGenerator::ProcessDeferred() {
-  while (!deferred_.is_empty()) {
-    DeferredCode* code = deferred_.RemoveLast();
-    ASSERT(masm_ == code->masm());
-    // Record position of deferred code stub.
-    masm_->positions_recorder()->RecordStatementPosition(
-        code->statement_position());
-    if (code->position() != RelocInfo::kNoPosition) {
-      masm_->positions_recorder()->RecordPosition(code->position());
-    }
-    // Generate the code.
-    Comment cmnt(masm_, code->comment());
-    masm_->bind(code->entry_label());
-    if (code->AutoSaveAndRestore()) {
-      code->SaveRegisters();
-    }
-    code->Generate();
-    if (code->AutoSaveAndRestore()) {
-      code->RestoreRegisters();
-      code->Exit();
-    }
-  }
-}
-
-
-void DeferredCode::Exit() {
-  masm_->jmp(exit_label());
-}
-
-
-void CodeGenerator::SetFrame(VirtualFrame* new_frame,
-                             RegisterFile* non_frame_registers) {
-  RegisterFile saved_counts;
-  if (has_valid_frame()) {
-    frame_->DetachFromCodeGenerator();
-    // The remaining register reference counts are the non-frame ones.
-    allocator_->SaveTo(&saved_counts);
-  }
-
-  if (new_frame != NULL) {
-    // Restore the non-frame register references that go with the new frame.
-    allocator_->RestoreFrom(non_frame_registers);
-    new_frame->AttachToCodeGenerator();
-  }
-
-  frame_ = new_frame;
-  saved_counts.CopyTo(non_frame_registers);
-}
-
-
-void CodeGenerator::DeleteFrame() {
-  if (has_valid_frame()) {
-    frame_->DetachFromCodeGenerator();
-    frame_ = NULL;
-  }
-}
-
-
 void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
 #ifdef DEBUG
   bool print_source = false;
@@ -230,61 +170,10 @@
 #endif  // ENABLE_DISASSEMBLER
 }
 
-
-// Generate the code.  Compile the AST and assemble all the pieces into a
-// Code object.
-bool CodeGenerator::MakeCode(CompilationInfo* info) {
-  // When using Crankshaft the classic backend should never be used.
-  ASSERT(!V8::UseCrankshaft());
-  Handle<Script> script = info->script();
-  if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-    int len = String::cast(script->source())->length();
-    Counters* counters = info->isolate()->counters();
-    counters->total_old_codegen_source_size()->Increment(len);
-  }
-  if (FLAG_trace_codegen) {
-    PrintF("Classic Compiler - ");
-  }
-  MakeCodePrologue(info);
-  // Generate code.
-  const int kInitialBufferSize = 4 * KB;
-  MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
-  CodeGenerator cgen(&masm);
-  CodeGeneratorScope scope(Isolate::Current(), &cgen);
-  cgen.Generate(info);
-  if (cgen.HasStackOverflow()) {
-    ASSERT(!Isolate::Current()->has_pending_exception());
-    return false;
-  }
-
-  InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
-  // There is no stack check table in code generated by the classic backend.
-  code->SetNoStackCheckTable();
-  CodeGenerator::PrintCode(code, info);
-  info->SetCode(code);  // May be an empty handle.
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  if (FLAG_gdbjit && !code.is_null()) {
-    GDBJITLineInfo* lineinfo =
-        masm.positions_recorder()->DetachGDBJITLineInfo();
-
-    GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
-  }
-#endif
-  return !code.is_null();
-}
-
-
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
-
 static Vector<const char> kRegexp = CStrVector("regexp");
 
-
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
   if (!LOGGER->is_logging() && !CpuProfiler::is_profiling()) return false;
@@ -299,120 +188,6 @@
 #endif
 
 
-void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
-  int length = declarations->length();
-  int globals = 0;
-  for (int i = 0; i < length; i++) {
-    Declaration* node = declarations->at(i);
-    Variable* var = node->proxy()->var();
-    Slot* slot = var->AsSlot();
-
-    // If it was not possible to allocate the variable at compile
-    // time, we need to "declare" it at runtime to make sure it
-    // actually exists in the local context.
-    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
-      VisitDeclaration(node);
-    } else {
-      // Count global variables and functions for later processing
-      globals++;
-    }
-  }
-
-  // Return in case of no declared global functions or variables.
-  if (globals == 0) return;
-
-  // Compute array of global variable and function declarations.
-  Handle<FixedArray> array = FACTORY->NewFixedArray(2 * globals, TENURED);
-  for (int j = 0, i = 0; i < length; i++) {
-    Declaration* node = declarations->at(i);
-    Variable* var = node->proxy()->var();
-    Slot* slot = var->AsSlot();
-
-    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
-      // Skip - already processed.
-    } else {
-      array->set(j++, *(var->name()));
-      if (node->fun() == NULL) {
-        if (var->mode() == Variable::CONST) {
-          // In case this is const property use the hole.
-          array->set_the_hole(j++);
-        } else {
-          array->set_undefined(j++);
-        }
-      } else {
-        Handle<SharedFunctionInfo> function =
-            Compiler::BuildFunctionInfo(node->fun(), script());
-        // Check for stack-overflow exception.
-        if (function.is_null()) {
-          SetStackOverflow();
-          return;
-        }
-        array->set(j++, *function);
-      }
-    }
-  }
-
-  // Invoke the platform-dependent code generator to do the actual
-  // declaration the global variables and functions.
-  DeclareGlobals(array);
-}
-
-
-void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)          \
-    &CodeGenerator::Generate##Name,
-
-const CodeGenerator::InlineFunctionGenerator
-    CodeGenerator::kInlineFunctionGenerators[] = {
-        INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-        INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
-  ZoneList<Expression*>* args = node->arguments();
-  Handle<String> name = node->name();
-  const Runtime::Function* function = node->function();
-  if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
-    int lookup_index = static_cast<int>(function->function_id) -
-        static_cast<int>(Runtime::kFirstInlineFunction);
-    ASSERT(lookup_index >= 0);
-    ASSERT(static_cast<size_t>(lookup_index) <
-           ARRAY_SIZE(kInlineFunctionGenerators));
-    InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
-    (this->*generator)(args);
-    return true;
-  }
-  return false;
-}
-
-
-// Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-// known result for the test expression, with no side effects.
-CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
-    Expression* cond) {
-  if (cond == NULL) return ALWAYS_TRUE;
-
-  Literal* lit = cond->AsLiteral();
-  if (lit == NULL) return DONT_KNOW;
-
-  if (lit->IsTrue()) {
-    return ALWAYS_TRUE;
-  } else if (lit->IsFalse()) {
-    return ALWAYS_FALSE;
-  }
-
-  return DONT_KNOW;
-}
-
-
 bool CodeGenerator::RecordPositions(MacroAssembler* masm,
                                     int pos,
                                     bool right_here) {
@@ -427,34 +202,6 @@
 }
 
 
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
-}
-
-
-void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(masm(), fun->end_position() - 1, false);
-}
-
-
-void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
-  if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
-}
-
-
-void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
-  if (FLAG_debug_info)
-    RecordPositions(masm(), stmt->condition_position(), false);
-}
-
-
-void CodeGenerator::CodeForSourcePosition(int pos) {
-  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
-    masm()->positions_recorder()->RecordPosition(pos);
-  }
-}
-
-
 const char* GenericUnaryOpStub::GetName() {
   switch (op_) {
     case Token::SUB:
diff --git a/src/codegen.h b/src/codegen.h
index aa31999..e551abf 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,7 +54,6 @@
 // shared code:
 //   CodeGenerator
 //   ~CodeGenerator
-//   ProcessDeferred
 //   Generate
 //   ComputeLazyCompile
 //   BuildFunctionInfo
@@ -68,7 +67,6 @@
 //   CodeForDoWhileConditionPosition
 //   CodeForSourcePosition
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 #if V8_TARGET_ARCH_IA32
@@ -83,163 +81,4 @@
 #error Unsupported target architecture.
 #endif
 
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// Code generation can be nested.  Code generation scopes form a stack
-// of active code generators.
-class CodeGeneratorScope BASE_EMBEDDED {
- public:
-  explicit CodeGeneratorScope(Isolate* isolate, CodeGenerator* cgen)
-      : isolate_(isolate) {
-    previous_ = isolate->current_code_generator();
-    isolate->set_current_code_generator(cgen);
-  }
-
-  ~CodeGeneratorScope() {
-    isolate_->set_current_code_generator(previous_);
-  }
-
-  static CodeGenerator* Current(Isolate* isolate) {
-    ASSERT(isolate->current_code_generator() != NULL);
-    return isolate->current_code_generator();
-  }
-
- private:
-  CodeGenerator* previous_;
-  Isolate* isolate_;
-};
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-
-// State of used registers in a virtual frame.
-class FrameRegisterState {
- public:
-  // Captures the current state of the given frame.
-  explicit FrameRegisterState(VirtualFrame* frame);
-
-  // Saves the state in the stack.
-  void Save(MacroAssembler* masm) const;
-
-  // Restores the state from the stack.
-  void Restore(MacroAssembler* masm) const;
-
- private:
-  // Constants indicating special actions.  They should not be multiples
-  // of kPointerSize so they will not collide with valid offsets from
-  // the frame pointer.
-  static const int kIgnore = -1;
-  static const int kPush = 1;
-
-  // This flag is ored with a valid offset from the frame pointer, so
-  // it should fit in the low zero bits of a valid offset.
-  static const int kSyncedFlag = 2;
-
-  int registers_[RegisterAllocator::kNumRegisters];
-};
-
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
-
-
-class FrameRegisterState {
- public:
-  inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
-
-  inline const VirtualFrame* frame() const { return &frame_; }
-
- private:
-  VirtualFrame frame_;
-};
-
-#else
-
-#error Unsupported target architecture.
-
-#endif
-
-
-// RuntimeCallHelper implementation that saves/restores state of a
-// virtual frame.
-class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
- public:
-  // Does not take ownership of |frame_state|.
-  explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
-      : frame_state_(frame_state) {}
-
-  virtual void BeforeCall(MacroAssembler* masm) const;
-
-  virtual void AfterCall(MacroAssembler* masm) const;
-
- private:
-  const FrameRegisterState* frame_state_;
-};
-
-
-// Deferred code objects are small pieces of code that are compiled
-// out of line. They are used to defer the compilation of uncommon
-// paths thereby avoiding expensive jumps around uncommon code parts.
-class DeferredCode: public ZoneObject {
- public:
-  DeferredCode();
-  virtual ~DeferredCode() { }
-
-  virtual void Generate() = 0;
-
-  MacroAssembler* masm() { return masm_; }
-
-  int statement_position() const { return statement_position_; }
-  int position() const { return position_; }
-
-  Label* entry_label() { return &entry_label_; }
-  Label* exit_label() { return &exit_label_; }
-
-#ifdef DEBUG
-  void set_comment(const char* comment) { comment_ = comment; }
-  const char* comment() const { return comment_; }
-#else
-  void set_comment(const char* comment) { }
-  const char* comment() const { return ""; }
-#endif
-
-  inline void Jump();
-  inline void Branch(Condition cc);
-  void BindExit() { masm_->bind(&exit_label_); }
-
-  const FrameRegisterState* frame_state() const { return &frame_state_; }
-
-  void SaveRegisters();
-  void RestoreRegisters();
-  void Exit();
-
-  // If this returns true then all registers will be saved for the duration
-  // of the Generate() call.  Otherwise the registers are not saved and the
-  // Generate() call must bracket runtime any runtime calls with calls to
-  // SaveRegisters() and RestoreRegisters().  In this case the Generate
-  // method must also call Exit() in order to return to the non-deferred
-  // code.
-  virtual bool AutoSaveAndRestore() { return true; }
-
- protected:
-  MacroAssembler* masm_;
-
- private:
-  int statement_position_;
-  int position_;
-
-  Label entry_label_;
-  Label exit_label_;
-
-  FrameRegisterState frame_state_;
-
-#ifdef DEBUG
-  const char* comment_;
-#endif
-  DISALLOW_COPY_AND_ASSIGN(DeferredCode);
-};
-
-
-} }  // namespace v8::internal
-
 #endif  // V8_CODEGEN_H_
diff --git a/src/compiler.cc b/src/compiler.cc
index 4d90c97..86d5de3 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -30,7 +30,7 @@
 #include "compiler.h"
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compilation-cache.h"
 #include "data-flow.h"
 #include "debug.h"
@@ -567,7 +567,7 @@
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
-    if (strict_mode == kStrictMode) info.MarkAsStrict();
+    if (strict_mode == kStrictMode) info.MarkAsStrictMode();
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
@@ -604,6 +604,12 @@
     // parsing statistics.
     HistogramTimerScope timer(isolate->counters()->compile_lazy());
 
+    // After parsing we know the function's strict mode. Remember it.
+    if (info->function()->strict_mode()) {
+      shared->set_strict_mode(true);
+      info->MarkAsStrictMode();
+    }
+
     // Compile the code.
     if (!MakeCode(info)) {
       if (!isolate->has_pending_exception()) {
@@ -784,7 +790,7 @@
     }
   }
 
-  GDBJIT(AddCode(name,
+  GDBJIT(AddCode(Handle<String>(shared->DebugName()),
                  Handle<Script>(info->script()),
                  Handle<Code>(info->code())));
 }
diff --git a/src/compiler.h b/src/compiler.h
index 3cc2490..e75e869 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,6 @@
 
 #include "ast.h"
 #include "frame-element.h"
-#include "register-allocator.h"
 #include "zone.h"
 
 namespace v8 {
@@ -53,7 +52,7 @@
   bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
   bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
   bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
-  bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
+  bool is_strict_mode() const { return (flags_ & IsStrictMode::mask()) != 0; }
   bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
@@ -74,11 +73,11 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
-  void MarkAsStrict() {
-    flags_ |= IsStrict::encode(true);
+  void MarkAsStrictMode() {
+    flags_ |= IsStrictMode::encode(true);
   }
   StrictModeFlag StrictMode() {
-    return is_strict() ? kStrictMode : kNonStrictMode;
+    return is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
@@ -165,7 +164,7 @@
   void Initialize(Mode mode) {
     mode_ = V8::UseCrankshaft() ? mode : NONOPT;
     if (!shared_info_.is_null() && shared_info_->strict_mode()) {
-      MarkAsStrict();
+      MarkAsStrictMode();
     }
   }
 
@@ -185,7 +184,7 @@
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Strict mode - used in eager compilation.
-  class IsStrict: public BitField<bool, 4, 1> {};
+  class IsStrictMode: public BitField<bool, 4, 1> {};
   // Native syntax (%-stuff) allowed?
   class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
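
The IsStrict → IsStrictMode rename keeps the flag in the same BitField<bool, 4, 1> slot of the packed flags word. A simplified sketch of the encode/decode pattern behind these flag classes (the real V8 template is more general):

    #include <cassert>
    #include <cstdint>

    // Simplified BitField: a value occupying `size` bits starting at `shift`.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> shift);
      }
    };

    class IsStrictMode : public BitField<bool, 4, 1> {};

    int main() {
      uint32_t flags = 0;
      flags |= IsStrictMode::encode(true);  // MarkAsStrictMode()
      assert(IsStrictMode::decode(flags));  // is_strict_mode()
    }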
 
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index bf02947..cb7dbf8 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -60,11 +60,7 @@
   if (x < k2Pow52) {
     x += k2Pow52;
     uint32_t result;
-#ifdef BIG_ENDIAN_FLOATING_POINT
-    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
-#else
     Address mantissa_ptr = reinterpret_cast<Address>(&x);
-#endif
     // Copy least significant 32 bits of mantissa.
     memcpy(&result, mantissa_ptr, sizeof(result));
     return negative ? ~result + 1 : result;
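
The conversions-inl.h hunk drops the big-endian branch of a classic trick: adding 2^52 to a non-negative double below that bound leaves its integer part in the low 32 bits of the mantissa, which are the first four bytes of the value on little-endian hosts. A standalone sketch of the idea, assuming a little-endian IEEE-754 double:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint32_t DoubleToUint32Bits(double x) {
      const double k2Pow52 = 4503599627370496.0;  // 2^52
      bool negative = x < 0;
      if (negative) x = -x;
      assert(x < k2Pow52);  // the fast path above guards on this
      x += k2Pow52;         // integer part now sits in the low mantissa bits
      uint32_t result;
      std::memcpy(&result, &x, sizeof(result));  // little-endian: low 32 bits
      return negative ? ~result + 1 : result;    // two's-complement negate
    }

    int main() {
      assert(DoubleToUint32Bits(7.0) == 7u);
      assert(DoubleToUint32Bits(-1.0) == 0xFFFFFFFFu);
    }
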
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 9c02ff4..79339ed 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -490,12 +490,6 @@
 }
 
 
-void AssignedVariablesAnalyzer::VisitIncrementOperation(
-    IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
   ASSERT(av_.IsEmpty());
   if (expr->is_prefix()) MarkIfTrivial(expr->expression());
diff --git a/src/debug.cc b/src/debug.cc
index d6f91d8..093f38e 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -167,7 +167,6 @@
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
       if ((code->is_inline_cache_stub() &&
-           !code->is_binary_op_stub() &&
            !code->is_type_recording_binary_op_stub() &&
            !code->is_compare_ic_stub()) ||
           RelocInfo::IsConstructCall(rmode())) {
@@ -810,7 +809,7 @@
     Handle<Object> message = MessageHandler::MakeMessageObject(
         "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
         Handle<String>(), Handle<JSArray>());
-    MessageHandler::ReportMessage(NULL, message);
+    MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
     return false;
   }
 
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 0fed391..2fc0e47 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -218,8 +218,7 @@
       fp_to_sp_delta_(fp_to_sp_delta),
       output_count_(0),
       output_(NULL),
-      integer32_values_(NULL),
-      double_values_(NULL) {
+      deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
     PrintF("**** DEOPT: ");
     function->PrintName();
@@ -258,8 +257,6 @@
 
 Deoptimizer::~Deoptimizer() {
   ASSERT(input_ == NULL && output_ == NULL);
-  delete[] integer32_values_;
-  delete[] double_values_;
 }
 
 
@@ -390,13 +387,8 @@
   int count = iterator.Next();
   ASSERT(output_ == NULL);
   output_ = new FrameDescription*[count];
-  // Per-frame lists of untagged and unboxed int32 and double values.
-  integer32_values_ = new List<ValueDescriptionInteger32>[count];
-  double_values_ = new List<ValueDescriptionDouble>[count];
   for (int i = 0; i < count; ++i) {
     output_[i] = NULL;
-    integer32_values_[i].Initialize(0);
-    double_values_[i].Initialize(0);
   }
   output_count_ = count;
 
@@ -424,37 +416,19 @@
 }
 
 
-void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
-  // We need to adjust the stack index by one for the top-most frame.
-  int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
-  List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
-  for (int i = 0; i < ints->length(); i++) {
-    ValueDescriptionInteger32 value = ints->at(i);
-    double val = static_cast<double>(value.int32_value());
-    InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
+void Deoptimizer::MaterializeHeapNumbers() {
+  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
+    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+    if (FLAG_trace_deopt) {
+      PrintF("Materializing a new heap number %p [%e] in slot %p\n",
+             reinterpret_cast<void*>(*num),
+             d.value(),
+             d.slot_address());
+    }
+
+    Memory::Object_at(d.slot_address()) = *num;
   }
-
-  // Iterate over double values and convert them to a heap number.
-  List<ValueDescriptionDouble>* doubles = &double_values_[index];
-  for (int i = 0; i < doubles->length(); ++i) {
-    ValueDescriptionDouble value = doubles->at(i);
-    InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
-                          extra_slot_count);
-  }
-}
-
-
-void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
-                                        int stack_index,
-                                        double val,
-                                        int extra_slot_count) {
-  // Add one to the TOS index to take the 'state' pushed before jumping
-  // to the stub that calls Runtime::NotifyDeoptimized into account.
-  int tos_index = stack_index + extra_slot_count;
-  int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
-  if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
-  Handle<Object> num = isolate_->factory()->NewNumber(val);
-  frame->SetExpression(index, *num);
 }
 
 
@@ -500,7 +474,6 @@
       int input_reg = iterator->Next();
       intptr_t value = input_->GetRegister(input_reg);
       bool is_smi = Smi::IsValid(value);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
@@ -517,9 +490,8 @@
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
-        AddInteger32Value(frame_index,
-                          output_index,
-                          static_cast<int32_t>(value));
+        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+                       static_cast<double>(static_cast<int32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       }
       return;
@@ -528,7 +500,6 @@
     case Translation::DOUBLE_REGISTER: {
       int input_reg = iterator->Next();
       double value = input_->GetDoubleRegister(input_reg);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
                output_[frame_index]->GetTop() + output_offset,
@@ -538,7 +509,7 @@
       }
       // We save the untagged value on the side and store a GC-safe
       // temporary placeholder in the frame.
-      AddDoubleValue(frame_index, output_index, value);
+      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
       output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       return;
     }
@@ -566,7 +537,6 @@
           input_->GetOffsetFromSlotIndex(this, input_slot_index);
       intptr_t value = input_->GetFrameSlot(input_offset);
       bool is_smi = Smi::IsValid(value);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
@@ -583,9 +553,8 @@
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
-        AddInteger32Value(frame_index,
-                          output_index,
-                          static_cast<int32_t>(value));
+        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+                       static_cast<double>(static_cast<int32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       }
       return;
@@ -596,7 +565,6 @@
       unsigned input_offset =
           input_->GetOffsetFromSlotIndex(this, input_slot_index);
       double value = input_->GetDoubleFrameSlot(input_offset);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
                output_[frame_index]->GetTop() + output_offset,
@@ -606,7 +574,7 @@
       }
       // We save the untagged value on the side and store a GC-safe
       // temporary placeholder in the frame.
-      AddDoubleValue(frame_index, output_index, value);
+      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
       output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       return;
     }
@@ -910,19 +878,11 @@
 }
 
 
-void Deoptimizer::AddInteger32Value(int frame_index,
-                                    int slot_index,
-                                    int32_t value) {
-  ValueDescriptionInteger32 value_desc(slot_index, value);
-  integer32_values_[frame_index].Add(value_desc);
-}
-
-
-void Deoptimizer::AddDoubleValue(int frame_index,
-                                 int slot_index,
+void Deoptimizer::AddDoubleValue(intptr_t slot_address,
                                  double value) {
-  ValueDescriptionDouble value_desc(slot_index, value);
-  double_values_[frame_index].Add(value_desc);
+  HeapNumberMaterializationDescriptor value_desc(
+      reinterpret_cast<Address>(slot_address), value);
+  deferred_heap_numbers_.Add(value_desc);
 }
 
 
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 514de05..cb82f44 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -42,38 +42,17 @@
 class DeoptimizingCodeListNode;
 
 
-class ValueDescription BASE_EMBEDDED {
+class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
  public:
-  explicit ValueDescription(int index) : stack_index_(index) { }
-  int stack_index() const { return stack_index_; }
+  HeapNumberMaterializationDescriptor(Address slot_address, double val)
+      : slot_address_(slot_address), val_(val) { }
+
+  Address slot_address() const { return slot_address_; }
+  double value() const { return val_; }
 
  private:
-  // Offset relative to the top of the stack.
-  int stack_index_;
-};
-
-
-class ValueDescriptionInteger32: public ValueDescription {
- public:
-  ValueDescriptionInteger32(int index, int32_t value)
-      : ValueDescription(index), int32_value_(value) { }
-  int32_t int32_value() const { return int32_value_; }
-
- private:
-  // Raw value.
-  int32_t int32_value_;
-};
-
-
-class ValueDescriptionDouble: public ValueDescription {
- public:
-  ValueDescriptionDouble(int index, double value)
-      : ValueDescription(index), double_value_(value) { }
-  double double_value() const { return double_value_; }
-
- private:
-  // Raw value.
-  double double_value_;
+  Address slot_address_;
+  double val_;
 };
 
 
@@ -190,7 +169,7 @@
 
   ~Deoptimizer();
 
-  void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
+  void MaterializeHeapNumbers();
 
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);
 
@@ -277,13 +256,7 @@
 
   Object* ComputeLiteral(int index) const;
 
-  void InsertHeapNumberValue(JavaScriptFrame* frame,
-                             int stack_index,
-                             double val,
-                             int extra_slot_count);
-
-  void AddInteger32Value(int frame_index, int slot_index, int32_t value);
-  void AddDoubleValue(int frame_index, int slot_index, double value);
+  void AddDoubleValue(intptr_t slot_address, double value);
 
   static LargeObjectChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
@@ -310,8 +283,7 @@
   // Array of output frame descriptions.
   FrameDescription** output_;
 
-  List<ValueDescriptionInteger32>* integer32_values_;
-  List<ValueDescriptionDouble>* double_values_;
+  List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
   static int table_entry_size_;
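
The deoptimizer rework above replaces the per-frame int32/double lists with one flat list of (slot address, double) descriptors: frame translation leaves a GC-safe placeholder in each affected slot, and MaterializeHeapNumbers() later boxes the values and patches the slots in place. A toy sketch of that two-phase scheme, with plain new standing in for the heap-number factory:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct HeapNumber { double value; };  // stand-in for the boxed number

    struct Descriptor {
      intptr_t* slot_address;
      double value;
    };

    static std::vector<Descriptor> deferred;

    // Phase 1, during frame translation: remember the value, park a placeholder.
    void AddDoubleValue(intptr_t* slot, double value) {
      deferred.push_back({slot, value});
      *slot = 0;  // kPlaceholder: something the GC can safely scan
    }

    // Phase 2, after all frames are built: allocate and patch.
    void MaterializeHeapNumbers() {
      for (const Descriptor& d : deferred) {
        HeapNumber* num = new HeapNumber{d.value};  // leaked here; V8's GC owns it
        *d.slot_address = reinterpret_cast<intptr_t>(num);
      }
      deferred.clear();
    }

    int main() {
      intptr_t frame_slot = 0;
      AddDoubleValue(&frame_slot, 3.5);
      MaterializeHeapNumbers();
      std::printf("%f\n", reinterpret_cast<HeapNumber*>(frame_slot)->value);
    }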
 
diff --git a/src/disassembler.cc b/src/disassembler.cc
index d142ef6..65e1668 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -28,7 +28,7 @@
 #include "v8.h"
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "disasm.h"
@@ -282,7 +282,8 @@
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
         }
-      } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
+      } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
+                 Isolate::Current()->deoptimizer_data() != NULL) {
         // A runtime entry relocinfo might be a deoptimization bailout.
         Address addr = relocinfo.target_address();
         int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
diff --git a/src/execution.cc b/src/execution.cc
index e1b3624..eb26438 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -31,7 +31,7 @@
 
 #include "api.h"
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 236db05..5951806 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -148,15 +148,26 @@
 }
 
 
+Address JavaScriptFrame::GetParameterSlot(int index) const {
+  int param_count = ComputeParametersCount();
+  ASSERT(-1 <= index && index < param_count);
+  int parameter_offset = (param_count - index - 1) * kPointerSize;
+  return caller_sp() + parameter_offset;
+}
+
+
+Object* JavaScriptFrame::GetParameter(int index) const {
+  return Memory::Object_at(GetParameterSlot(index));
+}
+
+
 inline Object* JavaScriptFrame::receiver() const {
-  const int offset = JavaScriptFrameConstants::kReceiverOffset;
-  return Memory::Object_at(caller_sp() + offset);
+  return GetParameter(-1);
 }
 
 
 inline void JavaScriptFrame::set_receiver(Object* value) {
-  const int offset = JavaScriptFrameConstants::kReceiverOffset;
-  Memory::Object_at(caller_sp() + offset) = value;
+  Memory::Object_at(GetParameterSlot(-1)) = value;
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index 1672b1d..e0517c8 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -579,9 +579,7 @@
       isolate(), pc(), &safepoint_entry, &stack_slots);
   unsigned slot_space = stack_slots * kPointerSize;
 
-  // Visit the outgoing parameters. This is usually dealt with by the
-  // callee, but while GC'ing we artificially lower the number of
-  // arguments to zero and let the caller deal with it.
+  // Visit the outgoing parameters.
   Object** parameters_base = &Memory::Object_at(sp());
   Object** parameters_limit = &Memory::Object_at(
       fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
@@ -635,21 +633,6 @@
 
   // Visit the return address in the callee and incoming arguments.
   IteratePc(v, pc_address(), code);
-  IterateArguments(v);
-}
-
-
-Object* JavaScriptFrame::GetParameter(int index) const {
-  ASSERT(index >= 0 && index < ComputeParametersCount());
-  const int offset = JavaScriptFrameConstants::kParam0Offset;
-  return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
-}
-
-
-int JavaScriptFrame::ComputeParametersCount() const {
-  Address base  = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
-  Address limit = fp() + JavaScriptFrameConstants::kLastParameterOffset;
-  return static_cast<int>((base - limit) / kPointerSize);
 }
 
 
@@ -669,27 +652,17 @@
 }
 
 
+int JavaScriptFrame::GetNumberOfIncomingArguments() const {
+  ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
+         isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
+
+  JSFunction* function = JSFunction::cast(this->function());
+  return function->shared()->formal_parameter_count();
+}
+
+
 Address JavaScriptFrame::GetCallerStackPointer() const {
-  int arguments;
-  if (SafeStackFrameIterator::is_active(isolate()) ||
-      isolate()->heap()->gc_state() != Heap::NOT_IN_GC) {
-    // If the we are currently iterating the safe stack the
-    // arguments for frames are traversed as if they were
-    // expression stack elements of the calling frame. The reason for
-    // this rather strange decision is that we cannot access the
-    // function during mark-compact GCs when objects may have been marked.
-    // In fact accessing heap objects (like function->shared() below)
-    // at all during GC is problematic.
-    arguments = 0;
-  } else {
-    // Compute the number of arguments by getting the number of formal
-    // parameters of the function. We must remember to take the
-    // receiver into account (+1).
-    JSFunction* function = JSFunction::cast(this->function());
-    arguments = function->shared()->formal_parameter_count() + 1;
-  }
-  const int offset = StandardFrameConstants::kCallerSPOffset;
-  return fp() + offset + (arguments * kPointerSize);
+  return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
 
@@ -867,9 +840,7 @@
 
 
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  const int arguments = Smi::cast(GetExpression(0))->value();
-  const int offset = StandardFrameConstants::kCallerSPOffset;
-  return fp() + offset + (arguments + 1) * kPointerSize;
+  return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
 
@@ -1109,17 +1080,6 @@
 void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
   IterateExpressions(v);
   IteratePc(v, pc_address(), LookupCode());
-  IterateArguments(v);
-}
-
-
-void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
-  // Traverse callee-saved registers, receiver, and parameters.
-  const int kBaseOffset = JavaScriptFrameConstants::kLastParameterOffset;
-  const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
-  Object** base = &Memory::Object_at(fp() + kBaseOffset);
-  Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
-  v->VisitPointers(base, limit);
 }
 
 
diff --git a/src/frames.h b/src/frames.h
index d6307f0..da9009b 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -463,8 +463,11 @@
   inline void set_receiver(Object* value);
 
   // Access the parameters.
-  Object* GetParameter(int index) const;
-  int ComputeParametersCount() const;
+  inline Address GetParameterSlot(int index) const;
+  inline Object* GetParameter(int index) const;
+  inline int ComputeParametersCount() const {
+    return GetNumberOfIncomingArguments();
+  }
 
   // Check if this frame is a constructor frame invoked through 'new'.
   bool IsConstructor() const;
@@ -502,6 +505,8 @@
 
   virtual Address GetCallerStackPointer() const;
 
+  virtual int GetNumberOfIncomingArguments() const;
+
   // Garbage collection support. Iterates over incoming arguments,
   // receiver, and any callee-saved registers.
   void IterateArguments(ObjectVisitor* v) const;
@@ -562,6 +567,10 @@
   explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
       : JavaScriptFrame(iterator) { }
 
+  virtual int GetNumberOfIncomingArguments() const {
+    return Smi::cast(GetExpression(0))->value();
+  }
+
   virtual Address GetCallerStackPointer() const;
 
  private:
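
With GetCallerStackPointer() reduced to fp() + StandardFrameConstants::kCallerSPOffset, the argument count moves into a virtual GetNumberOfIncomingArguments(): an ordinary frame asks the function for its formal parameter count, while an arguments-adaptor frame reads the actual count it pushed. A toy sketch of that dispatch:

    #include <cassert>

    // Toy stand-ins for the real frame classes.
    struct JavaScriptFrame {
      int formal_parameter_count;
      virtual ~JavaScriptFrame() {}
      virtual int GetNumberOfIncomingArguments() const {
        return formal_parameter_count;  // from the function's SharedFunctionInfo
      }
    };

    struct ArgumentsAdaptorFrame : JavaScriptFrame {
      int pushed_argument_count;  // stand-in for Smi::cast(GetExpression(0))
      virtual int GetNumberOfIncomingArguments() const {
        return pushed_argument_count;
      }
    };

    int main() {
      ArgumentsAdaptorFrame adaptor;
      adaptor.formal_parameter_count = 2;
      adaptor.pushed_argument_count = 5;  // called with more args than declared
      JavaScriptFrame* frame = &adaptor;
      assert(frame->GetNumberOfIncomingArguments() == 5);
    }
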
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index b896fc8..d6ba56e 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,7 @@
 
 #include "v8.h"
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -213,12 +213,6 @@
 }
 
 
-void BreakableStatementChecker::VisitIncrementOperation(
-    IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 void BreakableStatementChecker::VisitProperty(Property* expr) {
   // Property load is breakable.
   is_breakable_ = true;
@@ -1357,11 +1351,6 @@
 }
 
 
-void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
   // The macros used here must preserve the result register.
   __ Drop(stack_depth);
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index c8dbf5d..bf8ac19 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1445,11 +1445,16 @@
 }
 
 
+Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
+
+
 void GDBJITInterface::AddCode(const char* name,
                               Code* code,
                               GDBJITInterface::CodeTag tag,
                               Script* script) {
   if (!FLAG_gdbjit) return;
+
+  ScopedLock lock(mutex_);
   AssertNoAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -1518,6 +1523,7 @@
 void GDBJITInterface::RemoveCode(Code* code) {
   if (!FLAG_gdbjit) return;
 
+  ScopedLock lock(mutex_);
   HashMap::Entry* e = GetEntries()->Lookup(code,
                                            HashForCodeObject(code),
                                            false);
@@ -1537,6 +1543,7 @@
 
 void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
                                                GDBJITLineInfo* line_info) {
+  ScopedLock lock(mutex_);
   ASSERT(!IsLineInfoTagged(line_info));
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   ASSERT(e->value == NULL);
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index d46fec6..de6928f 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -126,6 +126,9 @@
   static void RemoveCode(Code* code);
 
   static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
+
+ private:
+  static Mutex* mutex_;
 };
 
 #define GDBJIT(action) GDBJITInterface::action
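
The gdb-jit hunks serialize AddCode, RemoveCode and RegisterDetailedLineInfo on one static mutex guarding the shared code-entry map. A minimal sketch of the same guarded-registry pattern, with the standard library standing in for V8's OS::CreateMutex and ScopedLock:

    #include <mutex>
    #include <unordered_map>

    class Registry {
     public:
      // Every entry point takes the lock first; it is released on scope exit,
      // just as ScopedLock does above.
      void Add(int code, int info) {
        std::lock_guard<std::mutex> lock(mutex_);
        entries_[code] = info;
      }
      void Remove(int code) {
        std::lock_guard<std::mutex> lock(mutex_);
        entries_.erase(code);
      }
     private:
      static std::mutex mutex_;  // one lock for all instances, as in the diff
      std::unordered_map<int, int> entries_;
    };

    std::mutex Registry::mutex_;

    int main() {
      Registry r;
      r.Add(1, 42);
      r.Remove(1);
    }
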
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 4d13859..c4e8f13 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -558,28 +558,25 @@
 void GlobalHandles::AddObjectGroup(Object*** handles,
                                    size_t length,
                                    v8::RetainedObjectInfo* info) {
-  ObjectGroup* new_entry = new ObjectGroup(length, info);
-  for (size_t i = 0; i < length; ++i) {
-    new_entry->objects_.Add(handles[i]);
+  if (length == 0) {
+    if (info != NULL) info->Dispose();
+    return;
   }
-  object_groups_.Add(new_entry);
+  object_groups_.Add(ObjectGroup::New(handles, length, info));
 }
 
 
-void GlobalHandles::AddImplicitReferences(HeapObject* parent,
+void GlobalHandles::AddImplicitReferences(HeapObject** parent,
                                           Object*** children,
                                           size_t length) {
-  ImplicitRefGroup* new_entry = new ImplicitRefGroup(parent, length);
-  for (size_t i = 0; i < length; ++i) {
-    new_entry->children_.Add(children[i]);
-  }
-  implicit_ref_groups_.Add(new_entry);
+  if (length == 0) return;
+  implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
 }
 
 
 void GlobalHandles::RemoveObjectGroups() {
   for (int i = 0; i < object_groups_.length(); i++) {
-    delete object_groups_.at(i);
+    object_groups_.at(i)->Dispose();
   }
   object_groups_.Clear();
 }
@@ -587,7 +584,7 @@
 
 void GlobalHandles::RemoveImplicitRefGroups() {
   for (int i = 0; i < implicit_ref_groups_.length(); i++) {
-    delete implicit_ref_groups_.at(i);
+    implicit_ref_groups_.at(i)->Dispose();
   }
   implicit_ref_groups_.Clear();
 }
diff --git a/src/global-handles.h b/src/global-handles.h
index a6afb2d..3b6b7b3 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -42,37 +42,66 @@
 // An object group is treated like a single JS object: if one object in
 // the group is alive, all objects in the same group are considered alive.
 // An object group is used to simulate object relationship in a DOM tree.
-class ObjectGroup : public Malloced {
+class ObjectGroup {
  public:
-  ObjectGroup() : objects_(4) {}
-  ObjectGroup(size_t capacity, v8::RetainedObjectInfo* info)
-      : objects_(static_cast<int>(capacity)),
-        info_(info) { }
-  ~ObjectGroup();
+  static ObjectGroup* New(Object*** handles,
+                          size_t length,
+                          v8::RetainedObjectInfo* info) {
+    ASSERT(length > 0);
+    ObjectGroup* group = reinterpret_cast<ObjectGroup*>(
+        malloc(OFFSET_OF(ObjectGroup, objects_[length])));
+    group->length_ = length;
+    group->info_ = info;
+    CopyWords(group->objects_, handles, static_cast<int>(length));
+    return group;
+  }
 
-  List<Object**> objects_;
+  void Dispose() {
+    free(this);
+  }
+
+  size_t length_;
   v8::RetainedObjectInfo* info_;
+  Object** objects_[1];  // Variable sized array.
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ObjectGroup);
+  void* operator new(size_t size);
+  void operator delete(void* p);
+  ~ObjectGroup();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectGroup);
 };
 
 
 // An implicit references group consists of two parts: a parent object and
 // a list of children objects.  If the parent is alive, all the children
 // are alive too.
-class ImplicitRefGroup : public Malloced {
+class ImplicitRefGroup {
  public:
-  ImplicitRefGroup() : children_(4) {}
-  ImplicitRefGroup(HeapObject* parent, size_t capacity)
-      : parent_(parent),
-        children_(static_cast<int>(capacity)) { }
+  static ImplicitRefGroup* New(HeapObject** parent,
+                               Object*** children,
+                               size_t length) {
+    ASSERT(length > 0);
+    ImplicitRefGroup* group = reinterpret_cast<ImplicitRefGroup*>(
+        malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
+    group->parent_ = parent;
+    group->length_ = length;
+    CopyWords(group->children_, children, static_cast<int>(length));
+    return group;
+  }
 
-  HeapObject* parent_;
-  List<Object**> children_;
+  void Dispose() {
+    free(this);
+  }
+
+  HeapObject** parent_;
+  size_t length_;
+  Object** children_[1];  // Variable sized array.
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ImplicitRefGroup);
+  void* operator new(size_t size);
+  void operator delete(void* p);
+  ~ImplicitRefGroup();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ImplicitRefGroup);
 };
 
 
@@ -154,7 +183,7 @@
   // Add an implicit references group.
   // Should be only used in GC callback function before a collection.
   // All groups are destroyed after a mark-compact collection.
-  void AddImplicitReferences(HeapObject* parent,
+  void AddImplicitReferences(HeapObject** parent,
                              Object*** children,
                              size_t length);
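
ObjectGroup::New() above performs a single malloc sized with OFFSET_OF(ObjectGroup, objects_[length]), so the trailing one-element array really holds length pointers and Dispose() is a plain free(). A self-contained sketch of that variable-sized-struct idiom, using offsetof plus an explicit element count (which computes the same size):

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct Object {};  // toy stand-in

    struct ObjectGroup {
      static ObjectGroup* New(Object*** handles, size_t length) {
        assert(length > 0);
        // One allocation covering the header plus `length` trailing slots.
        ObjectGroup* group = static_cast<ObjectGroup*>(
            malloc(offsetof(ObjectGroup, objects_) + length * sizeof(Object**)));
        group->length_ = length;
        memcpy(group->objects_, handles, length * sizeof(Object**));
        return group;
      }
      void Dispose() { free(this); }

      size_t length_;
      Object** objects_[1];  // Variable sized array.
    };

    int main() {
      Object obj;
      Object* ptr = &obj;
      Object** handle = &ptr;
      ObjectGroup* group = ObjectGroup::New(&handle, 1);
      assert(group->objects_[0] == handle);
      group->Dispose();
    }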
 
diff --git a/src/handles.cc b/src/handles.cc
index 97a06d9..326de86 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -369,6 +369,17 @@
 }
 
 
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           Handle<String> name,
+                           LookupResult* result) {
+  PropertyAttributes attributes;
+  Isolate* isolate = Isolate::Current();
+  CALL_HEAP_FUNCTION(isolate,
+                     obj->GetProperty(*obj, result, *name, &attributes),
+                     Object);
+}
+
+
 Handle<Object> GetElement(Handle<Object> obj,
                           uint32_t index) {
   Isolate* isolate = Isolate::Current();
diff --git a/src/handles.h b/src/handles.h
index a357a00..3839f37 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -244,6 +244,11 @@
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           Handle<String> name,
+                           LookupResult* result);
+
+
 Handle<Object> GetElement(Handle<Object> obj,
                           uint32_t index);
 
diff --git a/src/heap.cc b/src/heap.cc
index 6250172..c77364b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compilation-cache.h"
 #include "debug.h"
 #include "heap-profiler.h"
@@ -3231,7 +3231,7 @@
   // Fill these accessors into the dictionary.
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details = descs->GetDetails(i);
+    PropertyDetails details(descs->GetDetails(i));
     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
     PropertyDetails d =
         PropertyDetails(details.attributes(), CALLBACKS, details.index());
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index f7adea6..032ca76 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1050,6 +1050,23 @@
 }
 
 
+bool HConstant::ToBoolean() const {
+  // Converts the constant to a boolean according to the
+  // ECMAScript section 9.2 ToBoolean conversion.
+  if (HasInteger32Value()) return Integer32Value() != 0;
+  if (HasDoubleValue()) {
+    double v = DoubleValue();
+    return v != 0 && !isnan(v);
+  }
+  if (handle()->IsTrue()) return true;
+  if (handle()->IsFalse()) return false;
+  if (handle()->IsUndefined()) return false;
+  if (handle()->IsNull()) return false;
+  if (handle()->IsString() &&
+      String::cast(*handle())->length() == 0) return false;
+  return true;
+}
+
 void HConstant::PrintDataTo(StringStream* stream) {
   handle()->ShortPrint(stream);
 }
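
HConstant::ToBoolean() above folds the ECMAScript section 9.2 ToBoolean conversion at compile time: NaN, plus and minus zero, undefined, null, false and the empty string are falsy; everything else is truthy. A standalone sketch of those rules over a small tagged value type:

    #include <cassert>
    #include <cmath>
    #include <string>

    struct Value {
      enum Kind { kUndefined, kNull, kBoolean, kNumber, kString } kind;
      bool boolean;
      double number;
      std::string string;
    };

    bool ToBoolean(const Value& v) {
      switch (v.kind) {
        case Value::kUndefined:
        case Value::kNull:    return false;
        case Value::kBoolean: return v.boolean;
        case Value::kNumber:  return v.number != 0 && !std::isnan(v.number);
        case Value::kString:  return !v.string.empty();
      }
      return true;  // other heap objects (not modeled here) are always truthy
    }

    int main() {
      assert(!ToBoolean({Value::kNumber, false, 0.0, ""}));  // 0 and -0 are falsy
      assert(!ToBoolean({Value::kString, false, 0.0, ""}));  // empty string
      assert(ToBoolean({Value::kNumber, false, 2.5, ""}));
    }
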
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 053ae9e..c736553 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1961,6 +1961,8 @@
   }
   bool HasStringValue() const { return handle_->IsString(); }
 
+  bool ToBoolean() const;
+
   virtual intptr_t Hashcode() {
     ASSERT(!HEAP->allow_allocation(false));
     return reinterpret_cast<intptr_t>(*handle());
@@ -2908,8 +2910,10 @@
   HStoreGlobalGeneric(HValue* context,
                       HValue* global_object,
                       Handle<Object> name,
-                      HValue* value)
-      : name_(name) {
+                      HValue* value,
+                      bool strict_mode)
+      : name_(name),
+        strict_mode_(strict_mode) {
     SetOperandAt(0, context);
     SetOperandAt(1, global_object);
     SetOperandAt(2, value);
@@ -2921,6 +2925,7 @@
   HValue* global_object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
   HValue* value() { return OperandAt(2); }
+  bool strict_mode() { return strict_mode_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -2932,6 +2937,7 @@
 
  private:
   Handle<Object> name_;
+  bool strict_mode_;
 };
 
 
@@ -3263,8 +3269,10 @@
   HStoreNamedGeneric(HValue* context,
                      HValue* object,
                      Handle<String> name,
-                     HValue* value)
-      : name_(name) {
+                     HValue* value,
+                     bool strict_mode)
+      : name_(name),
+        strict_mode_(strict_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, value);
     SetOperandAt(2, context);
@@ -3275,6 +3283,7 @@
   HValue* value() { return OperandAt(1); }
   HValue* context() { return OperandAt(2); }
   Handle<String> name() { return name_; }
+  bool strict_mode() { return strict_mode_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -3286,6 +3295,7 @@
 
  private:
   Handle<String> name_;
+  bool strict_mode_;
 };
 
 
@@ -3363,7 +3373,9 @@
   HStoreKeyedGeneric(HValue* context,
                      HValue* object,
                      HValue* key,
-                     HValue* value) {
+                     HValue* value,
+                     bool strict_mode)
+      : strict_mode_(strict_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, key);
     SetOperandAt(2, value);
@@ -3375,6 +3387,7 @@
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   HValue* context() { return OperandAt(3); }
+  bool strict_mode() { return strict_mode_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -3383,6 +3396,9 @@
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+
+ private:
+  bool strict_mode_;
 };
 
 
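Each generic store instruction now carries the strict-mode bit of the
function being compiled, so the backend can choose the matching store
IC. A hedged sketch of how a Lithium codegen might consume the flag
(assumes the Lithium wrapper forwards strict_mode(); the exact call
site is not shown in this patch):

    // Pick the strict or non-strict keyed-store stub based on the
    // flag captured when the graph was built.
    Handle<Code> ic = instr->strict_mode()
        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
        : isolate()->builtins()->KeyedStoreIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
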
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 056cf6a..61496aa 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -582,7 +582,7 @@
       phi_list_(NULL) {
   start_environment_ =
       new(zone()) HEnvironment(NULL, info->scope(), info->closure());
-  start_environment_->set_ast_id(info->function()->id());
+  start_environment_->set_ast_id(AstNode::kFunctionEntryId);
   entry_block_ = CreateBasicBlock();
   entry_block_->SetInitialEnvironment(start_environment_);
 }
@@ -2203,7 +2203,7 @@
     HEnvironment* initial_env = environment()->CopyWithoutHistory();
     HBasicBlock* body_entry = CreateBasicBlock(initial_env);
     current_block()->Goto(body_entry);
-    body_entry->SetJoinId(info()->function()->id());
+    body_entry->SetJoinId(AstNode::kFunctionEntryId);
     set_current_block(body_entry);
     VisitStatements(info()->function()->body());
     if (HasStackOverflow()) return NULL;
@@ -2981,7 +2981,12 @@
             HValue* value = Pop();
             Handle<String> name = Handle<String>::cast(key->handle());
             HStoreNamedGeneric* store =
-                new(zone()) HStoreNamedGeneric(context, literal, name, value);
+                new(zone()) HStoreNamedGeneric(
+                                context,
+                                literal,
+                                name,
+                                value,
+                                function_strict_mode());
             AddInstruction(store);
             AddSimulate(key->id());
           } else {
@@ -3122,7 +3127,12 @@
                                                     HValue* value) {
   HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  return new(zone()) HStoreNamedGeneric(context, object, name, value);
+  return new(zone()) HStoreNamedGeneric(
+                         context,
+                         object,
+                         name,
+                         value,
+                         function_strict_mode());
 }
 
 
@@ -3264,26 +3274,8 @@
     value = Pop();
     HValue* key = Pop();
     HValue* object = Pop();
-
-    if (expr->IsMonomorphic()) {
-      Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
-      // An object has either fast elements or external array elements, but
-      // never both. Pixel array maps that are assigned to pixel array elements
-      // are always created with the fast elements flag cleared.
-      if (receiver_type->has_external_array_elements()) {
-        instr = BuildStoreKeyedSpecializedArrayElement(object,
-                                                       key,
-                                                       value,
-                                                       expr);
-      } else if (receiver_type->has_fast_elements()) {
-        instr = BuildStoreKeyedFastElement(object, key, value, expr);
-      }
-    }
-    if (instr == NULL) {
-      instr = BuildStoreKeyedGeneric(object, key, value);
-    }
+    instr = BuildStoreKeyed(object, key, value, expr);
   }
-
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
@@ -3318,7 +3310,8 @@
         new(zone()) HStoreGlobalGeneric(context,
                                         global_object,
                                         var->name(),
-                                        value);
+                                        value,
+                                        function_strict_mode());
     instr->set_position(position);
     AddInstruction(instr);
     ASSERT(instr->HasSideEffects());
@@ -3402,11 +3395,7 @@
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
-      bool is_fast_elements = prop->IsMonomorphic() &&
-          prop->GetMonomorphicReceiverType()->has_fast_elements();
-      HInstruction* load = is_fast_elements
-          ? BuildLoadKeyedFastElement(obj, key, prop)
-          : BuildLoadKeyedGeneric(obj, key);
+      HInstruction* load = BuildLoadKeyed(obj, key, prop);
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
@@ -3418,9 +3407,8 @@
       PushAndAdd(instr);
       if (instr->HasSideEffects()) AddSimulate(operation->id());
 
-      HInstruction* store = is_fast_elements
-          ? BuildStoreKeyedFastElement(obj, key, instr, prop)
-          : BuildStoreKeyedGeneric(obj, key, instr);
+      expr->RecordTypeFeedback(oracle());
+      HInstruction* store = BuildStoreKeyed(obj, key, instr, expr);
       AddInstruction(store);
       // Drop the simulated receiver, key, and value.  Return the value.
       Drop(3);
@@ -3623,17 +3611,40 @@
   AddInstruction(external_elements);
   HLoadKeyedSpecializedArrayElement* pixel_array_value =
       new(zone()) HLoadKeyedSpecializedArrayElement(
-          external_elements, key, expr->GetExternalArrayType());
+          external_elements, key, expr->external_array_type());
   return pixel_array_value;
 }
 
 
+HInstruction* HGraphBuilder::BuildLoadKeyed(HValue* obj,
+                                            HValue* key,
+                                            Property* prop) {
+  if (prop->IsMonomorphic()) {
+    Handle<Map> receiver_type(prop->GetMonomorphicReceiverType());
+    // An object has either fast elements or external array elements, but
+    // never both. Pixel array maps that are assigned to pixel array
+    // elements are always created with the fast elements flag cleared.
+    if (receiver_type->has_external_array_elements()) {
+      return BuildLoadKeyedSpecializedArrayElement(obj, key, prop);
+    } else if (receiver_type->has_fast_elements()) {
+      return BuildLoadKeyedFastElement(obj, key, prop);
+    }
+  }
+  return BuildLoadKeyedGeneric(obj, key);
+}
+
+
 HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
                                                     HValue* key,
                                                     HValue* value) {
   HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  return new(zone()) HStoreKeyedGeneric(context, object, key, value);
+  return new(zone()) HStoreKeyedGeneric(
+                         context,
+                         object,
+                         key,
+                         value,
+                         function_strict_mode());
 }
 
 
@@ -3665,7 +3676,7 @@
     HValue* object,
     HValue* key,
     HValue* val,
-    Assignment* expr) {
+    Expression* expr) {
   ASSERT(expr->IsMonomorphic());
   AddInstruction(new(zone()) HCheckNonSmi(object));
   Handle<Map> map = expr->GetMonomorphicReceiverType();
@@ -3684,7 +3695,29 @@
       external_elements,
       key,
       val,
-      expr->GetExternalArrayType());
+      expr->external_array_type());
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreKeyed(HValue* object,
+                                             HValue* key,
+                                             HValue* value,
+                                             Expression* expr) {
+  if (expr->IsMonomorphic()) {
+    Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
+    // An object has either fast elements or external array elements, but
+    // never both. Pixel array maps that are assigned to pixel array elements
+    // are always created with the fast elements flag cleared.
+    if (receiver_type->has_external_array_elements()) {
+      return BuildStoreKeyedSpecializedArrayElement(object,
+                                                    key,
+                                                    value,
+                                                    expr);
+    } else if (receiver_type->has_fast_elements()) {
+      return BuildStoreKeyedFastElement(object, key, value, expr);
+    }
+  }
+  return BuildStoreKeyedGeneric(object, key, value);
 }
 
 
@@ -3775,21 +3808,7 @@
 
     HValue* key = Pop();
     HValue* obj = Pop();
-
-    if (expr->IsMonomorphic()) {
-      Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
-      // An object has either fast elements or pixel array elements, but never
-      // both. Pixel array maps that are assigned to pixel array elements are
-      // always created with the fast elements flag cleared.
-      if (receiver_type->has_external_array_elements()) {
-        instr = BuildLoadKeyedSpecializedArrayElement(obj, key, expr);
-      } else if (receiver_type->has_fast_elements()) {
-        instr = BuildLoadKeyedFastElement(obj, key, expr);
-      }
-    }
-    if (instr == NULL) {
-      instr = BuildLoadKeyedGeneric(obj, key);
-    }
+    instr = BuildLoadKeyed(obj, key, expr);
   }
   instr->set_position(expr->position());
   ast_context()->ReturnInstruction(instr, expr->id());
@@ -4612,13 +4631,6 @@
 }
 
 
-void HGraphBuilder::VisitIncrementOperation(IncrementOperation* expr) {
-  // IncrementOperation is never visited by the visitor. It only
-  // occurs as a subexpression of CountOperation.
-  UNREACHABLE();
-}
-
-
 HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
   HConstant* delta = increment
       ? graph_->GetConstant1()
@@ -4630,8 +4642,7 @@
 
 
 void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
-  IncrementOperation* increment = expr->increment();
-  Expression* target = increment->expression();
+  Expression* target = expr->expression();
   VariableProxy* proxy = target->AsVariableProxy();
   Variable* var = proxy->AsVariable();
   Property* prop = target->AsProperty();
@@ -4692,7 +4703,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(increment->id());
+      if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
       HValue* before = Pop();
       // There is no deoptimization to after the increment, so we don't need
@@ -4726,14 +4737,9 @@
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
-      bool is_fast_elements = prop->IsMonomorphic() &&
-          prop->GetMonomorphicReceiverType()->has_fast_elements();
-
-      HInstruction* load = is_fast_elements
-          ? BuildLoadKeyedFastElement(obj, key, prop)
-          : BuildLoadKeyedGeneric(obj, key);
+      HInstruction* load = BuildLoadKeyed(obj, key, prop);
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(increment->id());
+      if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
       HValue* before = Pop();
       // There is no deoptimization to after the increment, so we don't need
@@ -4741,9 +4747,8 @@
       HInstruction* after = BuildIncrement(before, inc);
       AddInstruction(after);
 
-      HInstruction* store = is_fast_elements
-          ? BuildStoreKeyedFastElement(obj, key, after, prop)
-          : BuildStoreKeyedGeneric(obj, key, after);
+      expr->RecordTypeFeedback(oracle());
+      HInstruction* store = BuildStoreKeyed(obj, key, after, expr);
       AddInstruction(store);
 
       // Drop the key from the bailout environment.  Overwrite the receiver
@@ -5674,7 +5679,7 @@
     inner->SetValueAt(local_base + i, undefined);
   }
 
-  inner->set_ast_id(function->id());
+  inner->set_ast_id(AstNode::kFunctionEntryId);
   return inner;
 }
 
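With BuildLoadKeyed and BuildStoreKeyed factored out, VisitAssignment,
compound assignment, VisitCountOperation, and VisitProperty all share
one dispatch for keyed element access instead of four inlined copies.
The shared policy as a standalone sketch:

    enum ElementsKind { EXTERNAL_ARRAY, FAST_ELEMENTS, UNKNOWN };

    // Monomorphic receivers with known element storage get a
    // specialized instruction; everything else falls back to the
    // generic keyed IC.
    const char* SelectKeyedAccess(bool monomorphic, ElementsKind kind) {
      if (monomorphic) {
        if (kind == EXTERNAL_ARRAY) return "specialized-array-element";
        if (kind == FAST_ELEMENTS)  return "fast-element";
      }
      return "generic";
    }
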
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 1b2c76a..74c119a 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -709,6 +709,9 @@
   void ClearInlinedTestContext() {
     function_state()->ClearInlinedTestContext();
   }
+  bool function_strict_mode() {
+    return function_state()->compilation_info()->is_strict_mode();
+  }
 
   // Generators for inline runtime functions.
 #define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize)      \
@@ -845,6 +848,10 @@
   HInstruction* BuildLoadKeyedGeneric(HValue* object,
                                       HValue* key);
 
+  HInstruction* BuildLoadKeyed(HValue* obj,
+                               HValue* key,
+                               Property* prop);
+
   HInstruction* BuildLoadNamed(HValue* object,
                                Property* prop,
                                Handle<Map> map,
@@ -874,7 +881,12 @@
       HValue* object,
       HValue* key,
       HValue* val,
-      Assignment* expr);
+      Expression* expr);
+
+  HInstruction* BuildStoreKeyed(HValue* object,
+                                HValue* key,
+                                HValue* value,
+                                Expression* expr);
 
   HValue* BuildContextChainWalk(Variable* var);
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 97d2b03..29c67b5 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 78daf7c..cef3fdc 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -291,166 +291,6 @@
 }
 
 
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
-               op_name,
-               overwrite_name,
-               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
-               args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "",
-               static_operands_type_.ToString(),
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in edx and right in eax.
-    Register left_arg = edx;
-    Register right_arg = eax;
-    if (!(left.is(left_arg) && right.is(right_arg))) {
-      if (left.is(right_arg) && right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          SetArgsReversed();
-        } else {
-          __ xchg(left, right);
-        }
-      } else if (left.is(left_arg)) {
-        __ mov(right_arg, right);
-      } else if (right.is(right_arg)) {
-        __ mov(left_arg, left);
-      } else if (left.is(right_arg)) {
-        if (IsOperationCommutative()) {
-          __ mov(left_arg, right);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying left argument.
-          __ mov(left_arg, left);
-          __ mov(right_arg, right);
-        }
-      } else if (right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          __ mov(right_arg, left);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying right argument.
-          __ mov(right_arg, right);
-          __ mov(left_arg, left);
-        }
-      } else {
-        // Order of moves is not important.
-        __ mov(left_arg, left);
-        __ mov(right_arg, right);
-      }
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    __ IncrementCounter(
-        masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Smi* right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(Immediate(right));
-  } else {
-    // The calling convention with registers is left in edx and right in eax.
-    Register left_arg = edx;
-    Register right_arg = eax;
-    if (left.is(left_arg)) {
-      __ mov(right_arg, Immediate(right));
-    } else if (left.is(right_arg) && IsOperationCommutative()) {
-      __ mov(left_arg, Immediate(right));
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, left and right_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite left before moving
-      // it to left_arg.
-      __ mov(left_arg, left);
-      __ mov(right_arg, Immediate(right));
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    __ IncrementCounter(
-        masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Smi* left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(Immediate(left));
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in edx and right in eax.
-    Register left_arg = edx;
-    Register right_arg = eax;
-    if (right.is(right_arg)) {
-      __ mov(left_arg, Immediate(left));
-    } else if (right.is(left_arg) && IsOperationCommutative()) {
-      __ mov(right_arg, Immediate(left));
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, right and left_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite right before moving
-      // it to right_arg.
-      __ mov(right_arg, right);
-      __ mov(left_arg, Immediate(left));
-    }
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
 class FloatingPointHelper : public AllStatic {
  public:
 
@@ -534,762 +374,6 @@
 };
 
 
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
-  // dividend in eax and edx free for the division.  Use eax, ebx for those.
-  Comment load_comment(masm, "-- Load arguments");
-  Register left = edx;
-  Register right = eax;
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    left = eax;
-    right = ebx;
-    if (HasArgsInRegisters()) {
-      __ mov(ebx, eax);
-      __ mov(eax, edx);
-    }
-  }
-  if (!HasArgsInRegisters()) {
-    __ mov(right, Operand(esp, 1 * kPointerSize));
-    __ mov(left, Operand(esp, 2 * kPointerSize));
-  }
-
-  if (static_operands_type_.IsSmi()) {
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    if (op_ == Token::BIT_OR) {
-      __ or_(right, Operand(left));
-      GenerateReturn(masm);
-      return;
-    } else if (op_ == Token::BIT_AND) {
-      __ and_(right, Operand(left));
-      GenerateReturn(masm);
-      return;
-    } else if (op_ == Token::BIT_XOR) {
-      __ xor_(right, Operand(left));
-      GenerateReturn(masm);
-      return;
-    }
-  }
-
-  // 2. Prepare the smi check of both operands by oring them together.
-  Comment smi_check_comment(masm, "-- Smi check arguments");
-  Label not_smis;
-  Register combined = ecx;
-  ASSERT(!left.is(combined) && !right.is(combined));
-  switch (op_) {
-    case Token::BIT_OR:
-      // Perform the operation into eax and smi check the result.  Preserve
-      // eax in case the result is not a smi.
-      ASSERT(!left.is(ecx) && !right.is(ecx));
-      __ mov(ecx, right);
-      __ or_(right, Operand(left));  // Bitwise or is commutative.
-      combined = right;
-      break;
-
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      __ mov(combined, right);
-      __ or_(combined, Operand(left));
-      break;
-
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      // Move the right operand into ecx for the shift operation, use eax
-      // for the smi check register.
-      ASSERT(!left.is(ecx) && !right.is(ecx));
-      __ mov(ecx, right);
-      __ or_(right, Operand(left));
-      combined = right;
-      break;
-
-    default:
-      break;
-  }
-
-  // 3. Perform the smi check of the operands.
-  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
-  __ test(combined, Immediate(kSmiTagMask));
-  __ j(not_zero, &not_smis, not_taken);
-
-  // 4. Operands are both smis, perform the operation leaving the result in
-  // eax and check the result if necessary.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  Label use_fp_on_smis;
-  switch (op_) {
-    case Token::BIT_OR:
-      // Nothing to do.
-      break;
-
-    case Token::BIT_XOR:
-      ASSERT(right.is(eax));
-      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(eax));
-      __ and_(right, Operand(left));  // Bitwise and is commutative.
-      break;
-
-    case Token::SHL:
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(left);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      __ shl_cl(left);
-      // Check that the *signed* result fits in a smi.
-      __ cmp(left, 0xc0000000);
-      __ j(sign, &use_fp_on_smis, not_taken);
-      // Tag the result and store it in register eax.
-      __ SmiTag(left);
-      __ mov(eax, left);
-      break;
-
-    case Token::SAR:
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(left);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      __ sar_cl(left);
-      // Tag the result and store it in register eax.
-      __ SmiTag(left);
-      __ mov(eax, left);
-      break;
-
-    case Token::SHR:
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(left);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      __ shr_cl(left);
-      // Check that the *unsigned* result fits in a smi.
-      // Neither of the two high-order bits can be set:
-      // - 0x80000000: high bit would be lost when smi tagging.
-      // - 0x40000000: this number would convert to negative when
-      // Smi tagging these two cases can only happen with shifts
-      // by 0 or 1 when handed a valid smi.
-      __ test(left, Immediate(0xc0000000));
-      __ j(not_zero, slow, not_taken);
-      // Tag the result and store it in register eax.
-      __ SmiTag(left);
-      __ mov(eax, left);
-      break;
-
-    case Token::ADD:
-      ASSERT(right.is(eax));
-      __ add(right, Operand(left));  // Addition is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
-      break;
-
-    case Token::SUB:
-      __ sub(left, Operand(right));
-      __ j(overflow, &use_fp_on_smis, not_taken);
-      __ mov(eax, left);
-      break;
-
-    case Token::MUL:
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
-      // We can't revert the multiplication if the result is not a smi
-      // so save the right operand.
-      __ mov(ebx, right);
-      // Remove tag from one of the operands (but keep sign).
-      __ SmiUntag(right);
-      // Do multiplication.
-      __ imul(right, Operand(left));  // Multiplication is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
-      // Check for negative zero result.  Use combined = left | right.
-      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
-      break;
-
-    case Token::DIV:
-      // We can't revert the division if the result is not a smi so
-      // save the left operand.
-      __ mov(edi, left);
-      // Check for 0 divisor.
-      __ test(right, Operand(right));
-      __ j(zero, &use_fp_on_smis, not_taken);
-      // Sign extend left into edx:eax.
-      ASSERT(left.is(eax));
-      __ cdq();
-      // Divide edx:eax by right.
-      __ idiv(right);
-      // Check for the corner case of dividing the most negative smi by
-      // -1. We cannot use the overflow flag, since it is not set by idiv
-      // instruction.
-      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmp(eax, 0x40000000);
-      __ j(equal, &use_fp_on_smis);
-      // Check for negative zero result.  Use combined = left | right.
-      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
-      // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
-      __ j(not_zero, &use_fp_on_smis);
-      // Tag the result and store it in register eax.
-      __ SmiTag(eax);
-      break;
-
-    case Token::MOD:
-      // Check for 0 divisor.
-      __ test(right, Operand(right));
-      __ j(zero, &not_smis, not_taken);
-
-      // Sign extend left into edx:eax.
-      ASSERT(left.is(eax));
-      __ cdq();
-      // Divide edx:eax by right.
-      __ idiv(right);
-      // Check for negative zero result.  Use combined = left | right.
-      __ NegativeZeroTest(edx, combined, slow);
-      // Move remainder to register eax.
-      __ mov(eax, edx);
-      break;
-
-    default:
-      UNREACHABLE();
-  }
-
-  // 5. Emit return of result in eax.
-  GenerateReturn(masm);
-
-  // 6. For some operations emit inline code to perform floating point
-  // operations on known smis (e.g., if the result of the operation
-  // overflowed the smi range).
-  switch (op_) {
-    case Token::SHL: {
-      Comment perform_float(masm, "-- Perform float operation on smis");
-      __ bind(&use_fp_on_smis);
-      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
-        // Result we want is in left == edx, so we can put the allocated heap
-        // number in eax.
-        __ AllocateHeapNumber(eax, ecx, ebx, slow);
-        // Store the result in the HeapNumber and return.
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(left));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        } else {
-          // It's OK to overwrite the right argument on the stack because we
-          // are about to return.
-          __ mov(Operand(esp, 1 * kPointerSize), left);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        }
-        GenerateReturn(masm);
-      } else {
-        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
-        __ jmp(slow);
-      }
-      break;
-    }
-
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      Comment perform_float(masm, "-- Perform float operation on smis");
-      __ bind(&use_fp_on_smis);
-      // Restore arguments to edx, eax.
-      switch (op_) {
-        case Token::ADD:
-          // Revert right = right + left.
-          __ sub(right, Operand(left));
-          break;
-        case Token::SUB:
-          // Revert left = left - right.
-          __ add(left, Operand(right));
-          break;
-        case Token::MUL:
-          // Right was clobbered but a copy is in ebx.
-          __ mov(right, ebx);
-          break;
-        case Token::DIV:
-          // Left was clobbered but a copy is in edi.  Right is in ebx for
-          // division.
-          __ mov(edx, edi);
-          __ mov(eax, right);
-          break;
-        default: UNREACHABLE();
-          break;
-      }
-      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
-        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
-          switch (op_) {
-            case Token::ADD: __ addsd(xmm0, xmm1); break;
-            case Token::SUB: __ subsd(xmm0, xmm1); break;
-            case Token::MUL: __ mulsd(xmm0, xmm1); break;
-            case Token::DIV: __ divsd(xmm0, xmm1); break;
-            default: UNREACHABLE();
-          }
-          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
-        } else {  // SSE2 not available, use FPU.
-          FloatingPointHelper::LoadFloatSmis(masm, ebx);
-          switch (op_) {
-            case Token::ADD: __ faddp(1); break;
-            case Token::SUB: __ fsubp(1); break;
-            case Token::MUL: __ fmulp(1); break;
-            case Token::DIV: __ fdivp(1); break;
-            default: UNREACHABLE();
-          }
-          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
-        }
-        __ mov(eax, ecx);
-        GenerateReturn(masm);
-      } else {
-        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
-        __ jmp(slow);
-      }
-      break;
-    }
-
-    default:
-      break;
-  }
-
-  // 7. Non-smi operands, fall out to the non-smi code with the operands in
-  // edx and eax.
-  Comment done_comment(masm, "-- Enter non-smi code");
-  __ bind(&not_smis);
-  switch (op_) {
-    case Token::BIT_OR:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      // Right operand is saved in ecx and eax was destroyed by the smi
-      // check.
-      __ mov(eax, ecx);
-      break;
-
-    case Token::DIV:
-    case Token::MOD:
-      // Operands are in eax, ebx at this point.
-      __ mov(edx, eax);
-      __ mov(eax, ebx);
-      break;
-
-    default:
-      break;
-  }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
-  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
-    Label slow;
-    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
-    __ bind(&slow);
-    GenerateTypeTransition(masm);
-  }
-
-  // Generate fast case smi code if requested. This flag is set when the fast
-  // case smi code is not generated by the caller. Generating it here will speed
-  // up common operations.
-  if (ShouldGenerateSmiCode()) {
-    GenerateSmiCode(masm, &call_runtime);
-  } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
-    if (!HasArgsInRegisters()) {
-      GenerateLoadArguments(masm);
-    }
-  }
-
-  // Floating point case.
-  if (ShouldGenerateFPCode()) {
-    switch (op_) {
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV: {
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-smi argument occurs
-          // (and only if smi code is generated). This is the right moment to
-          // patch to HEAP_NUMBERS state. The transition is attempted only for
-          // the four basic operations. The stub stays in the DEFAULT state
-          // forever for all other operations (also if smi code is skipped).
-          GenerateTypeTransition(masm);
-          break;
-        }
-
-        Label not_floats;
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          if (static_operands_type_.IsNumber()) {
-            if (FLAG_debug_code) {
-              // Assert at runtime that inputs are only numbers.
-              __ AbortIfNotNumber(edx);
-              __ AbortIfNotNumber(eax);
-            }
-            if (static_operands_type_.IsSmi()) {
-              if (FLAG_debug_code) {
-                __ AbortIfNotSmi(edx);
-                __ AbortIfNotSmi(eax);
-              }
-              FloatingPointHelper::LoadSSE2Smis(masm, ecx);
-            } else {
-              FloatingPointHelper::LoadSSE2Operands(masm);
-            }
-          } else {
-            FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-          }
-
-          switch (op_) {
-            case Token::ADD: __ addsd(xmm0, xmm1); break;
-            case Token::SUB: __ subsd(xmm0, xmm1); break;
-            case Token::MUL: __ mulsd(xmm0, xmm1); break;
-            case Token::DIV: __ divsd(xmm0, xmm1); break;
-            default: UNREACHABLE();
-          }
-          GenerateHeapResultAllocation(masm, &call_runtime);
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-          GenerateReturn(masm);
-        } else {  // SSE2 not available, use FPU.
-          if (static_operands_type_.IsNumber()) {
-            if (FLAG_debug_code) {
-              // Assert at runtime that inputs are only numbers.
-              __ AbortIfNotNumber(edx);
-              __ AbortIfNotNumber(eax);
-            }
-          } else {
-            FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
-          }
-          FloatingPointHelper::LoadFloatOperands(
-              masm,
-              ecx,
-              FloatingPointHelper::ARGS_IN_REGISTERS);
-          switch (op_) {
-            case Token::ADD: __ faddp(1); break;
-            case Token::SUB: __ fsubp(1); break;
-            case Token::MUL: __ fmulp(1); break;
-            case Token::DIV: __ fdivp(1); break;
-            default: UNREACHABLE();
-          }
-          Label after_alloc_failure;
-          GenerateHeapResultAllocation(masm, &after_alloc_failure);
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-          GenerateReturn(masm);
-          __ bind(&after_alloc_failure);
-          __ ffree();
-          __ jmp(&call_runtime);
-        }
-        __ bind(&not_floats);
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            !HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-number argument
-          // occurs (and only if smi code is skipped from the stub, otherwise
-          // the patching has already been done earlier in this case branch).
-          // Try patching to STRINGS for ADD operation.
-          if (op_ == Token::ADD) {
-            GenerateTypeTransition(masm);
-          }
-        }
-        break;
-      }
-      case Token::MOD: {
-        // For MOD we go directly to runtime in the non-smi case.
-        break;
-      }
-      case Token::BIT_OR:
-      case Token::BIT_AND:
-      case Token::BIT_XOR:
-      case Token::SAR:
-      case Token::SHL:
-      case Token::SHR: {
-        Label non_smi_result;
-        FloatingPointHelper::LoadAsIntegers(masm,
-                                            static_operands_type_,
-                                            use_sse3_,
-                                            &call_runtime);
-        switch (op_) {
-          case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
-          case Token::SAR: __ sar_cl(eax); break;
-          case Token::SHL: __ shl_cl(eax); break;
-          case Token::SHR: __ shr_cl(eax); break;
-          default: UNREACHABLE();
-        }
-        if (op_ == Token::SHR) {
-          // Check if result is non-negative and fits in a smi.
-          __ test(eax, Immediate(0xc0000000));
-          __ j(not_zero, &call_runtime);
-        } else {
-          // Check if result fits in a smi.
-          __ cmp(eax, 0xc0000000);
-          __ j(negative, &non_smi_result);
-        }
-        // Tag smi result and return.
-        __ SmiTag(eax);
-        GenerateReturn(masm);
-
-        // All ops except SHR return a signed int32 that we load in
-        // a HeapNumber.
-        if (op_ != Token::SHR) {
-          __ bind(&non_smi_result);
-          // Allocate a heap number if needed.
-          __ mov(ebx, Operand(eax));  // ebx: result
-          NearLabel skip_allocation;
-          switch (mode_) {
-            case OVERWRITE_LEFT:
-            case OVERWRITE_RIGHT:
-              // If the operand was an object, we skip the
-              // allocation of a heap number.
-              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
-                                  1 * kPointerSize : 2 * kPointerSize));
-              __ test(eax, Immediate(kSmiTagMask));
-              __ j(not_zero, &skip_allocation, not_taken);
-              // Fall through!
-            case NO_OVERWRITE:
-              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
-              __ bind(&skip_allocation);
-              break;
-            default: UNREACHABLE();
-          }
-          // Store the result in the HeapNumber and return.
-          if (CpuFeatures::IsSupported(SSE2)) {
-            CpuFeatures::Scope use_sse2(SSE2);
-            __ cvtsi2sd(xmm0, Operand(ebx));
-            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-          } else {
-            __ mov(Operand(esp, 1 * kPointerSize), ebx);
-            __ fild_s(Operand(esp, 1 * kPointerSize));
-            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-          }
-          GenerateReturn(masm);
-        }
-        break;
-      }
-      default: UNREACHABLE(); break;
-    }
-  }
-
-  // If all else fails, use the runtime system to get the correct
-  // result. If arguments was passed in registers now place them on the
-  // stack in the correct order below the return address.
-
-  // Avoid hitting the string ADD code below when allocation fails in
-  // the floating point code above.
-  if (op_ != Token::ADD) {
-    __ bind(&call_runtime);
-  }
-
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      // Test for string arguments before calling runtime.
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in edx, eax
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-      if (HasArgsReversed()) {
-        lhs = eax;
-        rhs = edx;
-      } else {
-        lhs = edx;
-        rhs = eax;
-      }
-
-      // Test if left operand is a string.
-      NearLabel lhs_not_string;
-      __ test(lhs, Immediate(kSmiTagMask));
-      __ j(zero, &lhs_not_string);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &lhs_not_string);
-
-      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
-      __ TailCallStub(&string_add_left_stub);
-
-      NearLabel call_runtime_with_args;
-      // Left operand is not a string, test right.
-      __ bind(&lhs_not_string);
-      __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &call_runtime_with_args);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &call_runtime_with_args);
-
-      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
-      __ TailCallStub(&string_add_right_stub);
-
-      // Neither argument is a string.
-      __ bind(&call_runtime);
-      if (HasArgsInRegisters()) {
-        GenerateRegisterArgsPush(masm);
-      }
-      __ bind(&call_runtime_with_args);
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    }
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
-    case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
-    case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
-    case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                       Label* alloc_failure) {
-  Label skip_allocation;
-  OverwriteMode mode = mode_;
-  if (HasArgsReversed()) {
-    if (mode == OVERWRITE_RIGHT) {
-      mode = OVERWRITE_LEFT;
-    } else if (mode == OVERWRITE_LEFT) {
-      mode = OVERWRITE_RIGHT;
-    }
-  }
-  switch (mode) {
-    case OVERWRITE_LEFT: {
-      // If the argument in edx is already an object, we skip the
-      // allocation of a heap number.
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
-      // Allocate a heap number for the result. Keep eax and edx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
-      // Now edx can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ mov(edx, Operand(ebx));
-      __ bind(&skip_allocation);
-      // Use object in edx as a result holder
-      __ mov(eax, Operand(edx));
-      break;
-    }
-    case OVERWRITE_RIGHT:
-      // If the argument in eax is already an object, we skip the
-      // allocation of a heap number.
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
-      // Fall through!
-    case NO_OVERWRITE:
-      // Allocate a heap number for the result. Keep eax and edx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
-      // Now eax can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ mov(eax, ebx);
-      __ bind(&skip_allocation);
-      break;
-    default: UNREACHABLE();
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  // If arguments are not passed in registers read them from the stack.
-  ASSERT(!HasArgsInRegisters());
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If arguments are not passed in registers remove them from the stack before
-  // returning.
-  if (!HasArgsInRegisters()) {
-    __ ret(2 * kPointerSize);  // Remove both operands
-  } else {
-    __ ret(0);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  ASSERT(HasArgsInRegisters());
-  __ pop(ecx);
-  if (HasArgsReversed()) {
-    __ push(eax);
-    __ push(edx);
-  } else {
-    __ push(edx);
-    __ push(eax);
-  }
-  __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  // Ensure the operands are on the stack.
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  __ pop(ecx);  // Save return address.
-
-  // Left and right arguments are now on top.
-  // Push this stub's key. Although the operation and the type info are
-  // encoded into the key, the encoding is opaque, so push them too.
-  __ push(Immediate(Smi::FromInt(MinorKey())));
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
-  __ push(ecx);  // Push return address.
-
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
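
With GenericBinaryOpStub deleted on ia32, binary operations are served
by the type-recording stub; GetTypeRecordingBinaryOpStub above remains
as the factory. A hedged call-site sketch (macro-assembler context
assumed, not shown in this patch):

    TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
    __ CallStub(&stub);  // The stub patches itself as operand types
                         // are observed at runtime.
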
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index d116bf7..80a75cd 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -72,161 +72,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
-  NO_GENERIC_BINARY_FLAGS = 0,
-  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      GenericBinaryFlags flags,
-                      TypeInfo operands_type)
-      : op_(op),
-        mode_(mode),
-        flags_(flags),
-        args_in_registers_(false),
-        args_reversed_(false),
-        static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) {
-    if (static_operands_type_.IsSmi()) {
-      mode_ = NO_OVERWRITE;
-    }
-    use_sse3_ = CpuFeatures::IsSupported(SSE3);
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        flags_(FlagBits::decode(key)),
-        args_in_registers_(ArgsInRegistersBits::decode(key)),
-        args_reversed_(ArgsReversedBits::decode(key)),
-        use_sse3_(SSE3Bits::decode(key)),
-        static_operands_type_(TypeInfo::ExpandedRepresentation(
-            StaticTypeInfoBits::decode(key))),
-        runtime_operands_type_(runtime_operands_type),
-        name_(NULL) {
-  }
-
-  // Generate code to call the stub with the supplied arguments. This will add
-  // code at the call site to prepare arguments either in registers or on the
-  // stack together with the actual call.
-  void GenerateCall(MacroAssembler* masm, Register left, Register right);
-  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
-  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
-  bool ArgsInRegistersSupported() {
-    return op_ == Token::ADD || op_ == Token::SUB
-        || op_ == Token::MUL || op_ == Token::DIV;
-  }
-
-  void SetArgsInRegisters() {
-    ASSERT(ArgsInRegistersSupported());
-    args_in_registers_ = true;
-  }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  GenericBinaryFlags flags_;
-  bool args_in_registers_;  // Arguments passed in registers not on the stack.
-  bool args_reversed_;  // Left and right argument are swapped.
-  bool use_sse3_;
-
-  // Number type information of operands, determined by code generator.
-  TypeInfo static_operands_type_;
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           static_cast<int>(flags_),
-           static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_),
-           static_operands_type_.ToString());
-  }
-#endif
-
-  // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class SSE3Bits: public BitField<bool, 9, 1> {};
-  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
-  class ArgsReversedBits: public BitField<bool, 11, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class StaticTypeInfoBits: public BitField<int, 13, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    // Encode the parameters in a unique 18 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | FlagBits::encode(flags_)
-           | SSE3Bits::encode(use_sse3_)
-           | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_)
-           | StaticTypeInfoBits::encode(
-                 static_operands_type_.ThreeBitRepresentation())
-           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
-  }
-
-  void Generate(MacroAssembler* masm);
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  bool IsOperationCommutative() {
-    return (op_ == Token::ADD) || (op_ == Token::MUL);
-  }
-
-  void SetArgsReversed() { args_reversed_ = true; }
-  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
-  bool HasArgsInRegisters() { return args_in_registers_; }
-  bool HasArgsReversed() { return args_reversed_; }
-
-  bool ShouldGenerateSmiCode() {
-    return HasSmiCodeInStub() &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
-  }
-
-  friend class CodeGenerator;
-};
-
-
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
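
The deleted MinorKey packed the stub's whole configuration into 18 bits
via BitField. The pattern, as a standalone sketch (the field layout here
is illustrative, not V8's exact one):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 2> ModeBits;  // E.g. overwrite mode.
    typedef BitField<int, 2, 7> OpBits;    // E.g. token value.

    // key encodes both fields; each decodes back independently.
    uint32_t key = ModeBits::encode(1) | OpBits::encode(42);
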
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
deleted file mode 100644
index 49c706d..0000000
--- a/src/ia32/codegen-ia32-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_IA32_CODEGEN_IA32_INL_H_
-#define V8_IA32_CODEGEN_IA32_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 8a47e72..572c36c 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,81 +29,15 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ push(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
-      __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
-    }
-  }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
-  // Restore registers in reverse order due to the stack.
-  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ pop(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore) {
-      action &= ~kSyncedFlag;
-      __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
-    }
-  }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
-  frame_state_.Restore(masm_);
-}
-
 
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  frame_state_->Restore(masm);
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -114,10062 +48,8 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      destination_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
-                           ControlDestination* destination)
-    : owner_(owner),
-      destination_(destination),
-      previous_(owner->state()) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      state_(NULL),
-      loop_nesting_(0),
-      in_safe_int32_mode_(false),
-      safe_int32_mode_enabled_(true),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false),
-      jit_cookie_((FLAG_mask_constants_with_cookie) ?
-                  V8::RandomPrivate(Isolate::Current()) : 0) {
-}
-
-
-// Calling conventions:
-// ebp: caller's frame pointer
-// esp: stack pointer
-// edi: called JS function
-// esi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  set_in_spilled_code(false);
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  masm()->isolate()->set_jump_target_compiling_deferred_code(false);
-
-  {
-    CodeGenState state(this);
-
-    // Entry:
-    // Stack: receiver, arguments, return address.
-    // ebp: caller's frame pointer
-    // esp: stack pointer
-    // edi: called JS function
-    // esi: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ int3();
-    }
-#endif
-
-    frame_->Enter();
-
-    // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots();
-
-    // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      Comment cmnt(masm_, "[ allocate local context");
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      frame_->PushFunction();
-      Result context;
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        context = frame_->CallStub(&stub, 1);
-      } else {
-        context = frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-      // Update context local.
-      frame_->SaveContextRegister();
-
-      // Verify that the runtime call result and esi agree.
-      if (FLAG_debug_code) {
-        __ cmp(context.reg(), Operand(esi));
-        __ Assert(equal, "Runtime::NewContext should end up in esi");
-      }
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, it must be the last argument
-      // passed to the parameter that needs to be copied. This is a rare
-      // case so we don't check for it, instead we rely on the copying
-      // order: such a parameter is copied repeatedly into the same
-      // context location and thus the last value is what is seen inside
-      // the function.
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          // The use of SlotOperand below is safe in unspilled code
-          // because the slot is guaranteed to be a context slot.
-          //
-          // There are no parameters in the global scope.
-          ASSERT(!scope()->is_global_scope());
-          frame_->PushParameterAt(i);
-          Result value = frame_->Pop();
-          value.ToRegister();
-
-          // SlotOperand loads context.reg() with the context object
-          // stored to, used below in RecordWrite.
-          Result context = allocator_->Allocate();
-          ASSERT(context.is_valid());
-          __ mov(SlotOperand(slot, context.reg()), value.reg());
-          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          Result scratch = allocator_->Allocate();
-          ASSERT(scratch.is_valid());
-          frame_->Spill(context.reg());
-          frame_->Spill(value.reg());
-          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->Push(FACTORY->the_hole_value());
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-    CheckStack();
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = info->isolate()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-
-      // Handle the return from the function.
-      if (has_valid_frame()) {
-        // If there is a valid frame, control flow can fall off the end of
-        // the body.  In that case there is an implicit return statement.
-        ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(info->function());
-        frame_->PrepareForReturn();
-        Result undefined(FACTORY->undefined_value());
-        if (function_return_.is_bound()) {
-          function_return_.Jump(&undefined);
-        } else {
-          function_return_.Bind(&undefined);
-          GenerateReturnSequence(&undefined);
-        }
-      } else if (function_return_.is_linked()) {
-        // If the return target has dangling jumps to it, then we have not
-        // yet generated the return sequence.  This can happen when (a)
-        // control does not flow off the end of the body so we did not
-        // compile an artificial return statement just above, and (b) there
-        // are return statements in the body but (c) they are all shadowed.
-        Result return_value;
-        function_return_.Bind(&return_value);
-        GenerateReturnSequence(&return_value);
-      }
-    }
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(state_ == NULL);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    info->isolate()->set_jump_target_compiling_deferred_code(true);
-    ProcessDeferred();
-    info->isolate()->set_jump_target_compiling_deferred_code(false);
-  }
-
-  // There is no need to delete the register allocator, it is a
-  // stack-allocated local.
-  allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(esi));  // do not overwrite context register
-      Register context = esi;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we
-      // are always at a function context. However, it is safe to
-      // dereference because the function context of a function context
-      // is itself. Before deleting this mov we should try to create a
-      // counter-example first, though...)
-      __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return Operand(eax);
-  }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                         Result tmp,
-                                                         JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  ASSERT(tmp.is_register());
-  Register context = esi;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-  }
-  // Check that last extension is NULL.
-  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  slow->Branch(not_equal, not_taken);
-  __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
-                                  ControlDestination* dest,
-                                  bool force_control) {
-  ASSERT(!in_spilled_code());
-  int original_height = frame_->height();
-
-  { CodeGenState new_state(this, dest);
-    Visit(expr);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (eg, a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        !dest->is_used() &&
-        frame_->height() == original_height) {
-      dest->Goto(true);
-    }
-  }
-
-  if (force_control && !dest->is_used()) {
-    // Convert the TOS value into flow to the control destination.
-    ToBoolean(dest);
-  }
-
-  ASSERT(!(force_control && !dest->is_used()));
-  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Load(expression);
-  frame_->SpillAll();
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
-                                         BreakTarget* unsafe_bailout) {
-  set_unsafe_bailout(unsafe_bailout);
-  set_in_safe_int32_mode(true);
-  Load(expr);
-  Result value = frame_->Pop();
-  ASSERT(frame_->HasNoUntaggedInt32Elements());
-  if (expr->GuaranteedSmiResult()) {
-    ConvertInt32ResultToSmi(&value);
-  } else {
-    ConvertInt32ResultToNumber(&value);
-  }
-  set_in_safe_int32_mode(false);
-  set_unsafe_bailout(NULL);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
-  set_safe_int32_mode_enabled(false);
-  Load(expr);
-  set_safe_int32_mode_enabled(true);
-}
-
-
-void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
-  ASSERT(value->is_untagged_int32());
-  if (value->is_register()) {
-    __ add(value->reg(), Operand(value->reg()));
-  } else {
-    ASSERT(value->is_constant());
-    ASSERT(value->handle()->IsSmi());
-  }
-  value->set_untagged_int32(false);
-  value->set_type_info(TypeInfo::Smi());
-}
-
-
-void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
-  ASSERT(value->is_untagged_int32());
-  if (value->is_register()) {
-    Register val = value->reg();
-    JumpTarget done;
-    __ add(val, Operand(val));
-    done.Branch(no_overflow, value);
-    __ sar(val, 1);
-    // If there was an overflow, bits 30 and 31 of the original number disagree.
-    __ xor_(val, 0x80000000u);
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope fscope(SSE2);
-      __ cvtsi2sd(xmm0, Operand(val));
-    } else {
-      // Move val to ST[0] in the FPU
-      // Push and pop are safe with respect to the virtual frame because
-      // all synced elements are below the actual stack pointer.
-      __ push(val);
-      __ fild_s(Operand(esp, 0));
-      __ pop(val);
-    }
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_register());
-    Label allocation_failed;
-    __ AllocateHeapNumber(val, scratch.reg(),
-                          no_reg, &allocation_failed);
-    VirtualFrame* clone = new VirtualFrame(frame_);
-    scratch.Unuse();
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope fscope(SSE2);
-      __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
-    } else {
-      __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
-    }
-    done.Jump(value);
-
-    // Establish the virtual frame, cloned from where AllocateHeapNumber
-    // jumped to allocation_failed.
-    RegisterFile empty_regs;
-    SetFrame(clone, &empty_regs);
-    __ bind(&allocation_failed);
-    if (!CpuFeatures::IsSupported(SSE2)) {
-      // Pop the value from the floating point stack.
-      __ fstp(0);
-    }
-    unsafe_bailout_->Jump();
-
-    done.Bind(value);
-  } else {
-    ASSERT(value->is_constant());
-  }
-  value->set_untagged_int32(false);
-  value->set_type_info(TypeInfo::Integer32());
-}
-
-
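The overflow recovery in ConvertInt32ResultToNumber leans on a bit trick
worth spelling out: smi tagging is "add val, val", which overflows exactly
when bits 30 and 31 of the original value disagree, and a "sar" followed by
xor-ing the sign bit restores the original. A minimal standalone sketch in
plain C++ (hypothetical names, not V8 code; assumes an arithmetic right
shift for signed ints, as the ia32 sar instruction provides):

#include <cassert>
#include <cstdint>

// Recover the value whose smi tagging (val + val) overflowed.
int32_t RecoverAfterTagOverflow(int32_t original) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(original) << 1);
  return (tagged >> 1) ^ static_cast<int32_t>(0x80000000u);  // sar, then xor
}

int main() {
  assert(RecoverAfterTagOverflow(0x40000000) == 0x40000000);    // bit 30 only
  assert(RecoverAfterTagOverflow(-0x40000001) == -0x40000001);  // bit 31 only
}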
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-
-  // If the expression should be a side-effect-free 32-bit int computation,
-  // compile that SafeInt32 path, and a bailout path.
-  if (!in_safe_int32_mode() &&
-      safe_int32_mode_enabled() &&
-      expr->side_effect_free() &&
-      expr->num_bit_ops() > 2 &&
-      CpuFeatures::IsSupported(SSE2)) {
-    BreakTarget unsafe_bailout;
-    JumpTarget done;
-    unsafe_bailout.set_expected_height(frame_->height());
-    LoadInSafeInt32Mode(expr, &unsafe_bailout);
-    done.Jump();
-
-    if (unsafe_bailout.is_linked()) {
-      unsafe_bailout.Bind();
-      LoadWithSafeInt32ModeDisabled(expr);
-    }
-    done.Bind();
-  } else {
-    JumpTarget true_target;
-    JumpTarget false_target;
-    ControlDestination dest(&true_target, &false_target, true);
-    LoadCondition(expr, &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The false target was just bound.
-      JumpTarget loaded;
-      frame_->Push(FACTORY->false_value());
-      // There may be dangling jumps to the true target.
-      if (true_target.is_linked()) {
-        loaded.Jump();
-        true_target.Bind();
-        frame_->Push(FACTORY->true_value());
-        loaded.Bind();
-      }
-
-    } else if (dest.is_used()) {
-      // There is true, and possibly false, control flow (with true as
-      // the fall through).
-      JumpTarget loaded;
-      frame_->Push(FACTORY->true_value());
-      if (false_target.is_linked()) {
-        loaded.Jump();
-        false_target.Bind();
-        frame_->Push(FACTORY->false_value());
-        loaded.Bind();
-      }
-
-    } else {
-      // We have a valid value on top of the frame, but we still may
-      // have dangling jumps to the true and false targets from nested
-      // subexpressions (eg, the left subexpressions of the
-      // short-circuited boolean operators).
-      ASSERT(has_valid_frame());
-      if (true_target.is_linked() || false_target.is_linked()) {
-        JumpTarget loaded;
-        loaded.Jump();  // Don't lose the current TOS.
-        if (true_target.is_linked()) {
-          true_target.Bind();
-          frame_->Push(FACTORY->true_value());
-          if (false_target.is_linked()) {
-            loaded.Jump();
-          }
-        }
-        if (false_target.is_linked()) {
-          false_target.Bind();
-          frame_->Push(FACTORY->false_value());
-        }
-        loaded.Bind();
-      }
-    }
-  }
-  ASSERT(has_valid_frame());
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  if (in_spilled_code()) {
-    frame_->EmitPush(GlobalObjectOperand());
-  } else {
-    Result temp = allocator_->Allocate();
-    __ mov(temp.reg(), GlobalObjectOperand());
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
-  Result temp = allocator_->Allocate();
-  Register reg = temp.reg();
-  __ mov(reg, GlobalObjectOperand());
-  __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the arguments marker value
-    // as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->Push(FACTORY->arguments_marker());
-  } else {
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    frame_->PushFunction();
-    frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope()->num_parameters()));
-    Result result = frame_->CallStub(&stub, 3);
-    frame_->Push(&result);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  bool skip_arguments = false;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if the function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
-    if (probe.is_constant()) {
-      // We have to skip updating the arguments object if it has
-      // been assigned a proper value.
-      skip_arguments = !probe.handle()->IsArgumentsMarker();
-    } else {
-      __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
-      probe.Unuse();
-      done.Branch(not_equal);
-    }
-  }
-  if (!skip_arguments) {
-    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  }
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-  return frame_->Pop();
-}
-
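StoreArgumentsObject above implements lazy allocation with a sentinel: the
arguments slot initially holds the arguments marker, and the real object is
built only if user code has not assigned to the slot in the meantime. A
minimal standalone sketch of the pattern (hypothetical types, not V8 code):

#include <cassert>

struct Obj { bool is_marker; };

static Obj kArgumentsMarker = { true };  // stand-in for the marker value

Obj* AllocateArgumentsObject() { static Obj real = { false }; return &real; }

Obj* EnsureArguments(Obj** slot) {
  if (*slot == &kArgumentsMarker) {      // no user assignment happened yet
    *slot = AllocateArgumentsObject();   // allocate on first real use
  }
  return *slot;                          // otherwise the assigned value wins
}

int main() {
  Obj* slot = &kArgumentsMarker;         // the initial sentinel store
  assert(!EnsureArguments(&slot)->is_marker);
}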
-// -------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  // References are loaded from both spilled and unspilled code.  Set the
-  // state to unspilled to allow that (and explicitly spill after
-  // construction at the construction sites).
-  bool was_in_spilled_code = in_spilled_code_;
-  in_spilled_code_ = false;
-
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      // If eax is free, the register allocator prefers it.  Thus the code
-      // generator will load the global object into eax, which is where
-      // LoadIC wants it.  Most uses of Reference call LoadIC directly
-      // after the reference is created.
-      frame_->Spill(eax);
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-
-  in_spilled_code_ = was_in_spilled_code;
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
-  Comment cmnt(masm_, "[ ToBoolean");
-
-  // The value to convert should be popped from the frame.
-  Result value = frame_->Pop();
-  value.ToRegister();
-
-  if (value.is_integer32()) {  // Also takes Smi case.
-    Comment cmnt(masm_, "ONLY_INTEGER_32");
-    if (FLAG_debug_code) {
-      Label ok;
-      __ AbortIfNotNumber(value.reg());
-      __ test(value.reg(), Immediate(kSmiTagMask));
-      __ j(zero, &ok);
-      __ fldz();
-      __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
-      __ FCmp();
-      __ j(not_zero, &ok);
-      __ Abort("Smi was wrapped in HeapNumber in output from bitop");
-      __ bind(&ok);
-    }
-    // In the integer32 case there are no Smis hidden in heap numbers, so we
-    // need only test for Smi zero.
-    __ test(value.reg(), Operand(value.reg()));
-    dest->false_target()->Branch(zero);
-    value.Unuse();
-    dest->Split(not_zero);
-  } else if (value.is_number()) {
-    Comment cmnt(masm_, "ONLY_NUMBER");
-    // Fast case if TypeInfo indicates only numbers.
-    if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg());
-    }
-    // Smi => false iff zero.
-    STATIC_ASSERT(kSmiTag == 0);
-    __ test(value.reg(), Operand(value.reg()));
-    dest->false_target()->Branch(zero);
-    __ test(value.reg(), Immediate(kSmiTagMask));
-    dest->true_target()->Branch(zero);
-    __ fldz();
-    __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
-    __ FCmp();
-    value.Unuse();
-    dest->Split(not_zero);
-  } else {
-    // Fast case checks.
-    // 'false' => false.
-    __ cmp(value.reg(), FACTORY->false_value());
-    dest->false_target()->Branch(equal);
-
-    // 'true' => true.
-    __ cmp(value.reg(), FACTORY->true_value());
-    dest->true_target()->Branch(equal);
-
-    // 'undefined' => false.
-    __ cmp(value.reg(), FACTORY->undefined_value());
-    dest->false_target()->Branch(equal);
-
-    // Smi => false iff zero.
-    STATIC_ASSERT(kSmiTag == 0);
-    __ test(value.reg(), Operand(value.reg()));
-    dest->false_target()->Branch(zero);
-    __ test(value.reg(), Immediate(kSmiTagMask));
-    dest->true_target()->Branch(zero);
-
-    // Call the stub for all other cases.
-    frame_->Push(&value);  // Undo the Pop() from above.
-    ToBooleanStub stub;
-    Result temp = frame_->CallStub(&stub, 1);
-    // Convert the result to a condition code.
-    __ test(temp.reg(), Operand(temp.reg()));
-    temp.Unuse();
-    dest->Split(not_equal);
-  }
-}
-
-
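The fast-path ordering in ToBoolean above (known singletons first, then the
Smi zero test, then the stub) can be restated as a rough standalone sketch,
using a deliberately simplified value model rather than V8's tagged values:

#include <cassert>

enum class Kind { kFalse, kTrue, kUndefined, kSmi, kOther };

struct Value {
  Kind kind;
  int smi;  // meaningful only when kind == Kind::kSmi
};

bool SlowToBoolean(const Value&) { return true; }  // stand-in for the stub

bool ToBoolean(const Value& v) {
  if (v.kind == Kind::kFalse) return false;      // 'false' => false
  if (v.kind == Kind::kTrue) return true;        // 'true' => true
  if (v.kind == Kind::kUndefined) return false;  // 'undefined' => false
  if (v.kind == Kind::kSmi) return v.smi != 0;   // Smi => false iff zero
  return SlowToBoolean(v);                       // everything else: slow path
}

int main() {
  assert(!ToBoolean(Value{Kind::kSmi, 0}));
  assert(ToBoolean(Value{Kind::kSmi, 7}));
  assert(!ToBoolean(Value{Kind::kUndefined, 0}));
}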
-// Perform or call the specialized stub for a binary operation.  Requires the
-// three registers left, right and dst to be distinct and spilled.  This
-// deferred operation has up to three entry points:  The main one calls the
-// runtime system.  The second is for when the result is a non-Smi.  The
-// third is for when at least one of the inputs is non-Smi and we have SSE2.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
-  DeferredInlineBinaryOperation(Token::Value op,
-                                Register dst,
-                                Register left,
-                                Register right,
-                                TypeInfo left_info,
-                                TypeInfo right_info,
-                                OverwriteMode mode)
-      : op_(op), dst_(dst), left_(left), right_(right),
-        left_info_(left_info), right_info_(right_info), mode_(mode) {
-    set_comment("[ DeferredInlineBinaryOperation");
-    ASSERT(!left.is(right));
-  }
-
-  virtual void Generate();
-
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit().
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToAnswerOutOfRange(Condition cond);
-  void JumpToConstantRhs(Condition cond, Smi* smi_value);
-  Label* NonSmiInputLabel();
-
- private:
-  void GenerateAnswerOutOfRange();
-  void GenerateNonSmiInput();
-
-  Token::Value op_;
-  Register dst_;
-  Register left_;
-  Register right_;
-  TypeInfo left_info_;
-  TypeInfo right_info_;
-  OverwriteMode mode_;
-  Label answer_out_of_range_;
-  Label non_smi_input_;
-  Label constant_rhs_;
-  Smi* smi_value_;
-};
-
-
-Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
-  if (Token::IsBitOp(op_) &&
-      CpuFeatures::IsSupported(SSE2)) {
-    return &non_smi_input_;
-  } else {
-    return entry_label();
-  }
-}
-
-
-void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
-  __ j(cond, &answer_out_of_range_);
-}
-
-
-void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
-                                                      Smi* smi_value) {
-  smi_value_ = smi_value;
-  __ j(cond, &constant_rhs_);
-}
-
-
-void DeferredInlineBinaryOperation::Generate() {
-  // Registers are not saved implicitly for this stub, so we should not
-  // tread on the registers that were not passed to us.
-  if (CpuFeatures::IsSupported(SSE2) &&
-      ((op_ == Token::ADD) ||
-       (op_ == Token::SUB) ||
-       (op_ == Token::MUL) ||
-       (op_ == Token::DIV))) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    Label call_runtime, after_alloc_failure;
-    Label left_smi, right_smi, load_right, do_op;
-    if (!left_info_.IsSmi()) {
-      __ test(left_, Immediate(kSmiTagMask));
-      __ j(zero, &left_smi);
-      if (!left_info_.IsNumber()) {
-        __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        __ j(not_equal, &call_runtime);
-      }
-      __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
-      if (mode_ == OVERWRITE_LEFT) {
-        __ mov(dst_, left_);
-      }
-      __ jmp(&load_right);
-
-      __ bind(&left_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left_);
-    }
-    __ SmiUntag(left_);
-    __ cvtsi2sd(xmm0, Operand(left_));
-    __ SmiTag(left_);
-    if (mode_ == OVERWRITE_LEFT) {
-      Label alloc_failure;
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-      __ pop(left_);
-    }
-
-    __ bind(&load_right);
-    if (!right_info_.IsSmi()) {
-      __ test(right_, Immediate(kSmiTagMask));
-      __ j(zero, &right_smi);
-      if (!right_info_.IsNumber()) {
-        __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        __ j(not_equal, &call_runtime);
-      }
-      __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
-      if (mode_ == OVERWRITE_RIGHT) {
-        __ mov(dst_, right_);
-      } else if (mode_ == NO_OVERWRITE) {
-        Label alloc_failure;
-        __ push(left_);
-        __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-        __ pop(left_);
-      }
-      __ jmp(&do_op);
-
-      __ bind(&right_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right_);
-    }
-    __ SmiUntag(right_);
-    __ cvtsi2sd(xmm1, Operand(right_));
-    __ SmiTag(right_);
-    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-      __ pop(left_);
-    }
-
-    __ bind(&do_op);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
-    }
-    __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    Exit();
-
-
-    __ bind(&after_alloc_failure);
-    __ pop(left_);
-    __ bind(&call_runtime);
-  }
-  // Register spilling is not done implicitly for this stub.
-  // We can't postpone it any more now though.
-  SaveRegisters();
-
-  GenericBinaryOpStub stub(op_,
-                           mode_,
-                           NO_SMI_CODE_IN_STUB,
-                           TypeInfo::Combine(left_info_, right_info_));
-  stub.GenerateCall(masm_, left_, right_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-  RestoreRegisters();
-  Exit();
-
-  if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
-    GenerateNonSmiInput();
-  }
-  if (answer_out_of_range_.is_linked()) {
-    GenerateAnswerOutOfRange();
-  }
-}
-
-
-void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
-  // We know at least one of the inputs was not a Smi.
-  // This is a third entry point into the deferred code.
-  // We may not overwrite left_ because we want to be able
-  // to call the handling code for the non-smi answer and it
-  // might want to overwrite the heap number in left_.
-  ASSERT(!right_.is(dst_));
-  ASSERT(!left_.is(dst_));
-  ASSERT(!left_.is(right_));
-  // This entry point is used for bit ops where the right hand side
-  // is a constant Smi and the left hand side is a heap object.  It
-  // is also used for bit ops where both sides are unknown, but where
-  // at least one of them is a heap object.
-  bool rhs_is_constant = constant_rhs_.is_linked();
-  // We can't generate code for both cases.
-  ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
-
-  if (FLAG_debug_code) {
-    __ int3();  // We don't fall through into this code.
-  }
-
-  __ bind(&non_smi_input_);
-
-  if (rhs_is_constant) {
-    __ bind(&constant_rhs_);
-    // In this case the input is a heap object and it is in the dst_ register.
-    // The left_ and right_ registers have not been initialized yet.
-    __ mov(right_, Immediate(smi_value_));
-    __ mov(left_, Operand(dst_));
-    if (!CpuFeatures::IsSupported(SSE2)) {
-      __ jmp(entry_label());
-      return;
-    } else {
-      CpuFeatures::Scope use_sse2(SSE2);
-      __ JumpIfNotNumber(dst_, left_info_, entry_label());
-      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
-      __ SmiUntag(right_);
-    }
-  } else {
-    // We know we have SSE2 here because otherwise the label is not linked (see
-    // NonSmiInputLabel).
-    CpuFeatures::Scope use_sse2(SSE2);
-    // Handle the non-constant right hand side situation:
-    if (left_info_.IsSmi()) {
-      // Right is a heap object.
-      __ JumpIfNotNumber(right_, right_info_, entry_label());
-      __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
-      __ mov(dst_, Operand(left_));
-      __ SmiUntag(dst_);
-    } else if (right_info_.IsSmi()) {
-      // Left is a heap object.
-      __ JumpIfNotNumber(left_, left_info_, entry_label());
-      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
-      __ SmiUntag(right_);
-    } else {
-      // Here we don't know whether one or both operands are heap objects.
-      Label only_right_is_heap_object, got_both;
-      __ mov(dst_, Operand(left_));
-      __ SmiUntag(dst_, &only_right_is_heap_object);
-      // Left was a heap object.
-      __ JumpIfNotNumber(left_, left_info_, entry_label());
-      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
-      __ SmiUntag(right_, &got_both);
-      // Both were heap objects.
-      __ rcl(right_, 1);  // Put tag back.
-      __ JumpIfNotNumber(right_, right_info_, entry_label());
-      __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
-      __ jmp(&got_both);
-      __ bind(&only_right_is_heap_object);
-      __ JumpIfNotNumber(right_, right_info_, entry_label());
-      __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
-      __ bind(&got_both);
-    }
-  }
-  ASSERT(op_ == Token::BIT_AND ||
-         op_ == Token::BIT_OR ||
-         op_ == Token::BIT_XOR ||
-         right_.is(ecx));
-  switch (op_) {
-    case Token::BIT_AND: __ and_(dst_, Operand(right_));  break;
-    case Token::BIT_OR:   __ or_(dst_, Operand(right_));  break;
-    case Token::BIT_XOR: __ xor_(dst_, Operand(right_));  break;
-    case Token::SHR:     __ shr_cl(dst_);  break;
-    case Token::SAR:     __ sar_cl(dst_);  break;
-    case Token::SHL:     __ shl_cl(dst_);  break;
-    default: UNREACHABLE();
-  }
-  if (op_ == Token::SHR) {
-    // Check that the *unsigned* result fits in a smi.  Neither of
-    // the two high-order bits can be set:
-    //  * 0x80000000: high bit would be lost when smi tagging.
-    //  * 0x40000000: this number would convert to negative when smi
-    //    tagging.
-    __ test(dst_, Immediate(0xc0000000));
-    __ j(not_zero, &answer_out_of_range_);
-  } else {
-    // Check that the *signed* result fits in a smi.
-    __ cmp(dst_, 0xc0000000);
-    __ j(negative, &answer_out_of_range_);
-  }
-  __ SmiTag(dst_);
-  Exit();
-}
-
-
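The two smi range checks at the end of GenerateNonSmiInput restate compactly
in portable C++ (a sketch, not V8 code): an unsigned result fits a smi iff
neither of the top two bits is set, and a signed result fits iff bits 30 and
31 agree, which is exactly what comparing against 0xc0000000 and testing the
sign detects:

#include <cassert>
#include <cstdint>

// A signed value fits the 31-bit smi payload iff bits 30 and 31 agree;
// v - 0xc0000000 is negative as an int32 exactly when they disagree.
bool SignedFitsSmi(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) - 0xc0000000u) >= 0;
}

// An unsigned value fits iff neither 0x80000000 nor 0x40000000 is set.
bool UnsignedFitsSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }

int main() {
  assert(SignedFitsSmi(-1) && SignedFitsSmi(0x3fffffff));
  assert(!SignedFitsSmi(0x40000000) && !SignedFitsSmi(INT32_MIN));
  assert(UnsignedFitsSmi(0x3fffffffu) && !UnsignedFitsSmi(0x40000000u));
}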
-void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
-  Label after_alloc_failure2;
-  Label allocation_ok;
-  __ bind(&after_alloc_failure2);
-  // We have to allocate a number, causing a GC, while keeping hold of
-  // the answer in dst_.  The answer is not a Smi.  We can't just call the
-  // runtime shift function here because we already threw away the inputs.
-  __ xor_(left_, Operand(left_));
-  __ shl(dst_, 1);  // Put top bit in carry flag and Smi tag the low bits.
-  __ rcr(left_, 1);  // Rotate with carry.
-  __ push(dst_);   // Smi tagged low 31 bits.
-  __ push(left_);  // 0 or 0x80000000, which is Smi tagged in both cases.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  if (!left_.is(eax)) {
-    __ mov(left_, eax);
-  }
-  __ pop(right_);   // High bit.
-  __ pop(dst_);     // Low 31 bits.
-  __ shr(dst_, 1);  // Put 0 in top bit.
-  __ or_(dst_, Operand(right_));
-  __ jmp(&allocation_ok);
-
-  // This is the second entry point to the deferred code.  It is used only by
-  // the bit operations.
-  // The dst_ register has the answer.  It is not Smi tagged.  If mode_ is
-  // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
-  // or a Smi.
-  // Put a heap number pointer in left_.
-  __ bind(&answer_out_of_range_);
-  SaveRegisters();
-  if (mode_ == OVERWRITE_LEFT) {
-    __ test(left_, Immediate(kSmiTagMask));
-    __ j(not_zero, &allocation_ok);
-  }
-  // This trashes right_.
-  __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
-  __ bind(&allocation_ok);
-  if (CpuFeatures::IsSupported(SSE2) &&
-      op_ != Token::SHR) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    ASSERT(Token::IsBitOp(op_));
-    // Signed conversion.
-    __ cvtsi2sd(xmm0, Operand(dst_));
-    __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
-  } else {
-    if (op_ == Token::SHR) {
-      __ push(Immediate(0));  // High word of unsigned value.
-      __ push(dst_);
-      __ fild_d(Operand(esp, 0));
-      __ Drop(2);
-    } else {
-      ASSERT(Token::IsBitOp(op_));
-      __ push(dst_);
-      __ fild_s(Operand(esp, 0));  // Signed conversion.
-      __ pop(dst_);
-    }
-    __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
-  }
-  __ mov(dst_, left_);
-  RestoreRegisters();
-  Exit();
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
-                                  Token::Value op,
-                                  const Result& right,
-                                  const Result& left) {
-  // Set TypeInfo of result according to the operation performed.
-  // Rely on the fact that smis have a 31 bit payload on ia32.
-  STATIC_ASSERT(kSmiValueSize == 31);
-  switch (op) {
-    case Token::COMMA:
-      return right.type_info();
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      return operands_type;
-    case Token::BIT_AND: {
-      // Anding with positive Smis will give you a Smi.
-      if (right.is_constant() && right.handle()->IsSmi() &&
-          Smi::cast(*right.handle())->value() >= 0) {
-        return TypeInfo::Smi();
-      } else if (left.is_constant() && left.handle()->IsSmi() &&
-          Smi::cast(*left.handle())->value() >= 0) {
-        return TypeInfo::Smi();
-      }
-      return (operands_type.IsSmi())
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    }
-    case Token::BIT_OR: {
-      // Oring with negative Smis will give you a Smi.
-      if (right.is_constant() && right.handle()->IsSmi() &&
-          Smi::cast(*right.handle())->value() < 0) {
-        return TypeInfo::Smi();
-      } else if (left.is_constant() && left.handle()->IsSmi() &&
-          Smi::cast(*left.handle())->value() < 0) {
-        return TypeInfo::Smi();
-      }
-      return (operands_type.IsSmi())
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    }
-    case Token::BIT_XOR:
-      // Result is always a 32 bit integer. Smi property of inputs is preserved.
-      return (operands_type.IsSmi())
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    case Token::SAR:
-      if (left.is_smi()) return TypeInfo::Smi();
-      // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
-      // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
-      return (right.is_constant() && right.handle()->IsSmi()
-              && (Smi::cast(*right.handle())->value() & 0x1F)  >= 1)
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    case Token::SHR:
-      // Result is a smi if we shift by a constant >= 2, an integer32 if
-      // we shift by 1, and an unsigned 32-bit integer if we shift by 0.
-      if (right.is_constant() && right.handle()->IsSmi()) {
-        int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
-        if (shift_amount > 1) {
-          return TypeInfo::Smi();
-        } else if (shift_amount > 0) {
-          return TypeInfo::Integer32();
-        }
-      }
-      return TypeInfo::Number();
-    case Token::ADD:
-      if (operands_type.IsSmi()) {
-        // The Integer32 range is big enough to take the sum of any two Smis.
-        return TypeInfo::Integer32();
-      } else if (operands_type.IsNumber()) {
-        return TypeInfo::Number();
-      } else if (left.type_info().IsString() || right.type_info().IsString()) {
-        return TypeInfo::String();
-      } else {
-        return TypeInfo::Unknown();
-      }
-    case Token::SHL:
-      return TypeInfo::Integer32();
-    case Token::SUB:
-      // The Integer32 range is big enough to take the difference of any two
-      // Smis.
-      return (operands_type.IsSmi()) ?
-                    TypeInfo::Integer32() :
-                    TypeInfo::Number();
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      return TypeInfo::Number();
-    default:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return TypeInfo::Unknown();
-}
-
-
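Several of the range claims in CalculateTypeInfo are easy to sanity-check
numerically, given the 31-bit smi payload asserted above. A standalone check
(plain C++, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // With kSmiValueSize == 31 the payload range is [-2^30, 2^30 - 1], so
  // the sum or difference of any two smis always fits in an int32.
  const int64_t kSmiMin = -(INT64_C(1) << 30);
  const int64_t kSmiMax = (INT64_C(1) << 30) - 1;
  assert(kSmiMax + kSmiMax <= INT32_MAX && kSmiMin + kSmiMin >= INT32_MIN);
  assert(kSmiMax - kSmiMin <= INT32_MAX && kSmiMin - kSmiMax >= INT32_MIN);
  // SHR by a constant >= 2 clears the two top bits, so such a result is
  // always a valid (non-negative) smi.
  assert((UINT32_C(0xffffffff) >> 2) == static_cast<uint32_t>(kSmiMax));
}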
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
-                                           OverwriteMode overwrite_mode) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = expr->op();
-  Comment cmnt_token(masm_, Token::String(op));
-
-  if (op == Token::COMMA) {
-    // Simply discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  if (op == Token::ADD) {
-    const bool left_is_string = left.type_info().IsString();
-    const bool right_is_string = right.type_info().IsString();
-    // Make sure constant strings have string type info.
-    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
-           left_is_string);
-    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
-           right_is_string);
-    if (left_is_string || right_is_string) {
-      frame_->Push(&left);
-      frame_->Push(&right);
-      Result answer;
-      if (left_is_string) {
-        if (right_is_string) {
-          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        } else {
-          StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        }
-      } else if (right_is_string) {
-        StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
-        answer = frame_->CallStub(&stub, 2);
-      }
-      answer.set_type_info(TypeInfo::String());
-      frame_->Push(&answer);
-      return;
-    }
-    // Neither operand is known to be a string.
-  }
-
-  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi_constant =
-      right.is_constant() && !right.handle()->IsSmi();
-
-  if (left_is_smi_constant && right_is_smi_constant) {
-    // Compute the constant result at compile time, and leave it on the frame.
-    int left_int = Smi::cast(*left.handle())->value();
-    int right_int = Smi::cast(*right.handle())->value();
-    if (FoldConstantSmis(op, left_int, right_int)) return;
-  }
-
-  // Get number type of left and right sub-expressions.
-  TypeInfo operands_type =
-      TypeInfo::Combine(left.type_info(), right.type_info());
-
-  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
-  Result answer;
-  if (left_is_non_smi_constant || right_is_non_smi_constant) {
-    // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op,
-                             overwrite_mode,
-                             NO_SMI_CODE_IN_STUB,
-                             operands_type);
-    answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-  } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
-                                        false, overwrite_mode);
-  } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
-                                        true, overwrite_mode);
-  } else {
-    // Set the flags based on the operation, type and loop nesting level.
-    // Bit operations always assume they likely operate on Smis. Still only
-    // generate the inline Smi check code if this operation is part of a loop.
-    // For all other operations only inline the Smi check code for likely smis
-    // if the operation is part of a loop.
-    if (loop_nesting() > 0 &&
-        (Token::IsBitOp(op) ||
-         operands_type.IsInteger32() ||
-         expr->type()->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
-    } else {
-      GenericBinaryOpStub stub(op,
-                               overwrite_mode,
-                               NO_GENERIC_BINARY_FLAGS,
-                               operands_type);
-      answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-    }
-  }
-
-  answer.set_type_info(result_type);
-  frame_->Push(&answer);
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                                      Result* left,
-                                                      Result* right) {
-  if (stub->ArgsInRegistersSupported()) {
-    stub->SetArgsInRegisters();
-    return frame_->CallStub(stub, left, right);
-  } else {
-    frame_->Push(left);
-    frame_->Push(right);
-    return frame_->CallStub(stub, 2);
-  }
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
-  Object* answer_object = HEAP->undefined_value();
-  switch (op) {
-    case Token::ADD:
-      if (Smi::IsValid(left + right)) {
-        answer_object = Smi::FromInt(left + right);
-      }
-      break;
-    case Token::SUB:
-      if (Smi::IsValid(left - right)) {
-        answer_object = Smi::FromInt(left - right);
-      }
-      break;
-    case Token::MUL: {
-        double answer = static_cast<double>(left) * right;
-        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
-          // If the product is zero and the non-zero factor is negative,
-          // the spec requires us to return floating point negative zero.
-          if (answer != 0 || (left >= 0 && right >= 0)) {
-            answer_object = Smi::FromInt(static_cast<int>(answer));
-          }
-        }
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD:
-      break;
-    case Token::BIT_OR:
-      answer_object = Smi::FromInt(left | right);
-      break;
-    case Token::BIT_AND:
-      answer_object = Smi::FromInt(left & right);
-      break;
-    case Token::BIT_XOR:
-      answer_object = Smi::FromInt(left ^ right);
-      break;
-
-    case Token::SHL: {
-        int shift_amount = right & 0x1F;
-        if (Smi::IsValid(left << shift_amount)) {
-          answer_object = Smi::FromInt(left << shift_amount);
-        }
-        break;
-      }
-    case Token::SHR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        unsigned_left >>= shift_amount;
-        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
-          answer_object = Smi::FromInt(unsigned_left);
-        }
-        break;
-      }
-    case Token::SAR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        if (left < 0) {
-          // Perform arithmetic shift of a negative number by
-          // complementing number, logical shifting, complementing again.
-          unsigned_left = ~unsigned_left;
-          unsigned_left >>= shift_amount;
-          unsigned_left = ~unsigned_left;
-        } else {
-          unsigned_left >>= shift_amount;
-        }
-        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
-        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
-        break;
-      }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  if (answer_object->IsUndefined()) {
-    return false;
-  }
-  frame_->Push(Handle<Object>(answer_object));
-  return true;
-}
-
-
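The SAR case of FoldConstantSmis uses the complement trick spelled out in
its comment: an arithmetic right shift of a negative number can be computed
with purely unsigned, portable operations by complementing, shifting
logically, and complementing again. A standalone restatement (not V8 code):

#include <cassert>
#include <cstdint>

// Arithmetic right shift of a negative value via unsigned operations.
int32_t ArithmeticShiftRight(int32_t left, int shift_amount) {
  uint32_t u = static_cast<uint32_t>(left);
  if (left < 0) {
    u = ~u;              // leading ones become zeros
    u >>= shift_amount;  // plain logical shift
    u = ~u;              // restore the leading ones
  } else {
    u >>= shift_amount;
  }
  return static_cast<int32_t>(u);
}

int main() {
  assert(ArithmeticShiftRight(-8, 2) == -2);
  assert(ArithmeticShiftRight(-1, 5) == -1);
  assert(ArithmeticShiftRight(40, 3) == 5);
}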
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
-                                               Result* right,
-                                               JumpTarget* both_smi) {
-  TypeInfo left_info = left->type_info();
-  TypeInfo right_info = right->type_info();
-  if (left_info.IsDouble() || left_info.IsString() ||
-      right_info.IsDouble() || right_info.IsString()) {
-    // We know that left and right are not both smi.  Don't do any tests.
-    return;
-  }
-
-  if (left->reg().is(right->reg())) {
-    if (!left_info.IsSmi()) {
-      __ test(left->reg(), Immediate(kSmiTagMask));
-      both_smi->Branch(zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(), left->reg());
-      __ or_(temp.reg(), Operand(right->reg()));
-      __ test(temp.reg(), Immediate(kSmiTagMask));
-      temp.Unuse();
-      both_smi->Branch(zero);
-    } else {
-      __ test(left->reg(), Immediate(kSmiTagMask));
-      both_smi->Branch(zero);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    if (!right_info.IsSmi()) {
-      __ test(right->reg(), Immediate(kSmiTagMask));
-      both_smi->Branch(zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  Register scratch,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  DeferredCode* deferred) {
-  JumpIfNotBothSmiUsingTypeInfo(left,
-                                right,
-                                scratch,
-                                left_info,
-                                right_info,
-                                deferred->entry_label());
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  Register scratch,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  Label* on_not_smi) {
-  if (left.is(right)) {
-    if (!left_info.IsSmi()) {
-      __ test(left, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left);
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      __ mov(scratch, left);
-      __ or_(scratch, Operand(right));
-      __ test(scratch, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-    } else {
-      __ test(left, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-      if (FLAG_debug_code) __ AbortIfNotSmi(right);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left);
-    if (!right_info.IsSmi()) {
-      __ test(right, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right);
-    }
-  }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
-                                               Result* left,
-                                               Result* right,
-                                               OverwriteMode overwrite_mode) {
-  // Copy the type info because left and right may be overwritten.
-  TypeInfo left_type_info = left->type_info();
-  TypeInfo right_type_info = right->type_info();
-  Token::Value op = expr->op();
-  Result answer;
-  // Special handling of div and mod because they use fixed registers.
-  if (op == Token::DIV || op == Token::MOD) {
-    // We need eax as the quotient register, edx as the remainder
-    // register, neither left nor right in eax or edx, and left copied
-    // to eax.
-    Result quotient;
-    Result remainder;
-    bool left_is_in_eax = false;
-    // Step 1: get eax for quotient.
-    if ((left->is_register() && left->reg().is(eax)) ||
-        (right->is_register() && right->reg().is(eax))) {
-      // One or both is in eax.  Use a fresh non-edx register for
-      // them.
-      Result fresh = allocator_->Allocate();
-      ASSERT(fresh.is_valid());
-      if (fresh.reg().is(edx)) {
-        remainder = fresh;
-        fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-      }
-      if (left->is_register() && left->reg().is(eax)) {
-        quotient = *left;
-        *left = fresh;
-        left_is_in_eax = true;
-      }
-      if (right->is_register() && right->reg().is(eax)) {
-        quotient = *right;
-        *right = fresh;
-      }
-      __ mov(fresh.reg(), eax);
-    } else {
-      // Neither left nor right is in eax.
-      quotient = allocator_->Allocate(eax);
-    }
-    ASSERT(quotient.is_register() && quotient.reg().is(eax));
-    ASSERT(!(left->is_register() && left->reg().is(eax)));
-    ASSERT(!(right->is_register() && right->reg().is(eax)));
-
-    // Step 2: get edx for remainder if necessary.
-    if (!remainder.is_valid()) {
-      if ((left->is_register() && left->reg().is(edx)) ||
-          (right->is_register() && right->reg().is(edx))) {
-        Result fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-        if (left->is_register() && left->reg().is(edx)) {
-          remainder = *left;
-          *left = fresh;
-        }
-        if (right->is_register() && right->reg().is(edx)) {
-          remainder = *right;
-          *right = fresh;
-        }
-        __ mov(fresh.reg(), edx);
-      } else {
-        // Neither left nor right is in edx.
-        remainder = allocator_->Allocate(edx);
-      }
-    }
-    ASSERT(remainder.is_register() && remainder.reg().is(edx));
-    ASSERT(!(left->is_register() && left->reg().is(edx)));
-    ASSERT(!(right->is_register() && right->reg().is(edx)));
-
-    left->ToRegister();
-    right->ToRegister();
-    frame_->Spill(eax);
-    frame_->Spill(edx);
-    // DeferredInlineBinaryOperation requires all the registers that it is
-    // told about to be spilled and distinct.
-    Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-
-    // Check that left and right are smi tagged.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          (op == Token::DIV) ? eax : edx,
-                                          left->reg(),
-                                          distinct_right.reg(),
-                                          left_type_info,
-                                          right_type_info,
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
-                                  left_type_info, right_type_info, deferred);
-    if (!left_is_in_eax) {
-      __ mov(eax, left->reg());
-    }
-    // Sign extend eax into edx:eax.
-    __ cdq();
-    // Check for 0 divisor.
-    __ test(right->reg(), Operand(right->reg()));
-    deferred->Branch(zero);
-    // Divide edx:eax by the right operand.
-    __ idiv(right->reg());
-
-    // Complete the operation.
-    if (op == Token::DIV) {
-      // Check for negative zero result.  If result is zero, and divisor
-      // is negative, return a floating point negative zero.  The
-      // virtual frame is unchanged in this block, so local control flow
-      // can use a Label rather than a JumpTarget.  If the context of this
-      // expression will treat -0 like 0, do not do this test.
-      if (!expr->no_negative_zero()) {
-        Label non_zero_result;
-        __ test(left->reg(), Operand(left->reg()));
-        __ j(not_zero, &non_zero_result);
-        __ test(right->reg(), Operand(right->reg()));
-        deferred->Branch(negative);
-        __ bind(&non_zero_result);
-      }
-      // Check for the corner case of dividing the most negative smi by
-      // -1. We cannot use the overflow flag, since it is not set by the
-      // idiv instruction.
-      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmp(eax, 0x40000000);
-      deferred->Branch(equal);
-      // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
-      deferred->Branch(not_zero);
-      // Tag the result and store it in the quotient register.
-      __ SmiTag(eax);
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = quotient;
-    } else {
-      ASSERT(op == Token::MOD);
-      // Check for a negative zero result.  If the result is zero, and
-      // the dividend is negative, return a floating point negative
-      // zero.  The frame is unchanged in this block, so local control
-      // flow can use a Label rather than a JumpTarget.
-      if (!expr->no_negative_zero()) {
-        Label non_zero_result;
-        __ test(edx, Operand(edx));
-        __ j(not_zero, &non_zero_result, taken);
-        __ test(left->reg(), Operand(left->reg()));
-        deferred->Branch(negative);
-        __ bind(&non_zero_result);
-      }
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = remainder;
-    }
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Special handling of shift operations because they use fixed
-  // registers.
-  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
-    // Move left out of ecx if necessary.
-    if (left->is_register() && left->reg().is(ecx)) {
-      *left = allocator_->Allocate();
-      ASSERT(left->is_valid());
-      __ mov(left->reg(), ecx);
-    }
-    right->ToRegister(ecx);
-    left->ToRegister();
-    ASSERT(left->is_register() && !left->reg().is(ecx));
-    ASSERT(right->is_register() && right->reg().is(ecx));
-    if (left_type_info.IsSmi()) {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    }
-    if (right_type_info.IsSmi()) {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-    }
-
-    // We will modify right, so it must be spilled.
-    frame_->Spill(ecx);
-    // DeferredInlineBinaryOperation requires all the registers that it is told
-    // about to be spilled and distinct.  We know that right is ecx and left is
-    // not ecx.
-    frame_->Spill(left->reg());
-
-    // Use a fresh answer register to avoid spilling the left operand.
-    answer = allocator_->Allocate();
-    ASSERT(answer.is_valid());
-
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          answer.reg(),
-                                          left->reg(),
-                                          ecx,
-                                          left_type_info,
-                                          right_type_info,
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                  left_type_info, right_type_info,
-                                  deferred->NonSmiInputLabel());
-
-    // Untag both operands.
-    __ mov(answer.reg(), left->reg());
-    __ SmiUntag(answer.reg());
-    __ SmiUntag(right->reg());  // Right is ecx.
-
-    // Perform the operation.
-    ASSERT(right->reg().is(ecx));
-    switch (op) {
-      case Token::SAR: {
-        __ sar_cl(answer.reg());
-        if (!left_type_info.IsSmi()) {
-          // Check that the *signed* result fits in a smi.
-          __ cmp(answer.reg(), 0xc0000000);
-          deferred->JumpToAnswerOutOfRange(negative);
-        }
-        break;
-      }
-      case Token::SHR: {
-        __ shr_cl(answer.reg());
-        // Check that the *unsigned* result fits in a smi.  Neither of
-        // the two high-order bits can be set:
-        //  * 0x80000000: high bit would be lost when smi tagging.
-        //  * 0x40000000: this number would convert to negative when smi
-        //    tagging.
-        // These two cases can only happen with shifts by 0 or 1 when
-        // handed a valid smi.  If the answer cannot be represented by a
-        // smi, restore the left and right arguments, and jump to the slow
-        // case.  The low bit of the left argument may be lost, but only
-        // in a case where it is dropped anyway.
-        __ test(answer.reg(), Immediate(0xc0000000));
-        deferred->JumpToAnswerOutOfRange(not_zero);
-        break;
-      }
-      case Token::SHL: {
-        __ shl_cl(answer.reg());
-        // Check that the *signed* result fits in a smi.
-        __ cmp(answer.reg(), 0xc0000000);
-        deferred->JumpToAnswerOutOfRange(negative);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-    // Smi-tag the result in answer.
-    __ SmiTag(answer.reg());
-    deferred->BindExit();
-    left->Unuse();
-    right->Unuse();
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Handle the other binary operations.
-  left->ToRegister();
-  right->ToRegister();
-  // DeferredInlineBinaryOperation requires all the registers that it is told
-  // about to be spilled.
-  Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-  // A newly allocated register, answer, is used to hold the result.  The
-  // registers containing left and right are not modified so they don't
-  // need to be spilled in the fast case.
-  answer = allocator_->Allocate();
-  ASSERT(answer.is_valid());
-
-  // Perform the smi tag check.
-  DeferredInlineBinaryOperation* deferred =
-      new DeferredInlineBinaryOperation(op,
-                                        answer.reg(),
-                                        left->reg(),
-                                        distinct_right.reg(),
-                                        left_type_info,
-                                        right_type_info,
-                                        overwrite_mode);
-  Label non_smi_bit_op;
-  if (op != Token::BIT_OR) {
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                  left_type_info, right_type_info,
-                                  deferred->NonSmiInputLabel());
-  }
-
-  __ mov(answer.reg(), left->reg());
-  switch (op) {
-    case Token::ADD:
-      __ add(answer.reg(), Operand(right->reg()));
-      deferred->Branch(overflow);
-      break;
-
-    case Token::SUB:
-      __ sub(answer.reg(), Operand(right->reg()));
-      deferred->Branch(overflow);
-      break;
-
-    case Token::MUL: {
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
-      // Remove smi tag from the left operand (but keep sign).
-      // Left-hand operand has been copied into answer.
-      __ SmiUntag(answer.reg());
-      // Do multiplication of smis, leaving result in answer.
-      __ imul(answer.reg(), Operand(right->reg()));
-      // Go slow on overflows.
-      deferred->Branch(overflow);
-      // Check for negative zero result.  If product is zero, and one
-      // argument is negative, go to slow case.  The frame is unchanged
-      // in this block, so local control flow can use a Label rather
-      // than a JumpTarget.
-      if (!expr->no_negative_zero()) {
-        Label non_zero_result;
-        __ test(answer.reg(), Operand(answer.reg()));
-        __ j(not_zero, &non_zero_result, taken);
-        __ mov(answer.reg(), left->reg());
-        __ or_(answer.reg(), Operand(right->reg()));
-        deferred->Branch(negative);
-        __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
-        __ bind(&non_zero_result);
-      }
-      break;
-    }
-
-    case Token::BIT_OR:
-      __ or_(answer.reg(), Operand(right->reg()));
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      __ j(not_zero, deferred->NonSmiInputLabel());
-      break;
-
-    case Token::BIT_AND:
-      __ and_(answer.reg(), Operand(right->reg()));
-      break;
-
-    case Token::BIT_XOR:
-      __ xor_(answer.reg(), Operand(right->reg()));
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  deferred->BindExit();
-  left->Unuse();
-  right->Unuse();
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
-
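// NOTE: a hedged sketch (illustrative, not V8 API) of the DIV fast-path
// conditions guarded in LikelySmiBinaryOperation above: a zero divisor, a
// -0.0 result (0 / negative), a non-zero remainder, and a quotient outside
// the 31-bit smi range (smi-min / -1 gives 2^30, one past smi-max) all fall
// back to the deferred slow path.
#include <cstdint>
#include <optional>

// left and right are untagged smi values, so they fit in 31 signed bits.
std::optional<int32_t> SmiDivFastPath(int32_t left, int32_t right) {
  const int32_t kLocalSmiMax = 0x3FFFFFFF;   // 2^30 - 1
  const int32_t kLocalSmiMin = -0x40000000;  // -2^30
  if (right == 0) return std::nullopt;              // division by zero
  if (left == 0 && right < 0) return std::nullopt;  // result would be -0.0
  if (left % right != 0) return std::nullopt;       // remainder must be zero
  int64_t quotient = static_cast<int64_t>(left) / right;
  if (quotient < kLocalSmiMin || quotient > kLocalSmiMax) return std::nullopt;
  return static_cast<int32_t>(quotient);
}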
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             Register dst,
-                             Register src,
-                             TypeInfo type_info,
-                             Smi* value,
-                             OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        src_(src),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
-    set_comment("[ DeferredInlineSmiOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register src_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
-  // For mod we don't generate all the Smi code inline.
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  stub.GenerateCall(masm_, src_, value_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
-  DeferredInlineSmiOperationReversed(Token::Value op,
-                                     Register dst,
-                                     Smi* value,
-                                     Register src,
-                                     TypeInfo type_info,
-                                     OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        src_(src),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperationReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  Register src_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  stub.GenerateCall(masm_, value_, src_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src + value is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
-  DeferredInlineSmiAdd(Register dst,
-                       TypeInfo type_info,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
-    set_comment("[ DeferredInlineSmiAdd");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  __ sub(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(
-      Token::ADD,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of value + src is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
-  DeferredInlineSmiAddReversed(Register dst,
-                               TypeInfo type_info,
-                               Smi* value,
-                               OverwriteMode overwrite_mode)
-      : dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAddReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  __ sub(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(
-      Token::ADD,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, value_, dst_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src - value is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract.  The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
-  DeferredInlineSmiSub(Register dst,
-                       TypeInfo type_info,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
-    set_comment("[ DeferredInlineSmiSub");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
-  // Undo the optimistic sub operation and call the shared stub.
-  __ add(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(
-      Token::SUB,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
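// NOTE: the DeferredInlineSmiAdd/Sub classes above implement "optimistic"
// arithmetic: perform the operation first and, only if the CPU overflow flag
// fires, undo it and call the generic stub.  A hedged portable sketch using
// a GCC/Clang builtin; slow_path stands in for the stub call and both
// arguments are assumed to be tagged smis.
#include <cstdint>

int32_t OptimisticAdd(int32_t dst, int32_t value,
                      int32_t (*slow_path)(int32_t, int32_t)) {
  int32_t sum;
  if (__builtin_add_overflow(dst, value, &sum)) {
    // The assembly version must sub() the immediate back out first because
    // dst is clobbered in place; here the original dst is still available.
    return slow_path(dst, value);
  }
  return sum;  // 2a + 2b == 2(a + b), so the sum is already smi tagged.
}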
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                                 Result* operand,
-                                                 Handle<Object> value,
-                                                 bool reversed,
-                                                 OverwriteMode overwrite_mode) {
-  // Generate inline code for a binary operation when one of the
-  // operands is a constant smi.  Consumes the argument "operand".
-  if (IsUnsafeSmi(value)) {
-    Result unsafe_operand(value);
-    if (reversed) {
-      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
-                                      overwrite_mode);
-    } else {
-      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
-                                      overwrite_mode);
-    }
-  }
-
-  // Get the literal value.
-  Smi* smi_value = Smi::cast(*value);
-  int int_value = smi_value->value();
-
-  Token::Value op = expr->op();
-  Result answer;
-  switch (op) {
-    case Token::ADD: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-
-      // Optimistically add.  Call the specialized add stub if the
-      // result is not a smi or overflows.
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
-                                                    operand->type_info(),
-                                                    smi_value,
-                                                    overwrite_mode);
-      } else {
-        deferred = new DeferredInlineSmiAdd(operand->reg(),
-                                            operand->type_info(),
-                                            smi_value,
-                                            overwrite_mode);
-      }
-      __ add(Operand(operand->reg()), Immediate(value));
-      deferred->Branch(overflow);
-      if (!operand->type_info().IsSmi()) {
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-      } else if (FLAG_debug_code) {
-        __ AbortIfNotSmi(operand->reg());
-      }
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::SUB: {
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        // The reversed case is only hit when the right operand is not a
-        // constant.
-        ASSERT(operand->is_register());
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        __ Set(answer.reg(), Immediate(value));
-        deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   operand->type_info(),
-                                                   overwrite_mode);
-        __ sub(answer.reg(), Operand(operand->reg()));
-      } else {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        answer = *operand;
-        deferred = new DeferredInlineSmiSub(operand->reg(),
-                                            operand->type_info(),
-                                            smi_value,
-                                            overwrite_mode);
-        __ sub(Operand(operand->reg()), Immediate(value));
-      }
-      deferred->Branch(overflow);
-      if (!operand->type_info().IsSmi()) {
-        __ test(answer.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-      } else if (FLAG_debug_code) {
-        __ AbortIfNotSmi(operand->reg());
-      }
-      deferred->BindExit();
-      operand->Unuse();
-      break;
-    }
-
-    case Token::SAR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        if (!operand->type_info().IsSmi()) {
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             operand->type_info(),
-                                             smi_value,
-                                             overwrite_mode);
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-          if (shift_value > 0) {
-            __ sar(operand->reg(), shift_value);
-            __ and_(operand->reg(), ~kSmiTagMask);
-          }
-          deferred->BindExit();
-        } else {
-          if (FLAG_debug_code) {
-            __ AbortIfNotSmi(operand->reg());
-          }
-          if (shift_value > 0) {
-            __ sar(operand->reg(), shift_value);
-            __ and_(operand->reg(), ~kSmiTagMask);
-          }
-        }
-        answer = *operand;
-      }
-      break;
-
-    case Token::SHR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           answer.reg(),
-                                           operand->reg(),
-                                           operand->type_info(),
-                                           smi_value,
-                                           overwrite_mode);
-        if (!operand->type_info().IsSmi()) {
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-        } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(operand->reg());
-        }
-        __ mov(answer.reg(), operand->reg());
-        __ SmiUntag(answer.reg());
-        __ shr(answer.reg(), shift_value);
-        // A negative Smi shifted right by two or more is in the positive
-        // Smi range.
-        if (shift_value < 2) {
-          __ test(answer.reg(), Immediate(0xc0000000));
-          deferred->Branch(not_zero);
-        }
-        operand->Unuse();
-        __ SmiTag(answer.reg());
-        deferred->BindExit();
-      }
-      break;
-
-    case Token::SHL:
-      if (reversed) {
-        // Move operand into ecx and also into a second register.
-        // If operand is already in a register, take advantage of that.
-        // This lets us modify ecx, but still bail out to deferred code.
-        Result right;
-        Result right_copy_in_ecx;
-        TypeInfo right_type_info = operand->type_info();
-        operand->ToRegister();
-        if (operand->reg().is(ecx)) {
-          right = allocator()->Allocate();
-          __ mov(right.reg(), ecx);
-          frame_->Spill(ecx);
-          right_copy_in_ecx = *operand;
-        } else {
-          right_copy_in_ecx = allocator()->Allocate(ecx);
-          __ mov(ecx, operand->reg());
-          right = *operand;
-        }
-        operand->Unuse();
-
-        answer = allocator()->Allocate();
-        DeferredInlineSmiOperationReversed* deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   right.reg(),
-                                                   right_type_info,
-                                                   overwrite_mode);
-        __ mov(answer.reg(), Immediate(int_value));
-        __ sar(ecx, kSmiTagSize);
-        if (!right_type_info.IsSmi()) {
-          deferred->Branch(carry);
-        } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(right.reg());
-        }
-        __ shl_cl(answer.reg());
-        __ cmp(answer.reg(), 0xc0000000);
-        deferred->Branch(sign);
-        __ SmiTag(answer.reg());
-
-        deferred->BindExit();
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        if (shift_value == 0) {
-          // Spill operand so it can be overwritten in the slow case.
-          frame_->Spill(operand->reg());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             operand->type_info(),
-                                             smi_value,
-                                             overwrite_mode);
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-          deferred->BindExit();
-          answer = *operand;
-        } else {
-          // Use a fresh temporary for nonzero shift values.
-          answer = allocator()->Allocate();
-          ASSERT(answer.is_valid());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             answer.reg(),
-                                             operand->reg(),
-                                             operand->type_info(),
-                                             smi_value,
-                                             overwrite_mode);
-          if (!operand->type_info().IsSmi()) {
-            __ test(operand->reg(), Immediate(kSmiTagMask));
-            deferred->Branch(not_zero);
-          } else if (FLAG_debug_code) {
-            __ AbortIfNotSmi(operand->reg());
-          }
-          __ mov(answer.reg(), operand->reg());
-          STATIC_ASSERT(kSmiTag == 0);  // adjust code if not the case
-          // We do no shifts, only the Smi conversion, if shift_value is 1.
-          if (shift_value > 1) {
-            __ shl(answer.reg(), shift_value - 1);
-          }
-          // Convert int result to Smi, checking that it is in int range.
-          STATIC_ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-          __ add(answer.reg(), Operand(answer.reg()));
-          deferred->Branch(overflow);
-          deferred->BindExit();
-          operand->Unuse();
-        }
-      }
-      break;
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      operand->ToRegister();
-      // DeferredInlineBinaryOperation requires all the registers that it is
-      // told about to be spilled.
-      frame_->Spill(operand->reg());
-      DeferredInlineBinaryOperation* deferred = NULL;
-      if (!operand->type_info().IsSmi()) {
-        Result left = allocator()->Allocate();
-        ASSERT(left.is_valid());
-        Result right = allocator()->Allocate();
-        ASSERT(right.is_valid());
-        deferred = new DeferredInlineBinaryOperation(
-            op,
-            operand->reg(),
-            left.reg(),
-            right.reg(),
-            operand->type_info(),
-            TypeInfo::Smi(),
-            overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->JumpToConstantRhs(not_zero, smi_value);
-      } else if (FLAG_debug_code) {
-        __ AbortIfNotSmi(operand->reg());
-      }
-      if (op == Token::BIT_AND) {
-        __ and_(Operand(operand->reg()), Immediate(value));
-      } else if (op == Token::BIT_XOR) {
-        if (int_value != 0) {
-          __ xor_(Operand(operand->reg()), Immediate(value));
-        }
-      } else {
-        ASSERT(op == Token::BIT_OR);
-        if (int_value != 0) {
-          __ or_(Operand(operand->reg()), Immediate(value));
-        }
-      }
-      if (deferred != NULL) deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::DIV:
-      if (!reversed && int_value == 2) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           operand->type_info(),
-                                           smi_value,
-                                           overwrite_mode);
-        // Check that the lowest log2(value) bits of the operand are zero,
-        // and test the smi tag at the same time.
-        STATIC_ASSERT(kSmiTag == 0);
-        STATIC_ASSERT(kSmiTagSize == 1);
-        __ test(operand->reg(), Immediate(3));
-        deferred->Branch(not_zero);  // Branch if non-smi or odd smi.
-        __ sar(operand->reg(), 1);
-        deferred->BindExit();
-        answer = *operand;
-      } else {
-        // Cannot fall through MOD to default case, so we duplicate the
-        // default case here.
-        Result constant_operand(value);
-        if (reversed) {
-          answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                            overwrite_mode);
-        } else {
-          answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                            overwrite_mode);
-        }
-      }
-      break;
-
-    // Generate inline code for mod of powers of 2 and negative powers of 2.
-    case Token::MOD:
-      if (!reversed &&
-          int_value != 0 &&
-          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           operand->type_info(),
-                                           smi_value,
-                                           overwrite_mode);
-        // Check for negative or non-Smi left hand side.
-        __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
-        deferred->Branch(not_zero);
-        if (int_value < 0) int_value = -int_value;
-        if (int_value == 1) {
-          __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
-        } else {
-          __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
-        }
-        deferred->BindExit();
-        answer = *operand;
-        break;
-      }
-      // Fall through if we did not find a power of 2 on the right hand side!
-      // The next case must be the default.
-
-    default: {
-      Result constant_operand(value);
-      if (reversed) {
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                          overwrite_mode);
-      }
-      break;
-    }
-  }
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
-
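// NOTE: a hedged sketch of the identity behind the MOD fast path above.  The
// generated code masks the *tagged* value directly: a tagged smi is 2x, and
// (2x) & ((m << 1) - 1) == 2 * (x & (m - 1)), so the result stays tagged.
// This only holds for a non-negative dividend, which the combined
// kSmiTagMask | kSmiSignMask test guarantees.
#include <cassert>
#include <cstdint>

int32_t ModPowerOfTwo(int32_t x, int32_t m) {
  assert(x >= 0);
  assert(m > 0 && (m & (m - 1)) == 0);  // m must be a power of two
  return x & (m - 1);                   // x mod 2^k == x & (2^k - 1)
}
// Example: ModPowerOfTwo(13, 8) == 5.  A negative power-of-2 divisor reduces
// to the same mask because the sign of x % m follows the dividend.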
-static bool CouldBeNaN(const Result& result) {
-  if (result.type_info().IsSmi()) return false;
-  if (result.type_info().IsInteger32()) return false;
-  if (!result.is_constant()) return true;
-  if (!result.handle()->IsHeapNumber()) return false;
-  return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
-  switch (cc) {
-    case less:          return below;
-    case equal:         return equal;
-    case less_equal:    return below_equal;
-    case greater:       return above;
-    case greater_equal: return above_equal;
-    default:            UNREACHABLE();
-  }
-  UNREACHABLE();
-  return equal;
-}
-
-
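// NOTE: a conceptual model (not V8 API) of why DoubleCondition above remaps
// signed to unsigned conditions.  ucomisd and the FPU compares set ZF/CF the
// way an unsigned integer compare would, plus PF for unordered (NaN), while
// the signed conditions read SF/OF, which these instructions leave
// meaningless.
#include <cmath>

struct FpFlags { bool zf, cf, pf; };

FpFlags UComiSd(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return {true, true, true};  // unordered
  return {a == b, a < b, false};
}
// `below` tests CF, `above` tests !CF && !ZF, `equal` tests ZF: exactly the
// mapping in DoubleCondition.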
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
-                                        bool inline_number_compare) {
-  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
-  if (nan_info == kCantBothBeNaN) {
-    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
-  }
-  if (inline_number_compare) {
-    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
-  }
-  return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
-                               Condition cc,
-                               bool strict,
-                               ControlDestination* dest) {
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == equal);
-
-  Result left_side;
-  Result right_side;
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == greater || cc == less_equal) {
-    cc = ReverseCondition(cc);
-    left_side = frame_->Pop();
-    right_side = frame_->Pop();
-  } else {
-    right_side = frame_->Pop();
-    left_side = frame_->Pop();
-  }
-  ASSERT(cc == less || cc == equal || cc == greater_equal);
-
-  // If either side is a constant smi, optimize the comparison.
-  bool left_side_constant_smi = false;
-  bool left_side_constant_null = false;
-  bool left_side_constant_1_char_string = false;
-  if (left_side.is_constant()) {
-    left_side_constant_smi = left_side.handle()->IsSmi();
-    left_side_constant_null = left_side.handle()->IsNull();
-    left_side_constant_1_char_string =
-        (left_side.handle()->IsString() &&
-         String::cast(*left_side.handle())->length() == 1 &&
-         String::cast(*left_side.handle())->IsAsciiRepresentation());
-  }
-  bool right_side_constant_smi = false;
-  bool right_side_constant_null = false;
-  bool right_side_constant_1_char_string = false;
-  if (right_side.is_constant()) {
-    right_side_constant_smi = right_side.handle()->IsSmi();
-    right_side_constant_null = right_side.handle()->IsNull();
-    right_side_constant_1_char_string =
-        (right_side.handle()->IsString() &&
-         String::cast(*right_side.handle())->length() == 1 &&
-         String::cast(*right_side.handle())->IsAsciiRepresentation());
-  }
-
-  if (left_side_constant_smi || right_side_constant_smi) {
-    bool is_loop_condition = (node->AsExpression() != NULL) &&
-        node->AsExpression()->is_loop_condition();
-    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
-                          left_side_constant_smi, right_side_constant_smi,
-                          is_loop_condition);
-  } else if (left_side_constant_1_char_string ||
-             right_side_constant_1_char_string) {
-    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
-      // Trivial case, comparing two constants.
-      int left_value = String::cast(*left_side.handle())->Get(0);
-      int right_value = String::cast(*right_side.handle())->Get(0);
-      switch (cc) {
-        case less:
-          dest->Goto(left_value < right_value);
-          break;
-        case equal:
-          dest->Goto(left_value == right_value);
-          break;
-        case greater_equal:
-          dest->Goto(left_value >= right_value);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    } else {
-      // Only one side is a constant 1-character string.
-      // If left side is a constant 1-character string, reverse the operands.
-      // Since one side is a constant string, conversion order does not matter.
-      if (left_side_constant_1_char_string) {
-        Result temp = left_side;
-        left_side = right_side;
-        right_side = temp;
-        cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
-        // CompareStub and the inline code both support all values of cc.
-      }
-      // Implement comparison against a constant string, inlining the case
-      // where both sides are strings.
-      left_side.ToRegister();
-
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_not_string, is_string;
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
-      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
-      __ test(left_side.reg(), Immediate(kSmiTagMask));
-      is_not_string.Branch(zero, &left_side);
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(),
-             FieldOperand(left_side.reg(), HeapObject::kMapOffset));
-      __ movzx_b(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-      // If we are testing for equality then make use of the symbol shortcut.
-      // Check if the left hand side has the same type as the right hand
-      // side (which is always a symbol).
-      if (cc == equal) {
-        Label not_a_symbol;
-        STATIC_ASSERT(kSymbolTag != 0);
-        // Ensure that no non-strings have the symbol bit set.
-        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
-        __ test(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
-        __ j(zero, &not_a_symbol);
-        // They are symbols, so do identity compare.
-        __ cmp(left_side.reg(), right_side.handle());
-        dest->true_target()->Branch(equal);
-        dest->false_target()->Branch(not_equal);
-        __ bind(&not_a_symbol);
-      }
-      // Call the compare stub if the left side is not a flat ascii string.
-      __ and_(temp.reg(),
-          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
-      __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
-      temp.Unuse();
-      is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
-      is_not_string.Bind(&left_side);
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, &left_side, &right_side);
-      result.ToRegister();
-      __ cmp(result.reg(), 0);
-      result.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_string.Bind(&left_side);
-      // left_side is a sequential ASCII string.
-      left_side = Result(left_reg);
-      right_side = Result(right_val);
-      // Test string equality and comparison.
-      Label comparison_done;
-      if (cc == equal) {
-        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(Smi::FromInt(1)));
-        __ j(not_equal, &comparison_done);
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                char_value);
-      } else {
-        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(Smi::FromInt(1)));
-        // If the length is 0 then the jump is taken and the flags
-        // correctly represent being less than the one-character string.
-        __ j(below, &comparison_done);
-        // Compare the first character of the string with the
-        // constant 1-character string.
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                char_value);
-        __ j(not_equal, &comparison_done);
-        // If the first character is the same then the long string sorts after
-        // the short one.
-        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(Smi::FromInt(1)));
-      }
-      __ bind(&comparison_done);
-      left_side.Unuse();
-      right_side.Unuse();
-      dest->Split(cc);
-    }
-  } else {
-    // Neither side is a constant Smi, constant 1-char string or constant null.
-    // If either side is a non-smi constant, or known to be a heap number,
-    // skip the smi check.
-    bool known_non_smi =
-        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
-        left_side.type_info().IsDouble() ||
-        right_side.type_info().IsDouble();
-
-    NaNInformation nan_info =
-        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
-        kBothCouldBeNaN :
-        kCantBothBeNaN;
-
-    // Inline the number comparison, handling any combination of smis and
-    // heap numbers, if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a for-loop condition.
-    // The reason for excluding equal is that it will most likely be done
-    // with smis (not heap numbers), and the code for comparing smis is
-    // inlined separately.  The same reasoning applies to for-loop
-    // conditions, which are also most likely smi comparisons.
-    bool is_loop_condition = (node->AsExpression() != NULL)
-        && node->AsExpression()->is_loop_condition();
-    bool inline_number_compare =
-        loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
-    // Left and right needed in registers for the following code.
-    left_side.ToRegister();
-    right_side.ToRegister();
-
-    if (known_non_smi) {
-      // Inlined equality check:
-      // If at least one of the objects is not NaN, then if the objects
-      // are identical, they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmp(left_side.reg(), Operand(right_side.reg()));
-        dest->true_target()->Branch(equal);
-      }
-
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-      }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
-      CompareStub stub(cc, strict, flags);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ test(answer.reg(), Operand(answer.reg()));
-      answer.Unuse();
-      dest->Split(cc);
-    } else {
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_smi;
-      Register left_reg = left_side.reg();
-      Register right_reg = right_side.reg();
-
-      // In-line check for comparing two smis.
-      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
-      if (has_valid_frame()) {
-        // Inline the equality check if both operands can't be a NaN. If both
-        // objects are the same they are equal.
-        if (nan_info == kCantBothBeNaN && cc == equal) {
-          __ cmp(left_side.reg(), Operand(right_side.reg()));
-          dest->true_target()->Branch(equal);
-        }
-
-        // Inlined number comparison:
-        if (inline_number_compare) {
-          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-        }
-
-        // End of in-line compare, call out to the compare stub. Don't include
-        // number comparison in the stub if it was inlined.
-        CompareFlags flags =
-            ComputeCompareFlags(nan_info, inline_number_compare);
-        CompareStub stub(cc, strict, flags);
-        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-        __ test(answer.reg(), Operand(answer.reg()));
-        answer.Unuse();
-        if (is_smi.is_linked()) {
-          dest->true_target()->Branch(cc);
-          dest->false_target()->Jump();
-        } else {
-          dest->Split(cc);
-        }
-      }
-
-      if (is_smi.is_linked()) {
-        is_smi.Bind();
-        left_side = Result(left_reg);
-        right_side = Result(right_reg);
-        __ cmp(left_side.reg(), Operand(right_side.reg()));
-        right_side.Unuse();
-        left_side.Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
-
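// NOTE: a hedged sketch of the operand swap at the top of Comparison().
// Popping the frame in the opposite order swaps the operands while cc is
// reversed, so a > b is evaluated as b < a and a <= b as b >= a, leaving
// only less, equal and greater_equal to support inline.  Names below are
// illustrative.
template <typename T>
bool Greater(const T& a, const T& b) { return b < a; }      // cc = less
template <typename T>
bool LessEqual(const T& a, const T& b) { return b >= a; }   // cc = greater_equal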
-void CodeGenerator::ConstantSmiComparison(Condition cc,
-                                          bool strict,
-                                          ControlDestination* dest,
-                                          Result* left_side,
-                                          Result* right_side,
-                                          bool left_side_constant_smi,
-                                          bool right_side_constant_smi,
-                                          bool is_loop_condition) {
-  if (left_side_constant_smi && right_side_constant_smi) {
-    // Trivial case, comparing two constants.
-    int left_value = Smi::cast(*left_side->handle())->value();
-    int right_value = Smi::cast(*right_side->handle())->value();
-    switch (cc) {
-      case less:
-        dest->Goto(left_value < right_value);
-        break;
-      case equal:
-        dest->Goto(left_value == right_value);
-        break;
-      case greater_equal:
-        dest->Goto(left_value >= right_value);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    // Only one side is a constant Smi.
-    // If left side is a constant Smi, reverse the operands.
-    // Since one side is a constant Smi, conversion order does not matter.
-    if (left_side_constant_smi) {
-      Result* temp = left_side;
-      left_side = right_side;
-      right_side = temp;
-      cc = ReverseCondition(cc);
-      // This may re-introduce greater or less_equal as the value of cc.
-      // CompareStub and the inline code both support all values of cc.
-    }
-    // Implement comparison against a constant Smi, inlining the case
-    // where both sides are Smis.
-    left_side->ToRegister();
-    Register left_reg = left_side->reg();
-    Handle<Object> right_val = right_side->handle();
-
-    if (left_side->is_smi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(left_reg);
-      }
-      // Test smi equality and comparison by signed int comparison.
-      if (IsUnsafeSmi(right_side->handle())) {
-        right_side->ToRegister();
-        __ cmp(left_reg, Operand(right_side->reg()));
-      } else {
-        __ cmp(Operand(left_reg), Immediate(right_side->handle()));
-      }
-      left_side->Unuse();
-      right_side->Unuse();
-      dest->Split(cc);
-    } else {
-      // The only remaining case is where the left side could be a non-smi.
-      JumpTarget is_smi;
-      if (cc == equal) {
-        // We can do the equality comparison before the smi check.
-        __ cmp(Operand(left_reg), Immediate(right_side->handle()));
-        dest->true_target()->Branch(equal);
-        __ test(left_reg, Immediate(kSmiTagMask));
-        dest->false_target()->Branch(zero);
-      } else {
-        // Do the smi check, then the comparison.
-        __ test(left_reg, Immediate(kSmiTagMask));
-        is_smi.Branch(zero, left_side, right_side);
-      }
-
-      // Jump or fall through to here if we are comparing a non-smi to a
-      // constant smi.  If the non-smi is a heap number and this is not
-      // a loop condition, inline the floating point code.
-      if (!is_loop_condition &&
-          CpuFeatures::IsSupported(SSE2)) {
-        // Right side is a constant smi and left side has been checked
-        // not to be a smi.
-        CpuFeatures::Scope use_sse2(SSE2);
-        JumpTarget not_number;
-        __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-               Immediate(FACTORY->heap_number_map()));
-        not_number.Branch(not_equal, left_side);
-        __ movdbl(xmm1,
-                  FieldOperand(left_reg, HeapNumber::kValueOffset));
-        int value = Smi::cast(*right_val)->value();
-        if (value == 0) {
-          __ xorpd(xmm0, xmm0);
-        } else {
-          Result temp = allocator()->Allocate();
-          __ mov(temp.reg(), Immediate(value));
-          __ cvtsi2sd(xmm0, Operand(temp.reg()));
-          temp.Unuse();
-        }
-        __ ucomisd(xmm1, xmm0);
-        // Jump to builtin for NaN.
-        not_number.Branch(parity_even, left_side);
-        left_side->Unuse();
-        dest->true_target()->Branch(DoubleCondition(cc));
-        dest->false_target()->Jump();
-        not_number.Bind(left_side);
-      }
-
-      // Set up and call the compare stub.
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, left_side, right_side);
-      result.ToRegister();
-      __ test(result.reg(), Operand(result.reg()));
-      result.Unuse();
-      if (cc == equal) {
-        dest->Split(cc);
-      } else {
-        dest->true_target()->Branch(cc);
-        dest->false_target()->Jump();
-
-        // It is important for performance for this case to be at the end.
-        is_smi.Bind(left_side, right_side);
-        if (IsUnsafeSmi(right_side->handle())) {
-          right_side->ToRegister();
-          __ cmp(left_reg, Operand(right_side->reg()));
-        } else {
-          __ cmp(Operand(left_reg), Immediate(right_side->handle()));
-        }
-        left_side->Unuse();
-        right_side->Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
-
-// Check that the comparison operand is a number.  Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void CheckComparisonOperand(MacroAssembler* masm_,
-                                   Result* operand,
-                                   Result* left_side,
-                                   Result* right_side,
-                                   JumpTarget* not_numbers) {
-  // Perform check if operand is not known to be a number.
-  if (!operand->type_info().IsNumber()) {
-    Label done;
-    __ test(operand->reg(), Immediate(kSmiTagMask));
-    __ j(zero, &done);
-    __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-           Immediate(FACTORY->heap_number_map()));
-    not_numbers->Branch(not_equal, left_side, right_side, not_taken);
-    __ bind(&done);
-  }
-}
-
-
-// Load a comparison operand onto the FPU stack.  This assumes that the
-// operand has already been checked and is a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
-                                  Result* operand) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiUntag(operand->reg());
-    __ push(operand->reg());
-    __ fild_s(Operand(esp, 0));
-    __ pop(operand->reg());
-    __ SmiTag(operand->reg());
-  } else {
-    // Operand type not known; check for smi, otherwise assume heap number.
-    Label smi;
-    __ test(operand->reg(), Immediate(kSmiTagMask));
-    __ j(zero, &smi);
-    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-    __ bind(&smi);
-    __ SmiUntag(operand->reg());
-    __ push(operand->reg());
-    __ fild_s(Operand(esp, 0));
-    __ pop(operand->reg());
-    __ SmiTag(operand->reg());
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
-
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
-                                      Result* operand,
-                                      XMMRegister xmm_reg,
-                                      Result* left_side,
-                                      Result* right_side,
-                                      JumpTarget* not_numbers) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiUntag(operand->reg());
-    __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
-    __ SmiTag(operand->reg());
-  } else {
-    // Operand type not known: check for a smi or a heap number.
-    Label smi;
-    __ test(operand->reg(), Immediate(kSmiTagMask));
-    __ j(zero, &smi);
-    if (!operand->type_info().IsNumber()) {
-      __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-             Immediate(FACTORY->heap_number_map()));
-      not_numbers->Branch(not_equal, left_side, right_side, taken);
-    }
-    __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-
-    __ bind(&smi);
-    // Convert the smi to a double and keep the original smi.
-    __ SmiUntag(operand->reg());
-    __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
-    __ SmiTag(operand->reg());
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
-
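-// Emit inline code to compare two number operands.  Jumps to the
-// not_numbers target, passing the left and right results, if either
-// operand is not a number or if the comparison involves a NaN.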
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
-                                                   Result* right_side,
-                                                   Condition cc,
-                                                   ControlDestination* dest) {
-  ASSERT(left_side->is_register());
-  ASSERT(right_side->is_register());
-
-  JumpTarget not_numbers;
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-
-    // Load the left and right operands into xmm0 and xmm1 and compare them.
-    LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
-                              &not_numbers);
-    LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
-                              &not_numbers);
-    __ ucomisd(xmm0, xmm1);
-  } else {
-    Label check_right, compare;
-
-    // Make sure that both comparison operands are numbers.
-    CheckComparisonOperand(masm_, left_side, left_side, right_side,
-                           &not_numbers);
-    CheckComparisonOperand(masm_, right_side, left_side, right_side,
-                           &not_numbers);
-
-    // Load the right and left operands onto the FPU stack and compare them.
-    LoadComparisonOperand(masm_, right_side);
-    LoadComparisonOperand(masm_, left_side);
-    __ FCmp();
-  }
-
-  // Bail out if a NaN is involved.
-  not_numbers.Branch(parity_even, left_side, right_side, not_taken);
-
-  // Split to destination targets based on comparison.
-  left_side->Unuse();
-  right_side->Unuse();
-  dest->true_target()->Branch(DoubleCondition(cc));
-  dest->false_target()->Jump();
-
-  not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-    frame_->SpillTop();
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  Result answer = frame_->CallStub(&call_function, arg_count + 1);
-  // Restore context and replace function on the stack with the
-  // result of the stub invocation.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  frame()->Dup();
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame()->Push(name);
-  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
-  __ nop();
-  frame()->Push(&answer);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of frame at this point:
-  // Frame[0]: arguments object of the current function or the hole.
-  // Frame[1]: receiver
-  // Frame[2]: applicand.apply
-  // Frame[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  frame_->Dup();
-  Result probe = frame_->Pop();
-  { VirtualFrame::SpilledScope spilled_scope;
-    Label slow, done;
-    bool try_lazy = true;
-    if (probe.is_constant()) {
-      try_lazy = probe.handle()->IsArgumentsMarker();
-    } else {
-      __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
-      probe.Unuse();
-      __ j(not_equal, &slow);
-    }
-
-    if (try_lazy) {
-      Label build_args;
-      // Get rid of the arguments object probe.
-      frame_->Drop();  // Can be called on a spilled frame.
-      // Stack now has 3 elements on it.
-      // Contents of stack at this point:
-      // esp[0]: receiver
-      // esp[1]: applicand.apply
-      // esp[2]: applicand.
-
-      // Check that the receiver really is a JavaScript object.
-      __ mov(eax, Operand(esp, 0));
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(zero, &build_args);
-      // We allow all JSObjects including JSFunctions.  As long as
-      // JS_FUNCTION_TYPE is the last instance type and it is right
-      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-      // bound.
-      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-      __ j(below, &build_args);
-
-      // Check that applicand.apply is Function.prototype.apply.
-      __ mov(eax, Operand(esp, kPointerSize));
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(zero, &build_args);
-      __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
-      __ j(not_equal, &build_args);
-      __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
-      __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
-      Handle<Code> apply_code(masm()->isolate()->builtins()->builtin(
-          Builtins::kFunctionApply));
-      __ cmp(Operand(ecx), Immediate(apply_code));
-      __ j(not_equal, &build_args);
-
-      // Check that applicand is a function.
-      __ mov(edi, Operand(esp, 2 * kPointerSize));
-      __ test(edi, Immediate(kSmiTagMask));
-      __ j(zero, &build_args);
-      __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-      __ j(not_equal, &build_args);
-
-      // Copy the arguments to this function possibly from the
-      // adaptor frame below it.
-      Label invoke, adapted;
-      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-      __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-      __ cmp(Operand(ecx),
-             Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-      __ j(equal, &adapted);
-
-      // No arguments adaptor frame. Copy fixed number of arguments.
-      __ mov(eax, Immediate(scope()->num_parameters()));
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        __ push(frame_->ParameterAt(i));
-      }
-      __ jmp(&invoke);
-
-      // Arguments adaptor frame present. Copy arguments from there, but
-      // cap the number copied to avoid overflowing the stack.
-      __ bind(&adapted);
-      static const uint32_t kArgumentsLimit = 1 * KB;
-      __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-      __ SmiUntag(eax);
-      __ mov(ecx, Operand(eax));
-      __ cmp(eax, kArgumentsLimit);
-      __ j(above, &build_args);
-
-      // Loop through the arguments pushing them onto the execution
-      // stack. We don't inform the virtual frame of the push, so we don't
-      // have to worry about getting rid of the elements from the virtual
-      // frame.
-      Label loop;
-      // ecx is a small non-negative integer, due to the test above.
-      __ test(ecx, Operand(ecx));
-      __ j(zero, &invoke);
-      __ bind(&loop);
-      __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
-      __ dec(ecx);
-      __ j(not_zero, &loop);
-
-      // Invoke the function.
-      __ bind(&invoke);
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, actual, CALL_FUNCTION);
-      // Drop applicand.apply and applicand from the stack, and push
-      // the result of the function call, but leave the spilled frame
-      // unchanged, with 3 elements, so it is correct when we compile the
-      // slow-case code.
-      __ add(Operand(esp), Immediate(2 * kPointerSize));
-      __ push(eax);
-      // Stack now has 1 element:
-      //   esp[0]: result
-      __ jmp(&done);
-
-      // Slow case: Allocate the arguments object since we know it isn't
-      // there, and fall through to the slow case where we call
-      // applicand.apply.
-      __ bind(&build_args);
-      // Stack now has 3 elements, because we jumped here from code where:
-      // esp[0]: receiver
-      // esp[1]: applicand.apply
-      // esp[2]: applicand.
-
-      // StoreArgumentsObject requires a correct frame, and may modify it.
-      Result arguments_object = StoreArgumentsObject(false);
-      frame_->SpillAll();
-      arguments_object.ToRegister();
-      frame_->EmitPush(arguments_object.reg());
-      arguments_object.Unuse();
-      // Stack and frame now have 4 elements.
-      __ bind(&slow);
-    }
-
-    // Generic computation of x.apply(y, args) with no special optimization.
-    // Flip applicand.apply and applicand on the stack, so
-    // applicand looks like the receiver of the applicand.apply call.
-    // Then process it as a normal function call.
-    __ mov(eax, Operand(esp, 3 * kPointerSize));
-    __ mov(ebx, Operand(esp, 2 * kPointerSize));
-    __ mov(Operand(esp, 2 * kPointerSize), eax);
-    __ mov(Operand(esp, 3 * kPointerSize), ebx);
-
-    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-    Result res = frame_->CallStub(&call_function, 3);
-    // The function and its two arguments have been dropped.
-    frame_->Drop(1);  // Drop the receiver as well.
-    res.ToRegister();
-    frame_->EmitPush(res.reg());
-    // Stack now has 1 element:
-    //   esp[0]: result
-    if (try_lazy) __ bind(&done);
-  }  // End of spilled scope.
-  // Restore the context register after a call.
-  frame_->RestoreContextRegister();
-}
-
-
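-// Deferred code object for stack checks: calls the StackCheckStub out of
-// line when the inline stack limit check in CheckStack() fails.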
-class DeferredStackCheck: public DeferredCode {
- public:
-  DeferredStackCheck() {
-    set_comment("[ DeferredStackCheck");
-  }
-
-  virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
-  StackCheckStub stub;
-  __ CallStub(&stub);
-}
-
-
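-// Compare esp against the stack limit inline and branch to the deferred
-// stack check when the limit has been exceeded.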
-void CodeGenerator::CheckStack() {
-  DeferredStackCheck* deferred = new DeferredStackCheck;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm()->isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  deferred->Branch(below);
-  deferred->BindExit();
-}
-
-
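-// Visit a statement with the frame temporarily in non-spilled mode, then
-// spill everything again so the surrounding spilled code can rely on a
-// spilled frame.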
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Visit(statement);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
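-// As VisitAndSpill, but for a whole list of statements.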
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  VisitStatements(statements);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  // Call the runtime to declare the globals.  The inevitable call
-  // will sync frame elements to memory anyway, so we do it eagerly to
-  // allow us to push the arguments directly into place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  frame_->EmitPush(esi);  // The context is the first argument.
-  frame_->EmitPush(Immediate(pairs));
-  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
-  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.  Sync the virtual frame eagerly
-    // so we can simply push the arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(var->name()));
-    // Declaration nodes are always introduced in one of two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-  if (val != NULL) {
-    {
-      // Set the initial value.
-      Reference target(this, node->proxy());
-      Load(val);
-      target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
-    }
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  // Remove the lingering expression result from the top of stack.
-  frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present or not.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    JumpTarget then;
-    JumpTarget else_;
-    ControlDestination dest(&then, &else_, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The else target was bound, so we compile the else part first.
-      Visit(node->else_statement());
-
-      // We may have dangling jumps to the then part.
-      if (then.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then target was bound, so we compile the then part first.
-      Visit(node->then_statement());
-
-      if (else_.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    }
-
-  } else if (has_then_stm) {
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    ControlDestination dest(&then, &exit, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // then part.
-      if (then.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then label was bound.
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    ControlDestination dest(&exit, &else_, false);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.true_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // else part.
-      if (else_.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    } else {
-      // The else label was bound.
-      Visit(node->else_statement());
-    }
-
-  } else {
-    ASSERT(!has_then_stm && !has_else_stm);
-    // We only care about the condition's side effects (not its value
-    // or control flow effect).  LoadCondition is called without
-    // forcing control flow.
-    ControlDestination dest(&exit, &exit, true);
-    LoadCondition(node->condition(), &dest, false);
-    if (!dest.is_used()) {
-      // We got a value on the frame rather than (or in addition to)
-      // control flow.
-      frame_->Drop();
-    }
-  }
-
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result return_value = frame_->Pop();
-  masm()->positions_recorder()->WriteRecordedPositions();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump(&return_value);
-  } else {
-    frame_->PrepareForReturn();
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump(&return_value);
-    } else {
-      function_return_.Bind(&return_value);
-      GenerateReturnSequence(&return_value);
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
-  // The return value is a live (but not currently reference counted)
-  // reference to eax.  This is safe because the current frame does not
-  // contain a reference to eax (it is prepared for the return by spilling
-  // all registers).
-  if (FLAG_trace) {
-    frame_->Push(return_value);
-    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-  return_value->ToRegister(eax);
-
-  // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-
-  // Leave the frame and return popping the arguments and the
-  // receiver.
-  frame_->Exit();
-  int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
-  __ Ret(arguments_bytes, ecx);
-  DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Check that the size of the code used for returning is large enough
-  // for the debugger's requirements.
-  ASSERT(Assembler::kJSReturnSequenceLength <=
-         masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result context;
-  if (node->is_catch_block()) {
-    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    context = frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-
-  // Update context local.
-  frame_->SaveContextRegister();
-
-  // Verify that the runtime call result and esi agree.
-  if (FLAG_debug_code) {
-    __ cmp(context.reg(), Operand(esi));
-    __ Assert(equal, "Runtime::NewContext should end up in esi");
-  }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
-  // Update context local.
-  frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  // Compile the switch value.
-  Load(node->tag());
-
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  JumpTarget next_test;
-  // Compile the case label expressions and comparisons.  Exit early
-  // if a comparison is unconditionally true.  The target next_test is
-  // bound before the loop in order to indicate control flow to the
-  // first comparison.
-  next_test.Bind();
-  for (int i = 0; i < length && !next_test.is_unused(); i++) {
-    CaseClause* clause = cases->at(i);
-    // The default is not a test, but remember it for later.
-    if (clause->is_default()) {
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case comparison");
-    // We recycle the same target next_test for each test.  Bind it if
-    // the previous test has not done so and then unuse it for the
-    // loop.
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    next_test.Unuse();
-
-    // Duplicate the switch value.
-    frame_->Dup();
-
-    // Compile the label expression.
-    Load(clause->label());
-
-    // Compare and branch to the body if true or the next test if
-    // false.  Prefer the next test as a fall through.
-    ControlDestination dest(clause->body_target(), &next_test, false);
-    Comparison(node, equal, true, &dest);
-
-    // If the comparison fell through to the true target, jump to the
-    // actual body.
-    if (dest.true_was_fall_through()) {
-      clause->body_target()->Unuse();
-      clause->body_target()->Jump();
-    }
-  }
-
-  // If there was control flow to a next test from the last one
-  // compiled, compile a jump to the default or break target.
-  if (!next_test.is_unused()) {
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    // Drop the switch value.
-    frame_->Drop();
-    if (default_clause != NULL) {
-      default_clause->body_target()->Jump();
-    } else {
-      node->break_target()->Jump();
-    }
-  }
-
-  // The last instruction emitted was a jump, either to the default
-  // clause or the break target, or else to a case body from the loop
-  // that compiles the tests.
-  ASSERT(!has_valid_frame());
-  // Compile case bodies as needed.
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-
-    // There are two ways to reach the body: from the corresponding
-    // test or as the fall through of the previous body.
-    if (clause->body_target()->is_linked() || has_valid_frame()) {
-      if (clause->body_target()->is_linked()) {
-        if (has_valid_frame()) {
-          // If we have both a jump to the test and a fall through, put
-          // a jump on the fall through path to avoid the dropping of
-          // the switch value on the test path.  The exception is the
-          // default which has already had the switch value dropped.
-          if (clause->is_default()) {
-            clause->body_target()->Bind();
-          } else {
-            JumpTarget body;
-            body.Jump();
-            clause->body_target()->Bind();
-            frame_->Drop();
-            body.Bind();
-          }
-        } else {
-          // No fall through to worry about.
-          clause->body_target()->Bind();
-          if (!clause->is_default()) {
-            frame_->Drop();
-          }
-        }
-      } else {
-        // Otherwise, we have only fall through.
-        ASSERT(has_valid_frame());
-      }
-
-      // We are now prepared to compile the body.
-      Comment cmnt(masm_, "[ Case body");
-      VisitStatements(clause->statements());
-    }
-    clause->body_target()->Unuse();
-  }
-
-  // We may not have a valid frame here so bind the break target only
-  // if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  // Label the top of the loop for the backward jump if necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // Use the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      // No need to label it.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      break;
-    case DONT_KNOW:
-      // Continue is the test, so use the backward body target.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control flow can fall off the end of the body, jump back
-      // to the top and bind the break target at the exit.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // We may have had continues or breaks in the body.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        ControlDestination dest(&body, node->break_target(), false);
-        LoadCondition(node->cond(), &dest, true);
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-  }
-
-  DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop with the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is the test at the bottom, no need to label the test
-        // at the top.  The body is a backward target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Label the test at the top as the continue target.  The body
-        // is a forward-only target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      }
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // The loop body has been labeled with the continue target.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        // If we have chosen to recompile the test at the bottom,
-        // then it is the continue target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here and thus an invalid fall-through).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // If we have chosen not to recompile the test at the bottom,
-        // jump back to the one at the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
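-// Record static type information for a local or parameter slot and, when
-// debug code is enabled, verify a smi annotation at run time.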
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
-  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
-  if (slot->type() == Slot::LOCAL) {
-    frame_->SetTypeForLocalAt(slot->index(), info);
-  } else {
-    frame_->SetTypeForParamAt(slot->index(), info);
-  }
-  if (FLAG_debug_code && info.IsSmi()) {
-    if (slot->type() == Slot::LOCAL) {
-      frame_->PushLocalAt(slot->index());
-    } else {
-      frame_->PushParameterAt(slot->index());
-    }
-    Result var = frame_->Pop();
-    var.ToRegister();
-    __ AbortIfNotSmi(var.reg());
-  }
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-
-  // Compile the init expression if present.
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything else.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-
-  // Target for backward edge if no test at the bottom, otherwise
-  // unused.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-  // Target for backward edge if there is a test at the bottom,
-  // otherwise used as target for test at the top.
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop.
-      if (node->next() == NULL) {
-        // Use the continue target if there is no update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // Otherwise use the backward loop target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is either the update expression or the test at the
-        // bottom, no need to label the test at the top.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else if (node->next() == NULL) {
-        // We are not recompiling the test at the bottom and there is no
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // We are not recompiling the test at the bottom and there is an
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  // We know that the loop index is a smi if it is not modified in the
-  // loop body and it is checked against a constant limit in the loop
-  // condition.  In this case, we reset the static type information of the
-  // loop index to smi before compiling the body, the update expression, and
-  // the bottom check of the loop condition.
-  if (node->is_fast_smi_loop()) {
-    // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
-  }
-
-  Visit(node->body());
-
-  // If there is an update expression, compile it if necessary.
-  if (node->next() != NULL) {
-    if (node->continue_target()->is_linked()) {
-      node->continue_target()->Bind();
-    }
-
-    // Control can reach the update by falling out of the body or by a
-    // continue.
-    if (has_valid_frame()) {
-      // Record the source position of the statement: this code, which
-      // comes after the code for the body, actually belongs to the loop
-      // statement and not the body.
-      CodeForStatementPosition(node);
-      Visit(node->next());
-    }
-  }
-
-  // Set the type of the loop variable to smi before compiling the test
-  // expression if we are in a fast smi loop condition.
-  if (node->is_fast_smi_loop() && has_valid_frame()) {
-    // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
-  }
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      if (has_valid_frame()) {
-        if (node->next() == NULL) {
-          node->continue_target()->Jump();
-        } else {
-          loop.Jump();
-        }
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        if (node->continue_target()->is_linked()) {
-          // We can have dangling jumps to the continue target if there
-          // was no update expression.
-          node->continue_target()->Bind();
-        }
-        // Control can reach the test at the bottom by falling out of
-        // the body, by a continue in the body, or from the update
-        // expression.
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // Otherwise, jump back to the test at the top.
-        if (has_valid_frame()) {
-          if (node->next() == NULL) {
-            node->continue_target()->Jump();
-          } else {
-            loop.Jump();
-          }
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  LoadAndSpill(node->enumerable());
-
-  // Both SpiderMonkey and kjs ignore null and undefined in contrast
-  // to the specification.  12.6.4 mandates a call to ToObject.
-  frame_->EmitPop(eax);
-
-  // eax: value to be iterated over
-  __ cmp(eax, FACTORY->undefined_value());
-  exit.Branch(equal);
-  __ cmp(eax, FACTORY->null_value());
-  exit.Branch(equal);
-
-  // Stack layout in body:
-  // [iteration counter (smi)] <- slot 0
-  // [length of array]         <- slot 1
-  // [FixedArray]              <- slot 2
-  // [Map or 0]                <- slot 3
-  // [Object]                  <- slot 4
-
-  // Check if enumerable is already a JSObject
-  // eax: value to be iterated over
-  __ test(eax, Immediate(kSmiTagMask));
-  primitive.Branch(zero);
-  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-  jsobject.Branch(above_equal);
-
-  primitive.Bind();
-  frame_->EmitPush(eax);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
-  // The function call returns the value in eax, which is where we want it below.
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // eax: value to be iterated over
-  frame_->EmitPush(eax);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ mov(ecx, eax);
-  loop.Bind();
-  // Check that there are no elements.
-  __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
-  __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
-  call_runtime.Branch(not_equal);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in ebx for the subsequent
-  // prototype load.
-  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
-  __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
-  call_runtime.Branch(equal);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
-  __ test(edx, Immediate(kSmiTagMask));
-  call_runtime.Branch(zero);
-  // For all objects but the receiver, check that the cache is empty.
-  __ cmp(ecx, Operand(eax));
-  check_prototype.Branch(equal);
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
-  call_runtime.Branch(not_equal);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
-  loop.Branch(not_equal);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(eax);  // push the Object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a map from the runtime call, we can do a fast
-  // modification check. Otherwise, we got a fixed array, and we have
-  // to do a slow check.
-  // eax: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ mov(edx, Operand(eax));
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ cmp(ecx, FACTORY->meta_map());
-  fixed_array.Branch(not_equal);
-
-  use_cache.Bind();
-  // Get enum cache
-  // eax: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ mov(ecx, Operand(eax));
-
-  __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
-  // Get the bridge array held in the enumeration index field.
-  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
-  // Get the cache from the bridge array.
-  __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(eax);  // <- slot 3
-  frame_->EmitPush(edx);  // <- slot 2
-  __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
-  frame_->EmitPush(eax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
-  entry.Jump();
-
-  fixed_array.Bind();
-  // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
-  frame_->EmitPush(eax);  // <- slot 2
-
-  // Push the length of the array and the initial index onto the stack.
-  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
-  frame_->EmitPush(eax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
-
-  // Condition.
-  entry.Bind();
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  __ mov(eax, frame_->ElementAt(0));  // load the current count
-  __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
-  node->break_target()->Branch(above_equal);
-
-  // Get the i'th entry of the array.
-  __ mov(edx, frame_->ElementAt(2));
-  __ mov(ebx, FixedArrayElementOperand(edx, eax));
-
-  // Get the expected map from the stack or a zero map in the
-  // permanent slow case.
-  // eax: current iteration count
-  // ebx: i'th entry of the enum cache
-  __ mov(edx, frame_->ElementAt(3));
-  // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
-  // eax: current iteration count
-  // ebx: i'th entry of the enum cache
-  // edx: expected map value
-  __ mov(ecx, frame_->ElementAt(4));
-  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ cmp(ecx, Operand(edx));
-  end_del_check.Branch(equal);
-
-  // Convert the entry to a string (or null if it isn't a property anymore).
-  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(ebx);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
-  __ mov(ebx, Operand(eax));
-
-  // If the property has been removed while iterating, we just skip it.
-  __ test(ebx, Operand(ebx));
-  node->continue_target()->Branch(equal);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.  ebx: i'th entry of the enum cache (or string thereof)
-  frame_->EmitPush(ebx);
-  { Reference each(this, node->each());
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        // Loading a reference may leave the frame in an unspilled state.
-        frame_->SpillAll();
-        // Get the value (under the reference on the stack) from memory.
-        frame_->EmitPush(frame_->ElementAt(each.size()));
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop(2);
-      } else {
-        // If the reference was to a slot we rely on the convenient property
-        // that it doesn't matter whether a value (e.g. ebx pushed above) is
-        // right on top of or right underneath a zero-sized reference.
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop();
-      }
-    }
-  }
-  // Unloading a reference may leave the frame in an unspilled state.
-  frame_->SpillAll();
-
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  VisitAndSpill(node->body());
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(eax);
-  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
-  frame_->EmitPush(eax);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
-  frame_->EmitPush(eax);
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  VisitStatementsAndSpill(node->catch_block()->statements());
-  if (has_valid_frame()) {
-    exit.Jump();
-  }
-
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address,
-                                    masm()->isolate());
-
-  // Make sure that there's nothing left on the stack above the
-  // handler structure.
-  if (FLAG_debug_code) {
-    __ mov(eax, Operand::StaticVariable(handler_address));
-    __ cmp(esp, Operand(eax));
-    __ Assert(equal, "stack pointer should point to top handler");
-  }
-
-  // If we can fall off the end of the try block, unlink from try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(Operand::StaticVariable(handler_address));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing targets that
-  // have been jumped to.  Deallocate each shadow target.
-  Result return_value;
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from try chain; be careful not to destroy the TOS if
-      // there is one.
-      if (i == kReturnShadowIndex) {
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(eax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (e.g. for...in) may have left stuff on the stack.
-      __ mov(esp, Operand::StaticVariable(handler_address));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(Operand::StaticVariable(handler_address));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
-        shadows[i]->other_target()->Jump(&return_value);
-      } else {
-        shadows[i]->other_target()->Jump();
-      }
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: Used to keep track of the reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(eax);
-  // In case of thrown exceptions, this is where we continue.
-  __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
-  finally_block.Jump();
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address,
-                                    masm()->isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(Operand::StaticVariable(handler_address));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in ecx, then jump around the unlink blocks if any.
-    frame_->EmitPush(Immediate(FACTORY->undefined_value()));
-    __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // on the virtual frame.  We must preserve it until it is
-      // pushed.
-      if (i == kReturnShadowIndex) {
-        Result return_value;
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(eax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (e.g., for...in) may have left values on the
-      // stack.
-      __ mov(esp, Operand::StaticVariable(handler_address));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(Operand::StaticVariable(handler_address));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this target shadowed the function return, materialize
-        // the return value on the stack.
-        frame_->EmitPush(eax);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        frame_->EmitPush(Immediate(FACTORY->undefined_value()));
-      }
-      __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(ecx);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  VisitStatementsAndSpill(node->finally_block()->statements());
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(ecx);
-    frame_->EmitPop(eax);
-  }
-
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      BreakTarget* original = shadows[i]->other_target();
-      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
-      if (i == kReturnShadowIndex) {
-        // The return value is (already) in eax.
-        Result return_value = allocator_->Allocate(eax);
-        ASSERT(return_value.is_valid());
-        if (function_return_is_shadowed_) {
-          original->Branch(equal, &return_value);
-        } else {
-          // Branch around the preparation for return which may emit
-          // code.
-          JumpTarget skip;
-          skip.Branch(not_equal);
-          frame_->PrepareForReturn();
-          original->Jump(&return_value);
-          skip.Bind();
-        }
-      } else {
-        original->Branch(equal);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
-    exit.Branch(not_equal);
-
-    // Rethrow exception.
-    frame_->EmitPush(eax);  // undo pop from above
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-}
-
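// For illustration only: a minimal standalone sketch (plain C++, not V8
// code; all names below are made up) of the state protocol implemented
// above.  The finally block keeps a state value next to the (possibly
// faked) TOS and dispatches on it afterwards: fall through, rethrow, or
// resume the interrupted jump.

#include <cstdio>

enum State { FALLING = 0, THROWING = 1, JUMPING = 2 };

void AfterFinally(int state) {
  if (state == FALLING) {
    std::puts("fell off the end of the try block; continue normally");
  } else if (state == THROWING) {
    std::puts("rethrow the pending exception");
  } else {
    // JUMPING + i encodes which escaping target (return, break, or
    // continue) was taken out of the try block.
    std::printf("resume escape target %d\n", state - JUMPING);
  }
}

int main() {
  AfterFinally(FALLING);
  AfterFinally(JUMPING + 1);
}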
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Spill everything, even constants, to the frame.
-  frame_->SpillAll();
-
-  frame_->DebugBreak();
-  // Ignore the return value.
-#endif
-}
-
-
-Result CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // The inevitable call will sync frame elements to memory anyway, so
-  // we do it eagerly to allow us to push the arguments directly into
-  // place.
-  frame()->SyncRange(0, frame()->element_count() - 1);
-
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame()->EmitPush(Immediate(function_info));
-    return frame()->CallStub(&stub, 1);
-  } else {
-    // Call the runtime to instantiate the function based on the
-    // shared function info.
-    frame()->EmitPush(esi);
-    frame()->EmitPush(Immediate(function_info));
-    frame()->EmitPush(Immediate(pretenure
-                                ? FACTORY->true_value()
-                                : FACTORY->false_value()));
-    return frame()->CallRuntime(Runtime::kNewClosure, 3);
-  }
-}
-
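// A tiny standalone restatement (illustrative C++, not the V8 API) of
// the fast-path test in InstantiateFunction above: the stub that
// allocates the closure in new space is only used for non-pretenured
// functions in function scope with no literals to clone.

bool CanUseFastClosureStub(bool pretenure,
                           bool is_function_scope,
                           int num_literals) {
  return !pretenure && is_function_scope && num_literals == 0;
}

int main() {
  return CanUseFastClosureStub(false, true, 0) ? 0 : 1;
}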
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
-  ASSERT(!in_safe_int32_mode());
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  // Check for stack-overflow exception.
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    return;
-  }
-  Result result = InstantiateFunction(function_info, node->pretenure());
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  Result result = InstantiateFunction(node->shared_function_info(), false);
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  Comment cmnt(masm_, "[ Conditional");
-  ASSERT(!in_safe_int32_mode());
-  JumpTarget then;
-  JumpTarget else_;
-  JumpTarget exit;
-  ControlDestination dest(&then, &else_, true);
-  LoadCondition(node->condition(), &dest, true);
-
-  if (dest.false_was_fall_through()) {
-    // The else target was bound, so we compile the else part first.
-    Load(node->else_expression());
-
-    if (then.is_linked()) {
-      exit.Jump();
-      then.Bind();
-      Load(node->then_expression());
-    }
-  } else {
-    // The then target was bound, so we compile the then part first.
-    Load(node->then_expression());
-
-    if (else_.is_linked()) {
-      exit.Jump();
-      else_.Bind();
-      Load(node->else_expression());
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-    JumpTarget slow;
-    JumpTarget done;
-    Result value;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &value,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // A runtime call is inevitable.  We eagerly sync frame elements
-    // to memory so that we can push the arguments directly into place
-    // on top of the frame.
-    frame()->SyncRange(0, frame()->element_count() - 1);
-    frame()->EmitPush(esi);
-    frame()->EmitPush(Immediate(slot->var()->name()));
-    if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind(&value);
-    frame_->Push(&value);
-
-  } else if (slot->var()->mode() == Variable::CONST) {
-    // Const slots may contain 'the hole' value (meaning the constant has
-    // not been initialized yet), which needs to be converted into the
-    // 'undefined' value.
-    //
-    // We currently spill the virtual frame because constants use the
-    // potentially unsafe direct-frame access of SlotOperand.
-    VirtualFrame::SpilledScope spilled_scope;
-    Comment cmnt(masm_, "[ Load const");
-    Label exit;
-    __ mov(ecx, SlotOperand(slot, ecx));
-    __ cmp(ecx, FACTORY->the_hole_value());
-    __ j(not_equal, &exit);
-    __ mov(ecx, FACTORY->undefined_value());
-    __ bind(&exit);
-    frame()->EmitPush(ecx);
-
-  } else if (slot->type() == Slot::PARAMETER) {
-    frame()->PushParameterAt(slot->index());
-
-  } else if (slot->type() == Slot::LOCAL) {
-    frame()->PushLocalAt(slot->index());
-
-  } else {
-    // The remaining slot types (LOOKUP and GLOBAL) cannot reach here.
-    //
-    // The use of SlotOperand below is safe for an unspilled frame
-    // because it will always be a context slot.
-    ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame()->Push(&temp);
-  }
-}
-
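// Sketch of the compare-and-replace done for const slots above, using
// integer sentinels in place of V8's 'the hole' and 'undefined' (names
// and values here are illustrative only): reading an uninitialized
// const yields undefined.

#include <cassert>

const int kTheHole = -1;    // stands in for FACTORY->the_hole_value()
const int kUndefined = 0;   // stands in for FACTORY->undefined_value()

int ReadConstSlot(int raw) {
  return raw == kTheHole ? kUndefined : raw;
}

int main() {
  assert(ReadConstSlot(kTheHole) == kUndefined);
  assert(ReadConstSlot(42) == 42);
}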
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                    TypeofState state) {
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot is a parameter or isn't the arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // If the loaded value is a constant, we know whether the arguments
-  // object has been allocated yet.
-  Result result = frame()->Pop();
-  if (result.is_constant()) {
-    if (result.handle()->IsArgumentsMarker()) {
-      result = StoreArgumentsObject(false);
-    }
-    frame()->Push(&result);
-    return;
-  }
-  ASSERT(result.is_register());
-  // The loaded value is in a register. If it is the sentinel that
-  // indicates that we haven't allocated the arguments object yet, we
-  // need to do it now.
-  JumpTarget exit;
-  __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
-  frame()->Push(&result);
-  exit.Branch(not_equal);
-
-  result = StoreArgumentsObject(false);
-  frame()->SetElementAt(0, &result);
-  result.Unuse();
-  exit.Bind();
-  return;
-}
-
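// The arguments-marker check above is a sentinel-guarded lazy
// initialization.  A minimal model (illustrative C++ only; Materialize
// stands in for StoreArgumentsObject):

#include <cassert>

const int kArgumentsMarker = -1;  // sentinel: "not allocated yet"

int Materialize() { return 123; }

int LoadArguments(int slot_value) {
  if (slot_value == kArgumentsMarker) slot_value = Materialize();
  return slot_value;
}

int main() {
  assert(LoadArguments(kArgumentsMarker) == 123);
  assert(LoadArguments(7) == 7);
}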
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    JumpTarget* slow) {
-  ASSERT(!in_safe_int32_mode());
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
-  Register context = esi;
-  Result tmp = allocator_->Allocate();
-  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      // Load next context in chain.
-      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.  If we have reached an eval scope, we check
-    // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s != NULL && s->is_eval_scope()) {
-    // Loop up the context chain.  There is no frame effect so it is
-    // safe to use raw labels here.
-    Label next, fast;
-    if (!context.is(tmp.reg())) {
-      __ mov(tmp.reg(), context);
-    }
-    __ bind(&next);
-    // Terminate at global context.
-    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-           Immediate(FACTORY->global_context_map()));
-    __ j(equal, &fast);
-    // Check that extension is NULL.
-    __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
-    slow->Branch(not_equal, not_taken);
-    // Load next context in chain.
-    __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
-    __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-    __ jmp(&next);
-    __ bind(&fast);
-  }
-  tmp.Unuse();
-
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  // The register allocator prefers eax if it is free, so the code generator
-  // will load the global object directly into eax, which is where the LoadIC
-  // expects it.
-  frame_->Spill(eax);
-  LoadGlobal();
-  frame_->Push(slot->var()->name());
-  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT;
-  Result answer = frame_->CallLoadIC(mode);
-  // A test eax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test eax
-  // instruction here.
-  __ nop();
-  return answer;
-}
-
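// A pared-down model of the loop above (illustrative C++, not V8's
// types): walk the context chain and bail to the slow path if any scope
// that calls eval has grown an extension object; otherwise a global
// load IC is safe.

#include <cstddef>

struct Ctx {
  Ctx* outer;
  void* extension;  // non-null when eval introduced bindings here
};

bool FastGlobalLoadPossible(const Ctx* c) {
  for (; c != NULL; c = c->outer) {
    if (c->extension != NULL) return false;  // slow->Branch(not_equal)
  }
  return true;
}

int main() {
  Ctx global = { NULL, NULL };
  Ctx inner = { &global, NULL };
  return FastGlobalLoadPossible(&inner) ? 0 : 1;
}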
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    Result* result,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    done->Jump(result);
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      // Allocate a fresh register to use as a temp in
-      // ContextSlotOperandCheckExtensions and to hold the result
-      // value.
-      *result = allocator()->Allocate();
-      ASSERT(result->is_valid());
-      __ mov(result->reg(),
-             ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ cmp(result->reg(), FACTORY->the_hole_value());
-        done->Branch(not_equal, result);
-        __ mov(result->reg(), FACTORY->undefined_value());
-      }
-      done->Jump(result);
-    } else if (rewrite != NULL) {
-      // Generate fast case for calls of an argument function.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          Result arguments = allocator()->Allocate();
-          ASSERT(arguments.is_valid());
-          __ mov(arguments.reg(),
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                   arguments,
-                                                   slow));
-          frame_->Push(&arguments);
-          frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad();
-          done->Jump(result);
-        }
-      }
-    }
-  }
-}
-
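// Shape of the slow/done jump-target protocol used above, as plain C++
// control flow (illustrative only; SlowPathRuntimeCall stands in for
// Runtime::kLoadContextSlot): the fast case exits early with a result,
// everything else falls through to the runtime.

int SlowPathRuntimeCall() { return 0; }

int LoadDynamic(bool fast_case_hit, int fast_value) {
  if (fast_case_hit) return fast_value;  // done->Jump(result)
  return SlowPathRuntimeCall();          // slow.Bind(); runtime call
}

int main() { return LoadDynamic(true, 0); }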
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.  Since the call is inevitable,
-    // we eagerly sync the virtual frame so we can directly push the
-    // arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(slot->var()->name()));
-
-    Result value;
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores the attributes
-      // (e.g. READ_ONLY) of the context slot so that we can initialize
-      // const properties (introduced via eval("const foo = (some expr);")).
-      // Also uses the current function context instead of the top context.
-      //
-      // Note that we must declare foo on entry to eval(), via a context
-      // slot declaration, but we cannot initialize it at the same time,
-      // because the const declaration may be at the end of the eval code
-      // (sigh...) and the const variable may have been used before then
-      // (where its value is 'undefined'). Thus, we can only perform the
-      // initialization when we actually encounter the expression and when
-      // its operands are defined and valid, so we need to split this into
-      // two operations: declaration of the context slot followed by
-      // initialization.
-      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling chained assignment
-    // expressions.
-    frame_->Push(&value);
-
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-
-    JumpTarget exit;
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is executed,
-      // the code is identical to a normal store (see below).
-      //
-      // We spill the frame in the code below because the direct-frame
-      // access of SlotOperand is potentially unsafe with an unspilled
-      // frame.
-      VirtualFrame::SpilledScope spilled_scope;
-      Comment cmnt(masm_, "[ Init const");
-      __ mov(ecx, SlotOperand(slot, ecx));
-      __ cmp(ecx, FACTORY->the_hole_value());
-      exit.Branch(not_equal);
-    }
-
-    // We must execute the store.  Storing a variable must keep the (new)
-    // value on the stack. This is necessary for compiling assignment
-    // expressions.
-    //
-    // Note: We reach here even with slot->var()->mode() ==
-    // Variable::CONST because const declarations initialize consts to
-    // 'the hole' value and, in doing so, end up calling this code.
-    if (slot->type() == Slot::PARAMETER) {
-      frame_->StoreToParameterAt(slot->index());
-    } else if (slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(slot->index());
-    } else {
-      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
-      //
-      // The use of SlotOperand below is safe for an unspilled frame
-      // because the slot is a context slot.
-      ASSERT(slot->type() == Slot::CONTEXT);
-      frame_->Dup();
-      Result value = frame_->Pop();
-      value.ToRegister();
-      Result start = allocator_->Allocate();
-      ASSERT(start.is_valid());
-      __ mov(SlotOperand(slot, start.reg()), value.reg());
-      // RecordWrite may destroy the value registers.
-      //
-      // TODO(204): Avoid actually spilling when the value is not
-      // needed (probably the common case).
-      frame_->Spill(value.reg());
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
-      // The Results start, value, and temp are released as they go out
-      // of scope.
-    }
-
-    exit.Bind();
-  }
-}
-
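// Why the RecordWrite calls above are needed: after a pointer store
// into an object, a generational GC must remember the slot so an
// old-to-new reference is found without scanning the whole heap.  A toy
// remembered set (illustrative C++ only, not V8's write barrier):

#include <set>

static std::set<void**> remembered_set;

void WriteBarrier(void** slot, void* value) {
  *slot = value;
  remembered_set.insert(slot);  // the GC scans these slots later
}

int main() {
  void* obj = 0;
  WriteBarrier(&obj, &obj);
}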
-
-void CodeGenerator::VisitSlot(Slot* slot) {
-  Comment cmnt(masm_, "[ Slot");
-  if (in_safe_int32_mode()) {
-    if (slot->type() == Slot::LOCAL && !slot->is_arguments()) {
-      frame()->UntaggedPushLocalAt(slot->index());
-    } else if (slot->type() == Slot::PARAMETER) {
-      frame()->UntaggedPushParameterAt(slot->index());
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-  }
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  Comment cmnt(masm_, "[ VariableProxy");
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    ASSERT(!in_safe_int32_mode());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  Comment cmnt(masm_, "[ Literal");
-  if (frame_->ConstantPoolOverflowed()) {
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    if (in_safe_int32_mode()) {
-      temp.set_untagged_int32(true);
-    }
-    __ Set(temp.reg(), Immediate(node->handle()));
-    frame_->Push(&temp);
-  } else {
-    if (in_safe_int32_mode()) {
-      frame_->PushUntaggedElement(node->handle());
-    } else {
-      frame_->Push(node->handle());
-    }
-  }
-}
-
-
-void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
-  ASSERT(value->IsSmi());
-  int bits = reinterpret_cast<int>(*value);
-  __ push(Immediate(bits ^ jit_cookie_));
-  __ xor_(Operand(esp, 0), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
-  ASSERT(value->IsSmi());
-  int bits = reinterpret_cast<int>(*value);
-  __ mov(Operand(ebp, offset), Immediate(bits ^ jit_cookie_));
-  __ xor_(Operand(ebp, offset), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
-  ASSERT(target.is_valid());
-  ASSERT(value->IsSmi());
-  int bits = reinterpret_cast<int>(*value);
-  __ Set(target, Immediate(bits ^ jit_cookie_));
-  __ xor_(target, jit_cookie_);
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
-  if (!value->IsSmi()) return false;
-  int int_value = Smi::cast(*value)->value();
-  return !is_intn(int_value, kMaxSmiInlinedBits);
-}
-
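// The jit cookie trick used by PushUnsafeSmi/MoveUnsafeSmi above: the
// smi's raw bits never appear verbatim in the instruction stream.  The
// immediate that is emitted is value ^ cookie, and a second XOR at run
// time restores the original.  (Values below are illustrative.)

#include <cassert>

int main() {
  int value = 0x12340000;       // raw bits of an "unsafe" smi
  int jit_cookie = 0x0BADC0DE;  // per-compilation cookie
  int emitted = value ^ jit_cookie;         // what lands in the code object
  assert((emitted ^ jit_cookie) == value);  // what the emitted XOR computes
}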
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function.  Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
-  DeferredRegExpLiteral(Register boilerplate,
-                        Register literals,
-                        RegExpLiteral* node)
-      : boilerplate_(boilerplate), literals_(literals), node_(node) {
-    set_comment("[ DeferredRegExpLiteral");
-  }
-
-  void Generate();
-
- private:
-  Register boilerplate_;
-  Register literals_;
-  RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
-  // Since the entry is undefined we call the runtime system to
-  // compute the literal.
-  // Literal array (0).
-  __ push(literals_);
-  // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
-  // RegExp pattern (2).
-  __ push(Immediate(node_->pattern()));
-  // RegExp flags (3).
-  __ push(Immediate(node_->flags()));
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
-  DeferredAllocateInNewSpace(int size,
-                             Register target,
-                             int registers_to_save = 0)
-    : size_(size), target_(target), registers_to_save_(registers_to_save) {
-    ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
-    ASSERT_EQ(0, registers_to_save & target.bit());
-    set_comment("[ DeferredAllocateInNewSpace");
-  }
-  void Generate();
-
- private:
-  int size_;
-  Register target_;
-  int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
-  for (int i = 0; i < kNumRegs; i++) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ push(save_register);
-    }
-  }
-  __ push(Immediate(Smi::FromInt(size_)));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  if (!target_.is(eax)) {
-    __ mov(target_, eax);
-  }
-  for (int i = kNumRegs - 1; i >= 0; i--) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ pop(save_register);
-    }
-  }
-}
-
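// The register save/restore pattern from Generate() above, spelled out
// as illustrative C++: walk the bitmask forwards to push and backwards
// to pop, so the last register pushed is the first popped.

#include <cstdio>

const int kNumRegs = 8;

void SaveCallRestore(int registers_to_save) {
  for (int i = 0; i < kNumRegs; i++)
    if (registers_to_save & (1 << i)) std::printf("push r%d\n", i);
  std::puts("... clobbering runtime call ...");
  for (int i = kNumRegs - 1; i >= 0; i--)
    if (registers_to_save & (1 << i)) std::printf("pop r%d\n", i);
}

int main() { SaveCallRestore(0x05); }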
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ RegExp Literal");
-
-  // Retrieve the literals array and check the allocated entry.  Begin
-  // with a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ mov(literals.reg(),
-         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  // Load the literal at the index saved in the AST node.
-  Result boilerplate = allocator_->Allocate();
-  ASSERT(boilerplate.is_valid());
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
-  // Check whether we need to materialize the RegExp object.  If so,
-  // jump to the deferred code passing the literals array.
-  DeferredRegExpLiteral* deferred =
-      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
-  __ cmp(boilerplate.reg(), FACTORY->undefined_value());
-  deferred->Branch(equal);
-  deferred->BindExit();
-
-  // The boilerplate register now contains the RegExp object.
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
-  DeferredAllocateInNewSpace* allocate_fallback =
-      new DeferredAllocateInNewSpace(size, literals.reg());
-  frame_->Push(&boilerplate);
-  frame_->SpillTop();
-  __ AllocateInNewSpace(size,
-                        literals.reg(),
-                        tmp.reg(),
-                        no_reg,
-                        allocate_fallback->entry_label(),
-                        TAG_OBJECT);
-  allocate_fallback->BindExit();
-  boilerplate = frame_->Pop();
-  // Copy from boilerplate to clone and return clone.
-
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ mov(tmp.reg(), FieldOperand(boilerplate.reg(), i));
-    __ mov(FieldOperand(literals.reg(), i), tmp.reg());
-  }
-  frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ mov(literals.reg(),
-         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-  // Literal array.
-  frame_->Push(&literals);
-  // Literal index.
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  // Constant properties.
-  frame_->Push(node->constant_properties());
-  // Should the object literal have fast elements?
-  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
-  Result clone;
-  if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->Push(&clone);
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through.
-      case ObjectLiteral::Property::COMPUTED: {
-        Handle<Object> key(property->key()->handle());
-        if (key->IsSymbol()) {
-          // Duplicate the object as the IC receiver.
-          frame_->Dup();
-          Load(property->value());
-          if (property->emit_store()) {
-            Result ignored =
-                frame_->CallStoreIC(Handle<String>::cast(key), false,
-                                    strict_mode_flag());
-            // A test eax instruction following the store IC call would
-            // indicate the presence of an inlined version of the
-            // store. Add a nop to indicate that there is no such
-            // inlined version.
-            __ nop();
-          } else {
-            frame_->Drop(2);
-          }
-          break;
-        }
-        // Fall through
-      }
-      case ObjectLiteral::Property::PROTOTYPE: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        Load(property->value());
-        if (property->emit_store()) {
-          frame_->Push(Smi::FromInt(NONE));   // PropertyAttributes
-          // Ignore the result.
-          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(1));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(0));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      default: UNREACHABLE();
-    }
-  }
-}
-
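// The observable semantics behind CalculateEmitStore above: for a
// literal like { x: f(), x: g() } both values are still evaluated for
// their side effects, but only the store for the last occurrence of a
// duplicated key survives.  A C++ rendering of the end state
// (illustrative only):

#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> obj;
  obj["x"] = 1;  // earlier occurrence: store suppressed by the codegen
  obj["x"] = 2;  // later occurrence wins
  assert(obj["x"] == 2);
}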
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ mov(literals.reg(),
-         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  frame_->Push(&literals);
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  frame_->Push(node->constant_elements());
-  int length = node->values()->length();
-  Result clone;
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
-  } else if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-  }
-  frame_->Push(&clone);
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < length; i++) {
-    Expression* value = node->values()->at(i);
-
-    if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
-      continue;
-    }
-
-    // The property must be set by generated code.
-    Load(value);
-
-    // Get the property value off the stack.
-    Result prop_value = frame_->Pop();
-    prop_value.ToRegister();
-
-    // Fetch the array literal while leaving a copy on the stack and
-    // use it to get the elements array.
-    frame_->Dup();
-    Result elements = frame_->Pop();
-    elements.ToRegister();
-    frame_->Spill(elements.reg());
-    // Get the elements array.
-    __ mov(elements.reg(),
-           FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
-
-    // Update the write barrier for the array address.
-    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_valid());
-    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
-  }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  ASSERT(!in_safe_int32_mode());
-  ASSERT(!in_spilled_code());
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  Result result =
-      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so this is never an assignment to a variable and
-    // there must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      frame()->Dup();
-    }
-    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare
-  // for that, add an extra copy of the receiver to the frame, so that it
-  // can be converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame()->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else if (var != NULL) {
-      // The LoadIC stub expects the object in eax.
-      // Freeing eax causes the code generator to load the global into it.
-      frame_->Spill(eax);
-      LoadGlobal();
-    } else {
-      frame()->Dup();
-    }
-    Result value = EmitNamedLoad(name, var != NULL);
-    frame()->Push(&value);
-    Load(node->value());
-
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    Result value = frame()->Pop();
-    frame()->Push(prop->obj());
-    frame()->Push(&value);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  Result answer = EmitNamedStore(name, is_contextual);
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment.  Swap
-      // the receiver and the value of the assignment expression.
-      Result result = frame()->Pop();
-      Result receiver = frame()->Pop();
-      frame()->Push(&result);
-      frame()->Push(&receiver);
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare
-  // for that, add an extra copy of the receiver to the frame, so that it
-  // can be converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame()->PushElementAt(1);
-    frame()->PushElementAt(1);
-    Result value = EmitKeyedLoad();
-    frame()->Push(&value);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  Result answer = EmitKeyedStore(prop->key()->type());
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Result result = frame()->Pop();
-    Result receiver = frame()->Pop();
-    frame()->Push(&result);
-    frame()->Push(&receiver);
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  ASSERT(!in_safe_int32_mode());
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call doesn't actually return, but the code generator
-    // still generates code and expects a certain frame height.
-    frame()->Push(&result);
-  }
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ Throw");
-  Load(node->exception());
-  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ Property");
-  Reference property(this, node);
-  property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ Call");
-
-  Expression* function = node->expression();
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Check if the function is a variable or a property.
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved on cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare the stack for the call to the resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->Push(FACTORY->undefined_value());
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Result to hold the result of the function resolution and the
-    // final result of the eval call.
-    Result result;
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                                     NOT_INSIDE_TYPEOF,
-                                                     &slow);
-      frame_->Push(&fun);
-      if (arg_count > 0) {
-        frame_->PushElementAt(arg_count);
-      } else {
-        frame_->Push(FACTORY->undefined_value());
-      }
-      frame_->PushParameterAt(-1);
-
-      // Push the strict mode flag.
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-      // Resolve the call.
-      result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump(&result);
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    frame_->PushElementAt(arg_count + 1);
-    if (arg_count > 0) {
-      frame_->PushElementAt(arg_count);
-    } else {
-      frame_->Push(FACTORY->undefined_value());
-    }
-    frame_->PushParameterAt(-1);
-
-    // Push the strict mode flag.
-    frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-    // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind(&result);
-
-    // The runtime call returns a pair of values in eax (function) and
-    // edx (receiver). Touch up the stack with the right values.
-    Result receiver = allocator_->Allocate(edx);
-    frame_->SetElementAt(arg_count + 1, &result);
-    frame_->SetElementAt(arg_count, &receiver);
-    receiver.Unuse();
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    result = frame_->CallStub(&call_function, arg_count + 1);
-
-    // Restore the context and overwrite the function on the stack with
-    // the result.
-    frame_->RestoreContextRegister();
-    frame_->SetElementAt(0, &result);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Push the name of the function onto the frame.
-    frame_->Push(var->name());
-
-    // Call the IC initialization code.
-    CodeForSourcePosition(node->position());
-    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
-                                       arg_count,
-                                       loop_nesting());
-    frame_->RestoreContextRegister();
-    frame_->Push(&result);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-    Result function;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &function,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Enter the runtime system to load the function from the context.
-    // Sync the frame so we can push the arguments directly into
-    // place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(var->name()));
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // The runtime call returns a pair of values in eax and edx.  The
-    // looked-up function is in eax and the receiver is in edx.  These
-    // register references are not ref counted here.  We spill them
-    // eagerly since they are arguments to an inevitable call (and are
-    // not sharable by the arguments).
-    ASSERT(!allocator()->is_used(eax));
-    frame_->EmitPush(eax);
-
-    // Load the receiver.
-    ASSERT(!allocator()->is_used(edx));
-    frame_->EmitPush(edx);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind(&function);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      call.Bind();
-    }
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Push the name of the function onto the frame.
-        frame_->Push(name);
-
-        // Call the IC initialization code.
-        CodeForSourcePosition(node->position());
-        Result result =
-            frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
-                               loop_nesting());
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the function to call from the property through a reference.
-
-      // Pass receiver to called function.
-      if (property->is_synthetic()) {
-        Reference ref(this, property);
-        ref.GetValue();
-        // Use global object as receiver.
-        LoadGlobalReceiver();
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the name of the function.
-        Load(property->key());
-
-        // Swap the name of the function and the receiver on the stack to follow
-        // the calling convention for call ICs.
-        Result key = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&key);
-        frame_->Push(&receiver);
-        key.Unuse();
-        receiver.Unuse();
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Place the key on top of stack and call the IC initialization code.
-        frame_->PushElementAt(arg_count + 1);
-        CodeForSourcePosition(node->position());
-        Result result =
-            frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
-                                    arg_count,
-                                    loop_nesting());
-        frame_->Drop();  // Drop the key still on the stack.
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-    }
-
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver();
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-  }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Result result = frame_->CallConstructor(arg_count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  // Finally, we're expected to leave a value on the top of the stack.
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ Set(result_, Immediate(FACTORY->undefined_value()));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ Set(result_, Immediate(FACTORY->nan_value()));
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need two extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object.reg(),
-                                   index.reg(),
-                                   scratch.reg(),
-                                   result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
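
[Editor's note: a scalar model may make the fast/deferred split above easier to follow. The sketch below is an illustration, not V8 code: it collapses the fast path, the conversion path, and the out-of-range path of charCodeAt into one function over one-byte strings, with std::nan standing in for the NaN heap number.]

#include <cmath>
#include <cstddef>
#include <string>

// Illustrative only: an in-range index yields the code unit, an
// out-of-range index yields NaN (the index_out_of_range_ path), and a
// fractional index is truncated, approximating the conversion that the
// need_conversion_ path triggers by returning undefined.
double CharCodeAt(const std::string& s, double index) {
  double i = std::trunc(index);
  if (i < 0 || i >= static_cast<double>(s.size())) return std::nan("");
  return static_cast<unsigned char>(s[static_cast<std::size_t>(i)]);
}
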
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Result code = frame_->Pop();
-  code.ToRegister();
-  ASSERT(code.is_valid());
-
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code.reg(), result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ Set(result_, Immediate(Smi::FromInt(0)));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ Set(result_, Immediate(FACTORY->empty_string()));
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need three extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch1 = allocator()->Allocate();
-  ASSERT(scratch1.is_valid());
-  Result scratch2 = allocator()->Allocate();
-  ASSERT(scratch2.is_valid());
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object.reg(),
-                               index.reg(),
-                               scratch1.reg(),
-                               scratch2.reg(),
-                               result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // Check if the object is a JS array or not.
-  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
-  value.Unuse();
-  temp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop, loop_condition,
-      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
-  ASSERT(args->length() == 2);
-  // We will leave the separator on the stack until the end of the function.
-  Load(args->at(1));
-  // Load the array into eax.
-  Load(args->at(0));
-  Result array_result = frame_->Pop();
-  array_result.ToRegister(eax);
-  frame_->SpillAll();
-
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = eax;
-  Register elements = no_reg;  // Will be eax.
-
-  Register index = edx;
-
-  Register string_length = ecx;
-
-  Register string = esi;
-
-  Register scratch = ebx;
-
-  Register array_length = edi;
-  Register result_pos = no_reg;  // Will be edi.
-
-  // Separator operand is already pushed.
-  Operand separator_operand = Operand(esp, 2 * kPointerSize);
-  Operand result_operand = Operand(esp, 1 * kPointerSize);
-  Operand array_length_operand = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
-  __ cld();
-  // Check that the array is a JSArray.
-  __ test(array, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &bailout);
-
-  // Check that the array has fast elements.
-  __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
-            1 << Map::kHasFastElements);
-  __ j(zero, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ sar(array_length, 1);
-  __ j(not_zero, &non_trivial_array);
-  __ mov(result_operand, FACTORY->empty_string());
-  __ jmp(&done);
-
-  // Save the array length.
-  __ bind(&non_trivial_array);
-  __ mov(array_length_operand, array_length);
-
-  // Save the FixedArray containing the array's elements.
-  // End of the array's live range.
-  elements = array;
-  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
-  array = no_reg;
-
-  // Check that all array elements are sequential ASCII strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ Set(index, Immediate(0));
-  __ Set(string_length, Immediate(0));
-  // Loop condition: while (index < length).
-  // Live loop registers: index, array_length, string,
-  //                      scratch, string_length, elements.
-  __ jmp(&loop_condition);
-  __ bind(&loop);
-  __ cmp(index, Operand(array_length));
-  __ j(greater_equal, &done);
-
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ test(string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  __ add(string_length,
-         FieldOperand(string, SeqAsciiString::kLengthOffset));
-  __ j(overflow, &bailout);
-  __ add(Operand(index), Immediate(1));
-  __ bind(&loop_condition);
-  __ cmp(index, Operand(array_length));
-  __ j(less, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmp(array_length, 1);
-  __ j(not_equal, &not_size_one_array);
-  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
-  __ mov(result_operand, scratch);
-  __ jmp(&done);
-
-  __ bind(&not_size_one_array);
-
-  // End of array_length live range.
-  result_pos = array_length;
-  array_length = no_reg;
-
-  // Live registers:
-  // string_length: Sum of string lengths, as a smi.
-  // elements: FixedArray of strings.
-
-  // Check that the separator is a flat ASCII string.
-  __ mov(string, separator_operand);
-  __ test(string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Add (separator length times array_length) - separator length
-  // to string_length.
-  __ mov(scratch, separator_operand);
-  __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
-  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
-  __ imul(scratch, array_length_operand);
-  __ j(overflow, &bailout);
-  __ add(string_length, Operand(scratch));
-  __ j(overflow, &bailout);
-
-  __ shr(string_length, 1);
-  // Live registers and stack values:
-  //   string_length
-  //   elements
-  __ AllocateAsciiString(result_pos, string_length, scratch,
-                         index, string, &bailout);
-  __ mov(result_operand, result_pos);
-  __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
-  __ mov(string, separator_operand);
-  __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ j(equal, &one_char_separator);
-  __ j(greater, &long_separator);
-
-  // Empty separator case.
-  __ mov(index, Immediate(0));
-  __ jmp(&loop_1_condition);
-  // Loop condition: while (index < length).
-  __ bind(&loop_1);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   elements: the FixedArray of strings we are joining.
-
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
-  __ bind(&loop_1_condition);
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_1);  // End while (index < length).
-  __ jmp(&done);
-
-  // One-character separator case.
-  __ bind(&one_char_separator);
-  // Replace the separator with its ASCII character value.
-  __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ mov_b(separator_operand, scratch);
-
-  __ Set(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.
-  __ jmp(&loop_2_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_2);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator character to the result.
-  __ mov_b(scratch, separator_operand);
-  __ mov_b(Operand(result_pos, 0), scratch);
-  __ inc(result_pos);
-
-  __ bind(&loop_2_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_2);  // End while (index < length).
-  __ jmp(&done);
-
-  // Long separator case (separator is more than one character).
-  __ bind(&long_separator);
-
-  __ Set(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.
-  __ jmp(&loop_3_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_3);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator to the result.
-  __ mov(string, separator_operand);
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-
-  __ bind(&loop_3_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_3);  // End while (index < length).
-  __ jmp(&done);
-
-  __ bind(&bailout);
-  __ mov(result_operand, FACTORY->undefined_value());
-  __ bind(&done);
-  __ mov(eax, result_operand);
-  // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
-
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  frame_->Drop(1);
-  frame_->Push(&array_result);
-}
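
[Editor's note: for reference, here is a scalar sketch of the algorithm above; an illustration, not part of the patch. It makes two passes over the elements: the first sums lengths to size the result, the second copies strings and separators into the preallocated buffer. The three separator cases (empty, one character, longer) exist only as copy-loop specializations and collapse into one loop here.]

#include <cstddef>
#include <string>
#include <vector>

std::string FastJoin(const std::vector<std::string>& elements,
                     const std::string& separator) {
  if (elements.empty()) return std::string();    // empty array case
  if (elements.size() == 1) return elements[0];  // size-one array case
  // First pass: total length = sum of lengths plus n-1 separators
  // (cf. the smi-summing loop above that bails out on overflow).
  std::size_t length = separator.size() * (elements.size() - 1);
  for (const std::string& s : elements) length += s.size();
  std::string result;
  result.reserve(length);  // stands in for AllocateAsciiString
  // Second pass: emit the separator before every element except the
  // first (cf. the jumps into loop_2_entry / loop_3_entry).
  for (std::size_t i = 0; i < elements.size(); i++) {
    if (i > 0) result += separator;
    result += elements[i];
  }
  return result;
}
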
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // Check if the object is a regexp.
-  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
-  value.Unuse();
-  temp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(zero);
-  __ cmp(obj.reg(), FACTORY->null_value());
-  destination()->true_target()->Branch(equal);
-
-  Result map = allocator()->Allocate();
-  ASSERT(map.is_valid());
-  __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  destination()->false_target()->Branch(not_zero);
-  // Do a range test for JSObject type.  We can't use
-  // MacroAssembler::IsInstanceJSObjectType, because we are using a
-  // ControlDestination, so we copy its implementation here.
-  __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
-  __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
-  __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-  obj.Unuse();
-  map.Unuse();
-  destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  // typeof(arg) == 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-
-  // Check that this is an object.
-  frame_->Spill(value.reg());
-  __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
-  value.Unuse();
-  destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether a String JavaScript wrapper object is safe
-// to use the default valueOf behavior on.  It is called after the bit that
-// caches this information in the map has been tested against the object's
-// map, which is expected in the map_result_ register.  On return the
-// register map_result_ contains 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-      __ Assert(equal, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
-    __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ cmp(scratch1_, FACTORY->hash_table_map());
-    __ j(equal, &false_result);
-
-    // Look for the valueOf symbol in the descriptor array, and indicate
-    // false if it is found.  The type is not checked, so a transition
-    // entry produces a false negative.
-    __ mov(map_result_,
-           FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch1_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kPointerSize == 4);
-    __ lea(scratch1_,
-           Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
-    // Calculate location of the first key name.
-    __ add(Operand(map_result_),
-           Immediate(FixedArray::kHeaderSize +
-                     DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array.  If one of
-    // these is the symbol valueOf, the result is false.
-    Label entry, loop;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(scratch2_, FieldOperand(map_result_, 0));
-    __ cmp(scratch2_, FACTORY->value_of_symbol());
-    __ j(equal, &false_result);
-    __ add(Operand(map_result_), Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmp(map_result_, Operand(scratch1_));
-    __ j(not_equal, &loop);
-
-    // Reload map as register map_result_ was used as temporary above.
-    __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object, check that its
-    // prototype is the unmodified String prototype.  If not, the result
-    // is false.
-    __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
-    __ test(scratch1_, Immediate(kSmiTagMask));
-    __ j(zero, &false_result);
-    __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ mov(scratch2_,
-           FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ cmp(scratch1_,
-           ContextOperand(scratch2_,
-                          Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ j(not_equal, &false_result);
-    // Set the bit in the map to indicate that it has been checked safe
-    // for default valueOf, and set the result to true.
-    __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ Set(map_result_, Immediate(1));
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ Set(map_result_, Immediate(0));
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();  // Pop the string wrapper.
-  obj.ToRegister();
-  ASSERT(obj.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj.reg());
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Result map_result = allocator()->Allocate();
-  ASSERT(map_result.is_valid());
-  __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
-            1 << Map::kStringWrapperSafeForDefaultValueOf);
-  destination()->true_target()->Branch(not_zero);
-
-  // We need an additional two scratch registers for the deferred code.
-  Result temp1 = allocator()->Allocate();
-  ASSERT(temp1.is_valid());
-  Result temp2 = allocator()->Allocate();
-  ASSERT(temp2.is_valid());
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
-  deferred->Branch(zero);
-  deferred->BindExit();
-  __ test(map_result.reg(), Operand(map_result.reg()));
-  obj.Unuse();
-  map_result.Unuse();
-  temp1.Unuse();
-  temp2.Unuse();
-  destination()->Split(not_equal);
-}
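
[Editor's note: a simplified model of the check above, with hypothetical stand-in types; the real layout lives in the map and descriptor array. The result is cached in a map bit, a fast-case object must not have an own valueOf descriptor, and the prototype must still be the unmodified String prototype.]

#include <string>
#include <vector>

struct Map {
  bool checked_safe = false;                  // cached bit, cf. kBitField2Offset
  std::vector<std::string> descriptor_names;  // instance descriptor keys
  const Map* prototype_map = nullptr;
};

bool IsStringWrapperSafeForDefaultValueOf(Map* map,
                                          const Map* string_proto_map) {
  if (map->checked_safe) return true;  // fast path: bit already set
  for (const std::string& name : map->descriptor_names) {
    // Transitions are not distinguished, so this may be a false negative.
    if (name == "valueOf") return false;
  }
  if (map->prototype_map != string_proto_map) return false;
  map->checked_safe = true;  // cache the positive answer in the map
  return true;
}
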
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(zero);
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
-  obj.Unuse();
-  temp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(zero);
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ mov(temp.reg(),
-         FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  obj.Unuse();
-  temp.Unuse();
-  destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  // Get the frame pointer for the calling frame.
-  Result fp = allocator()->Allocate();
-  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &check_frame_marker);
-  __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  fp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Result fp = allocator_->Allocate();
-  Result result = allocator_->Allocate();
-  ASSERT(fp.is_valid() && result.is_valid());
-
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ mov(result.reg(),
-         Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  result.set_type_info(TypeInfo::Smi());
-  if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave, null, function, non_function_constructor;
-  Load(args->at(0));  // Load the object.
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  frame_->Spill(obj.reg());
-
-  // If the object is a smi, we return null.
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  null.Branch(zero);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
-  null.Branch(below);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
-  function.Branch(equal);
-
-  // Check if the constructor in the map is a function.
-  { Result tmp = allocator()->Allocate();
-    __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
-    __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
-    non_function_constructor.Branch(not_equal);
-  }
-
-  // The register that held the map now contains the constructor function.
-  // Grab the instance class name from there.
-  __ mov(obj.reg(),
-         FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
-  __ mov(obj.reg(),
-         FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->Push(&obj);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  frame_->Push(FACTORY->function_class_symbol());
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  frame_->Push(FACTORY->Object_symbol());
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  frame_->Push(FACTORY->null_value());
-
-  // All done.
-  leave.Bind();
-}
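
[Editor's note: the branch ladder above reduces to a small decision procedure. The sketch below is illustrative; the enum values are hypothetical and only respect the ordering that the STATIC_ASSERTs require, namely that JS_FUNCTION_TYPE is the last type and comes directly after LAST_JS_OBJECT_TYPE.]

#include <string>

enum InstanceType {
  FIRST_JS_OBJECT_TYPE = 100,  // hypothetical values
  LAST_JS_OBJECT_TYPE = 110,
  JS_FUNCTION_TYPE = 111
};

struct HeapObjectModel {
  InstanceType type;
  bool constructor_is_function;
  std::string instance_class_name;  // from the constructor's SharedFunctionInfo
};

// A smi argument is modeled as a null pointer; the string "null" stands
// in for the null value pushed by the null target above.
std::string ClassOf(const HeapObjectModel* obj) {
  if (obj == nullptr) return "null";                    // smis have class null
  if (obj->type < FIRST_JS_OBJECT_TYPE) return "null";  // non-JS objects too
  if (obj->type == JS_FUNCTION_TYPE) return "Function";
  if (!obj->constructor_is_function) return "Object";
  return obj->instance_class_name;
}
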
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  frame_->Dup();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  ASSERT(object.is_valid());
-  // if (object->IsSmi()) return object.
-  __ test(object.reg(), Immediate(kSmiTagMask));
-  leave.Branch(zero, taken);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // if (!object->IsJSValue()) return object.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
-  leave.Branch(not_equal, not_taken);
-  __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
-  object.Unuse();
-  frame_->SetElementAt(0, &temp);
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  Load(args->at(1));  // Load the value.
-  Result value = frame_->Pop();
-  Result object = frame_->Pop();
-  value.ToRegister();
-  object.ToRegister();
-
-  // if (object->IsSmi()) return value.
-  __ test(object.reg(), Immediate(kSmiTagMask));
-  leave.Branch(zero, &value, taken);
-
-  // It is a heap object - get its map.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  // if (!object->IsJSValue()) return value.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
-  leave.Branch(not_equal, &value, not_taken);
-
-  // Store the value.
-  __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  Result duplicate_value = allocator_->Allocate();
-  ASSERT(duplicate_value.is_valid());
-  __ mov(duplicate_value.reg(), value.reg());
-  // The object register is also overwritten by the write barrier and
-  // possibly aliased in the frame.
-  frame_->Spill(object.reg());
-  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
-                 scratch.reg());
-  object.Unuse();
-  scratch.Unuse();
-  duplicate_value.Unuse();
-
-  // Leave.
-  leave.Bind(&value);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in edx and the formal
-  // parameter count in eax.
-  Load(args->at(0));
-  Result key = frame_->Pop();
-  // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  Result result = frame_->CallStub(&stub, &key, &count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-  right.ToRegister();
-  left.ToRegister();
-  __ cmp(right.reg(), Operand(left.reg()));
-  right.Unuse();
-  left.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  STATIC_ASSERT(kSmiTag == 0);  // EBP value is aligned, so it looks like a Smi.
-  Result ebp_as_smi = allocator_->Allocate();
-  ASSERT(ebp_as_smi.is_valid());
-  __ mov(ebp_as_smi.reg(), Operand(ebp));
-  frame_->Push(&ebp_as_smi);
-}
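
[Editor's note: the trick here relies on the smi tagging scheme asserted throughout this file (kSmiTag == 0, kSmiTagSize == 1): a small integer n is stored as n << 1, so any word with a clear low bit parses as a smi, and a 4-byte-aligned frame pointer always has its low bit clear. A sketch:]

#include <cstdint>

// Smi tagging on ia32 as asserted above: tag bit 0, tag size 1 bit.
inline intptr_t SmiFromInt(intptr_t n) { return n << 1; }
inline intptr_t SmiToInt(intptr_t tagged) { return tagged >> 1; }

// Any pointer-aligned word already carries a valid smi tag, which is why
// ebp can be pushed as-is and later read back as a smi.
inline bool LooksLikeSmi(std::uintptr_t word) { return (word & 1) == 0; }
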
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  frame_->SpillAll();
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-
-  __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ mov(edi, eax);
-
-  __ bind(&heapnumber_allocated);
-
-  __ PrepareCallCFunction(1, ebx);
-  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
-                   1);
-
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
-  // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
-  // This is implemented on both SSE2 and FPU.
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope fscope(SSE2);
-    __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-    __ movd(xmm1, Operand(ebx));
-    __ movd(xmm0, Operand(eax));
-    __ cvtss2sd(xmm1, xmm1);
-    __ pxor(xmm0, xmm1);
-    __ subsd(xmm0, xmm1);
-    __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
-  } else {
-    // 0x4130000000000000 is 1.0 x 2^20 as a double.
-    __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
-           Immediate(0x41300000));
-    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
-    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
-    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
-    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
-    __ fsubp(1);
-    __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
-  }
-  __ mov(eax, edi);
-
-  Result result = allocator_->Allocate(eax);
-  frame_->Push(&result);
-}
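
[Editor's note: the bit trick in both branches is the same and can be checked in a few lines of portable C++; a sketch assuming IEEE-754 doubles. OR the 32 random bits into the low mantissa word of 1.0 x 2^20, then subtract 1.0 x 2^20, which leaves random_bits * 2^-32, a value in [0, 1).]

#include <cstdint>
#include <cstring>

double RandomBitsToDouble(std::uint32_t random_bits) {
  const std::uint64_t kOneTimes2To20 = 0x4130000000000000ULL;  // 1.0 x 2^20
  std::uint64_t combined = kOneTimes2To20 | random_bits;  // 1.(bits) x 2^20
  double value, base;
  std::memcpy(&value, &combined, sizeof(value));
  std::memcpy(&base, &kOneTimes2To20, sizeof(base));
  return value - base;  // == random_bits * 2^-32, in [0, 1)
}
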
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  Result answer = frame_->CallStub(&stub, 3);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(4, args->length());
-
-  // Load the arguments on the stack and call the stub.
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-
-  RegExpExecStub stub;
-  Result result = frame_->CallStub(&stub, 4);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-
-  RegExpConstructResultStub stub;
-  Result result = frame_->CallStub(&stub, 3);
-  frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;    // On invocation holds the smi index of the finger;
-                    // on exit holds the value that was looked up.
-  Register cache_;  // Instance of JSFunctionResultCache.
-  Register key_;    // The key being looked up.
-};
-
-
-void DeferredSearchCache::Generate() {
-  Label first_loop, search_further, second_loop, cache_miss;
-
-  // Smi-tagging is equivalent to multiplying by 2.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
-  Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
-
-  // Check the cache from finger to start of the cache.
-  __ bind(&first_loop);
-  __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
-  __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
-  __ j(less, &search_further);
-
-  __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
-  __ j(not_equal, &first_loop);
-
-  __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&search_further);
-
-  // Check the cache from end of cache up to finger.
-  __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
-
-  __ bind(&second_loop);
-  __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
-  // Consider prefetching into some reg.
-  __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-  __ j(less_equal, &cache_miss);
-
-  __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
-  __ j(not_equal, &second_loop);
-
-  __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&cache_miss);
-  __ push(cache_);  // store a reference to cache
-  __ push(key_);  // store a key
-  __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ push(key_);
-  // On ia32 the function must be in edi.
-  __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
-  ParameterCount expected(1);
-  __ InvokeFunction(edi, expected, CALL_FUNCTION);
-
-  // Find a place to put the new cached value.
-  Label add_new_entry, update_cache;
-  __ mov(ecx, Operand(esp, kPointerSize));  // restore the cache
-  // Possible optimization: the cache size is constant for a given cache,
-  // so technically we could use a constant here.  However, on a cache
-  // miss this optimization would hardly matter.
-
-  // Check if we can add a new entry to the cache.
-  __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
-  __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
-  __ j(greater, &add_new_entry);
-
-  // Check if we can evict the entry after the finger.
-  __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
-  __ add(Operand(edx), Immediate(kEntrySizeSmi));
-  __ cmp(ebx, Operand(edx));
-  __ j(greater, &update_cache);
-
-  // Need to wrap over the cache.
-  __ mov(edx, Immediate(kEntriesIndexSmi));
-  __ jmp(&update_cache);
-
-  __ bind(&add_new_entry);
-  __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
-  __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
-  __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
-
-  // Update the cache itself.
-  // edx holds the index.
-  __ bind(&update_cache);
-  __ pop(ebx);  // restore the key
-  __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
-  // Store key.
-  __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
-  __ RecordWrite(ecx, 0, ebx, edx);
-
-  // Store value.
-  __ pop(ecx);  // restore the cache.
-  __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
-  __ add(Operand(edx), Immediate(Smi::FromInt(1)));
-  __ mov(ebx, eax);
-  __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
-  __ RecordWrite(ecx, 0, ebx, edx);
-
-  if (!dst_.is(eax)) {
-    __ mov(dst_, eax);
-  }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      masm()->isolate()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->Push(FACTORY->undefined_value());
-    return;
-  }
-
-  Load(args->at(1));
-  Result key = frame_->Pop();
-  key.ToRegister();
-
-  Result cache = allocator()->Allocate();
-  ASSERT(cache.is_valid());
-  __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
-  __ mov(cache.reg(),
-         FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
-  __ mov(cache.reg(),
-         ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ mov(cache.reg(),
-         FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
-                                                          cache.reg(),
-                                                          key.reg());
-
-  // tmp.reg() now holds finger offset as a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ mov(tmp.reg(), FieldOperand(cache.reg(),
-                                 JSFunctionResultCache::kFingerOffset));
-  __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
-  deferred->Branch(not_equal);
-
-  __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
-
-  deferred->BindExit();
-  frame_->Push(&tmp);
-}
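
[Editor's note: for orientation, a scalar model of the lookup implemented by the inline fast case plus DeferredSearchCache; the pair layout, int keys, and fixed capacity are hypothetical stand-ins for the real JSFunctionResultCache. Check the finger entry, scan from the finger down to the start, scan from the end down to the finger, and on a miss call the factory and store the result after the finger, wrapping when the cache is full.]

#include <cstddef>
#include <utility>
#include <vector>

struct ResultCache {
  std::vector<std::pair<int, int>> entries;  // (key, value) pairs
  std::size_t finger = 0;                    // index of the last hit
  std::size_t capacity = 16;                 // fixed size, hypothetical
  int (*factory)(int) = nullptr;
};

int GetFromCache(ResultCache& cache, int key) {
  std::size_t n = cache.entries.size();
  // Fast case: the entry under the finger.
  if (n != 0 && cache.entries[cache.finger].first == key)
    return cache.entries[cache.finger].second;
  // First loop: from the finger down to the start of the cache.
  for (std::size_t i = cache.finger; i-- > 0;) {
    if (cache.entries[i].first == key) {
      cache.finger = i;
      return cache.entries[i].second;
    }
  }
  // Second loop: from the end of the cache down to the finger.
  for (std::size_t i = n; i-- > cache.finger + 1;) {
    if (cache.entries[i].first == key) {
      cache.finger = i;
      return cache.entries[i].second;
    }
  }
  // Cache miss: compute the value, then add a new entry or evict the
  // entry after the finger, wrapping over the cache when it is full.
  int value = cache.factory(key);
  if (n < cache.capacity) {
    cache.entries.emplace_back(key, value);
    cache.finger = n;
  } else {
    std::size_t slot = (cache.finger + 1) % n;
    cache.entries[slot] = std::make_pair(key, value);
    cache.finger = slot;
  }
  return value;
}
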
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and call the stub.
-  Load(args->at(0));
-  NumberToStringStub stub;
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  // Note: this code assumes that the indices passed are within the
-  // elements' bounds and refer to valid (non-hole) values.
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  Result index2 = frame_->Pop();
-  index2.ToRegister();
-
-  Result index1 = frame_->Pop();
-  index1.ToRegister();
-
-  Result object = frame_->Pop();
-  object.ToRegister();
-
-  Result tmp1 = allocator()->Allocate();
-  tmp1.ToRegister();
-  Result tmp2 = allocator()->Allocate();
-  tmp2.ToRegister();
-
-  frame_->Spill(object.reg());
-  frame_->Spill(index1.reg());
-  frame_->Spill(index2.reg());
-
-  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
-                                                            index1.reg(),
-                                                            index2.reg());
-
-  // Fetch the map and check if the array is in the fast case.
-  // Check that the object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
-  deferred->Branch(below);
-  __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
-            KeyedLoadIC::kSlowCaseBitFieldMask);
-  deferred->Branch(not_zero);
-
-  // Check that the object's elements are in the fast case and writable.
-  __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
-  __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
-         Immediate(FACTORY->fixed_array_map()));
-  deferred->Branch(not_equal);
-
-  // Smi-tagging is equivalent to multiplying by 2.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  // Check that both indices are smis.
-  __ mov(tmp2.reg(), index1.reg());
-  __ or_(tmp2.reg(), Operand(index2.reg()));
-  __ test(tmp2.reg(), Immediate(kSmiTagMask));
-  deferred->Branch(not_zero);
-
-  // Check that both indices are valid.
-  __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
-  __ cmp(tmp2.reg(), Operand(index1.reg()));
-  deferred->Branch(below_equal);
-  __ cmp(tmp2.reg(), Operand(index2.reg()));
-  deferred->Branch(below_equal);
-
-  // Bring addresses into index1 and index2.
-  __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
-  __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
-
-  // Swap elements.
-  __ mov(object.reg(), Operand(index1.reg(), 0));
-  __ mov(tmp2.reg(),   Operand(index2.reg(), 0));
-  __ mov(Operand(index2.reg(), 0), object.reg());
-  __ mov(Operand(index1.reg(), 0), tmp2.reg());
-
-  Label done;
-  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: do a check that both values are smis
-  // (or them together and test against the smi mask).
-
-  __ mov(tmp2.reg(), tmp1.reg());
-  __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
-  __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
-  __ bind(&done);
-
-  deferred->BindExit();
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // for receiver and function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  Result result = frame_->CallJSFunction(n_args);
-  frame_->Push(&result);
-}
-
-
-// Generates code for the Math.pow method.  It only handles special cases
-// and branches to the runtime system for everything else.  Note that this
-// function assumes that the call site has executed ToNumber on both
-// arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-  if (!CpuFeatures::IsSupported(SSE2)) {
-    Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
-    frame_->Push(&res);
-  } else {
-    CpuFeatures::Scope use_sse2(SSE2);
-    Label allocate_return;
-    // Load the two operands while leaving the values on the frame.
-    frame()->Dup();
-    Result exponent = frame()->Pop();
-    exponent.ToRegister();
-    frame()->Spill(exponent.reg());
-    frame()->PushElementAt(1);
-    Result base = frame()->Pop();
-    base.ToRegister();
-    frame()->Spill(base.reg());
-
-    Result answer = allocator()->Allocate();
-    ASSERT(answer.is_valid());
-    ASSERT(!exponent.reg().is(base.reg()));
-    JumpTarget call_runtime;
-
-    // Save 1 in xmm3 - we need this several times later on.
-    __ mov(answer.reg(), Immediate(1));
-    __ cvtsi2sd(xmm3, Operand(answer.reg()));
-
-    Label exponent_nonsmi;
-    Label base_nonsmi;
-    // If the exponent is a heap number go to that specific case.
-    __ test(exponent.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &exponent_nonsmi);
-    __ test(base.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &base_nonsmi);
-
-    // Optimized version when y is an integer.
-    Label powi;
-    __ SmiUntag(base.reg());
-    __ cvtsi2sd(xmm0, Operand(base.reg()));
-    __ jmp(&powi);
-    // The exponent is a smi and the base is a heap number.
-    __ bind(&base_nonsmi);
-    __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    call_runtime.Branch(not_equal);
-
-    __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-    // Optimized version of pow if y is an integer.
-    __ bind(&powi);
-    __ SmiUntag(exponent.reg());
-
-    // Save exponent in base as we need to check if exponent is negative later.
-    // We know that base and exponent are in different registers.
-    __ mov(base.reg(), exponent.reg());
-
-    // Get absolute value of exponent.
-    Label no_neg;
-    __ cmp(exponent.reg(), 0);
-    __ j(greater_equal, &no_neg);
-    __ neg(exponent.reg());
-    __ bind(&no_neg);
-
-    // Load xmm1 with 1.
-    __ movsd(xmm1, xmm3);
-    Label while_true;
-    Label no_multiply;
-
-    __ bind(&while_true);
-    __ shr(exponent.reg(), 1);
-    __ j(not_carry, &no_multiply);
-    __ mulsd(xmm1, xmm0);
-    __ bind(&no_multiply);
-    __ test(exponent.reg(), Operand(exponent.reg()));
-    __ mulsd(xmm0, xmm0);
-    __ j(not_zero, &while_true);
-
-    // base.reg() holds the original exponent - if it is negative,
-    // return 1/result.
-    __ test(base.reg(), Operand(base.reg()));
-    __ j(positive, &allocate_return);
-    // Special case if xmm1 has reached infinity.
-    __ mov(answer.reg(), Immediate(0x7FB00000));
-    __ movd(xmm0, Operand(answer.reg()));
-    __ cvtss2sd(xmm0, xmm0);
-    __ ucomisd(xmm0, xmm1);
-    call_runtime.Branch(equal);
-    __ divsd(xmm3, xmm1);
-    __ movsd(xmm1, xmm3);
-    __ jmp(&allocate_return);
-
-    // The exponent (or both operands) is a heap number - from here on we
-    // work with doubles.
-    __ bind(&exponent_nonsmi);
-    __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    call_runtime.Branch(not_equal);
-    __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
-    // Test if the exponent is NaN.
-    __ ucomisd(xmm1, xmm1);
-    call_runtime.Branch(parity_even);
-
-    Label base_not_smi;
-    Label handle_special_cases;
-    __ test(base.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &base_not_smi);
-    __ SmiUntag(base.reg());
-    __ cvtsi2sd(xmm0, Operand(base.reg()));
-    __ jmp(&handle_special_cases);
-    __ bind(&base_not_smi);
-    __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    call_runtime.Branch(not_equal);
-    __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
-    __ and_(answer.reg(), HeapNumber::kExponentMask);
-    __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
-    // base is NaN or +/-Infinity
-    call_runtime.Branch(greater_equal);
-    __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-    // base is in xmm0 and exponent is in xmm1.
-    __ bind(&handle_special_cases);
-    Label not_minus_half;
-    // Test for -0.5.
-    // Load xmm2 with -0.5.
-    __ mov(answer.reg(), Immediate(0xBF000000));
-    __ movd(xmm2, Operand(answer.reg()));
-    __ cvtss2sd(xmm2, xmm2);
-    // xmm2 now has -0.5.
-    __ ucomisd(xmm2, xmm1);
-    __ j(not_equal, &not_minus_half);
-
-    // Calculates reciprocal of square root.
-    // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-    __ xorpd(xmm1, xmm1);
-    __ addsd(xmm1, xmm0);
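-    // IEEE 754 addition gives (-0) + (+0) == +0, so the xorpd/addsd pair
-    // above turns a -0 base into +0 before taking the square root.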
-    __ sqrtsd(xmm1, xmm1);
-    __ divsd(xmm3, xmm1);
-    __ movsd(xmm1, xmm3);
-    __ jmp(&allocate_return);
-
-    // Test for 0.5.
-    __ bind(&not_minus_half);
-    // Load xmm2 with 0.5.
-    // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-    __ addsd(xmm2, xmm3);
-    // xmm2 now has 0.5.
-    __ ucomisd(xmm2, xmm1);
-    call_runtime.Branch(not_equal);
-    // Calculates square root.
-    // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-    __ xorpd(xmm1, xmm1);
-    __ addsd(xmm1, xmm0);
-    __ sqrtsd(xmm1, xmm1);
-
-    JumpTarget done;
-    Label failure, success;
-    __ bind(&allocate_return);
-    // Make a copy of the frame to enable us to handle allocation
-    // failure after the JumpTarget jump.
-    VirtualFrame* clone = new VirtualFrame(frame());
-    __ AllocateHeapNumber(answer.reg(), exponent.reg(),
-                          base.reg(), &failure);
-    __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
-    // Remove the two original values from the frame - we only need those
-    // in the case where we branch to runtime.
-    frame()->Drop(2);
-    exponent.Unuse();
-    base.Unuse();
-    done.Jump(&answer);
-    // Use the copy of the original frame as our current frame.
-    RegisterFile empty_regs;
-    SetFrame(clone, &empty_regs);
-    // If we experience an allocation failure we branch to runtime.
-    __ bind(&failure);
-    call_runtime.Bind();
-    answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
-    done.Bind(&answer);
-    frame()->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-// Generates code for Math.sqrt.  Note that this function assumes that
-// the call site has already applied ToNumber to the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-
-  if (!CpuFeatures::IsSupported(SSE2)) {
-    Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-    frame()->Push(&result);
-  } else {
-    CpuFeatures::Scope use_sse2(SSE2);
-    // Leave original value on the frame if we need to call runtime.
-    frame()->Dup();
-    Result result = frame()->Pop();
-    result.ToRegister();
-    frame()->Spill(result.reg());
-    Label runtime;
-    Label non_smi;
-    Label load_done;
-    JumpTarget end;
-
-    __ test(result.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &non_smi);
-    __ SmiUntag(result.reg());
-    __ cvtsi2sd(xmm0, Operand(result.reg()));
-    __ jmp(&load_done);
-    __ bind(&non_smi);
-    __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    __ j(not_equal, &runtime);
-    __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
-    __ bind(&load_done);
-    __ sqrtsd(xmm0, xmm0);
-    // A copy of the virtual frame to allow us to go to runtime after the
-    // JumpTarget jump.
-    Result scratch = allocator()->Allocate();
-    VirtualFrame* clone = new VirtualFrame(frame());
-    __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
-
-    __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
-    frame()->Drop(1);
-    scratch.Unuse();
-    end.Jump(&result);
-    // We only branch to runtime if we have an allocation error.
-    // Use the copy of the original frame as our current frame.
-    RegisterFile empty_regs;
-    SetFrame(clone, &empty_regs);
-    __ bind(&runtime);
-    result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
-    end.Bind(&result);
-    frame()->Push(&result);
-  }
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right_res = frame_->Pop();
-  Result left_res = frame_->Pop();
-  right_res.ToRegister();
-  left_res.ToRegister();
-  Result tmp_res = allocator()->Allocate();
-  ASSERT(tmp_res.is_valid());
-  Register right = right_res.reg();
-  Register left = left_res.reg();
-  Register tmp = tmp_res.reg();
-  right_res.Unuse();
-  left_res.Unuse();
-  tmp_res.Unuse();
-  __ cmp(left, Operand(right));
-  destination()->true_target()->Branch(equal);
-  // Fail if either is a non-HeapObject.
-  __ mov(tmp, left);
-  __ and_(Operand(tmp), right);
-  __ test(Operand(tmp), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-  __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
-  destination()->false_target()->Branch(not_equal);
-  __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  destination()->false_target()->Branch(not_equal);
-  __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
-  __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfNotString(value.reg());
-  }
-
-  __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
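-  // The mask bits are clear exactly when the hash field caches an array
-  // index, hence the Split on zero below.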
-
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result string = frame_->Pop();
-  string.ToRegister();
-  if (FLAG_debug_code) {
-    __ AbortIfNotString(string.reg());
-  }
-
-  Result number = allocator()->Allocate();
-  ASSERT(number.is_valid());
-  __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
-  __ IndexFromHash(number.reg(), number.reg());
-  string.Unuse();
-  frame_->Push(&number);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  ASSERT(!in_safe_int32_mode());
-  if (CheckForInlineRuntimeCall(node)) {
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Push the builtins object found in the current global object.
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), GlobalObjectOperand());
-    __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
-    frame_->Push(&temp);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    frame_->Push(node->name());
-    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                       arg_count,
-                                       loop_nesting_);
-    frame_->RestoreContextRegister();
-    frame_->Push(&answer);
-  } else {
-    // Call the C runtime function.
-    Result answer = frame_->CallRuntime(function, arg_count);
-    frame_->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    // Swap the true and false targets but keep the same actual label
-    // as the fall through.
-    destination()->Invert();
-    LoadCondition(node->expression(), destination(), true);
-    // Swap the labels back.
-    destination()->Invert();
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
-      frame_->Push(&answer);
-      return;
-    }
-
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (variable != NULL) {
-      // Delete of an unqualified identifier is disallowed in strict mode
-      // but "delete this" is.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->Push(variable->name());
-        frame_->Push(Smi::FromInt(kNonStrictMode));
-        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                              CALL_FUNCTION, 3);
-        frame_->Push(&answer);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Call the runtime to delete from the context holding the named
-        // variable.  Sync the virtual frame eagerly so we can push the
-        // arguments directly into place.
-        frame_->SyncRange(0, frame_->element_count() - 1);
-        frame_->EmitPush(esi);
-        frame_->EmitPush(Immediate(variable->name()));
-        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->Push(&answer);
-      } else {
-        // Default: Deleting a non-global, non-dynamically-introduced
-        // variable yields false.
-        frame_->Push(FACTORY->false_value());
-      }
-    } else {
-      // Default: Result of deleting expressions is true.
-      Load(node->expression());  // may have side-effects
-      frame_->SetElementAt(0, FACTORY->true_value());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(&answer);
-
-  } else if (op == Token::VOID) {
-    Expression* expression = node->expression();
-    if (expression && expression->AsLiteral() && (
-        expression->AsLiteral()->IsTrue() ||
-        expression->AsLiteral()->IsFalse() ||
-        expression->AsLiteral()->handle()->IsNumber() ||
-        expression->AsLiteral()->handle()->IsString() ||
-        expression->AsLiteral()->handle()->IsJSRegExp() ||
-        expression->AsLiteral()->IsNull())) {
-      // Omit evaluating the value of the primitive literal.
-      // It will be discarded anyway, and can have no side effect.
-      frame_->Push(FACTORY->undefined_value());
-    } else {
-      Load(node->expression());
-      frame_->SetElementAt(0, FACTORY->undefined_value());
-    }
-
-  } else {
-    if (in_safe_int32_mode()) {
-      Visit(node->expression());
-      Result value = frame_->Pop();
-      ASSERT(value.is_untagged_int32());
-      // Registers containing an int32 value are not multiply used.
-      ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
-      value.ToRegister();
-      switch (op) {
-        case Token::SUB: {
-          __ neg(value.reg());
-          frame_->Push(&value);
-          if (node->no_negative_zero()) {
-            // -MIN_INT is MIN_INT with the overflow flag set.
-            unsafe_bailout_->Branch(overflow);
-          } else {
-            // MIN_INT and 0 both have bad negations; both have their
-            // low 31 bits all zero, which is what the test detects.
-            __ test(value.reg(), Immediate(0x7FFFFFFF));
-            unsafe_bailout_->Branch(zero);
-          }
-          break;
-        }
-        case Token::BIT_NOT: {
-          __ not_(value.reg());
-          frame_->Push(&value);
-          break;
-        }
-        case Token::ADD: {
-          // Unary plus has no effect on int32 values.
-          frame_->Push(&value);
-          break;
-        }
-        default:
-          UNREACHABLE();
-          break;
-      }
-    } else {
-      Load(node->expression());
-      bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      bool no_negative_zero = node->expression()->no_negative_zero();
-      switch (op) {
-        case Token::NOT:
-        case Token::DELETE:
-        case Token::TYPEOF:
-          UNREACHABLE();  // handled above
-          break;
-
-        case Token::SUB: {
-          GenericUnaryOpStub stub(
-              Token::SUB,
-              overwrite,
-              NO_UNARY_FLAGS,
-              no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-          Result operand = frame_->Pop();
-          Result answer = frame_->CallStub(&stub, &operand);
-          answer.set_type_info(TypeInfo::Number());
-          frame_->Push(&answer);
-          break;
-        }
-        case Token::BIT_NOT: {
-          // Smi check.
-          JumpTarget smi_label;
-          JumpTarget continue_label;
-          Result operand = frame_->Pop();
-          TypeInfo operand_info = operand.type_info();
-          operand.ToRegister();
-          if (operand_info.IsSmi()) {
-            if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
-            frame_->Spill(operand.reg());
-            // Set smi tag bit. It will be reset by the not operation.
-            __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
-            __ not_(operand.reg());
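-            // For a tagged smi 2n, setting the tag bit and negating all
-            // bits gives ~(2n + 1) = -2n - 2 = 2 * ~n, i.e. the tagged
-            // smi for ~n.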
-            Result answer = operand;
-            answer.set_type_info(TypeInfo::Smi());
-            frame_->Push(&answer);
-          } else {
-            __ test(operand.reg(), Immediate(kSmiTagMask));
-            smi_label.Branch(zero, &operand, taken);
-
-            GenericUnaryOpStub stub(Token::BIT_NOT,
-                                    overwrite,
-                                    NO_UNARY_SMI_CODE_IN_STUB);
-            Result answer = frame_->CallStub(&stub, &operand);
-            continue_label.Jump(&answer);
-
-            smi_label.Bind(&answer);
-            answer.ToRegister();
-            frame_->Spill(answer.reg());
-            // Set smi tag bit. It will be reset by the not operation.
-            __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
-            __ not_(answer.reg());
-
-            continue_label.Bind(&answer);
-            answer.set_type_info(TypeInfo::Integer32());
-            frame_->Push(&answer);
-          }
-          break;
-        }
-        case Token::ADD: {
-          // Smi check.
-          JumpTarget continue_label;
-          Result operand = frame_->Pop();
-          TypeInfo operand_info = operand.type_info();
-          operand.ToRegister();
-          __ test(operand.reg(), Immediate(kSmiTagMask));
-          continue_label.Branch(zero, &operand, taken);
-
-          frame_->Push(&operand);
-          Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
-                                              CALL_FUNCTION, 1);
-
-          continue_label.Bind(&answer);
-          if (operand_info.IsSmi()) {
-            answer.set_type_info(TypeInfo::Smi());
-          } else if (operand_info.IsInteger32()) {
-            answer.set_type_info(TypeInfo::Integer32());
-          } else {
-            answer.set_type_info(TypeInfo::Number());
-          }
-          frame_->Push(&answer);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
-    }
-  }
-}
-
-
-// The value in dst was optimistically incremented or decremented.  The
-// result overflowed or was not smi tagged.  Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub.  The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
-  DeferredPrefixCountOperation(Register dst,
-                               bool is_increment,
-                               TypeInfo input_type)
-      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
-  // Undo the optimistic smi operation.
-  if (is_increment_) {
-    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
-  } else {
-    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
-  }
-  Register left;
-  if (input_type_.IsNumber()) {
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    left = eax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.  The
-// result overflowed or was not smi tagged.  Undo the operation and call
-// into the runtime to convert the argument to a number.  Update the
-// original value in old.  Call the specialized add or subtract stub.
-// The result is left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
-  DeferredPostfixCountOperation(Register dst,
-                                Register old,
-                                bool is_increment,
-                                TypeInfo input_type)
-      : dst_(dst),
-        old_(old),
-        is_increment_(is_increment),
-        input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Register old_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
-  // Undo the optimistic smi operation.
-  if (is_increment_) {
-    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
-  } else {
-    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
-  }
-  Register left;
-  if (input_type_.IsNumber()) {
-    __ push(dst_);  // Save the input to use as the old value.
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    __ push(eax);  // Save the result of ToNumber to use as the old value.
-    left = eax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CountOperation");
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
-  // Postfix operations need a stack slot under the reference to hold
-  // the old value while the new value is being stored.  That way, if
-  // storing the new value requires a call, the old value is on the
-  // frame where it can be spilled.
-  if (is_postfix) frame_->Push(Smi::FromInt(0));
-
-  // A constant reference is never stored to, so it is not treated as a
-  // compound assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!is_postfix) frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    target.TakeValue();
-
-    Result new_value = frame_->Pop();
-    new_value.ToRegister();
-
-    Result old_value;  // Only allocated in the postfix case.
-    if (is_postfix) {
-      // Allocate a temporary to preserve the old value.
-      old_value = allocator_->Allocate();
-      ASSERT(old_value.is_valid());
-      __ mov(old_value.reg(), new_value.reg());
-
-      // The return value for postfix operations is ToNumber(input).
-      // Keep more precise type info if the input is some kind of
-      // number already. If the input is not a number we have to wait
-      // for the deferred code to convert it.
-      if (new_value.type_info().IsNumber()) {
-        old_value.set_type_info(new_value.type_info());
-      }
-    }
-
-    // Ensure the new value is writable.
-    frame_->Spill(new_value.reg());
-
-    Result tmp;
-    if (new_value.is_smi()) {
-      if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
-    } else {
-      // We don't know statically if the input is a smi.
-      // In order to combine the overflow and the smi tag check, we need
-      // to be able to allocate a byte register.  We attempt to do so
-      // without spilling.  If we fail, we will generate separate overflow
-      // and smi tag checks.
-      // We allocate and clear a temporary byte register before performing
-      // the count operation since clearing the register using xor will clear
-      // the overflow flag.
-      tmp = allocator_->AllocateByteRegisterWithoutSpilling();
-      if (tmp.is_valid()) {
-        __ Set(tmp.reg(), Immediate(0));
-      }
-    }
-
-    if (is_increment) {
-      __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
-    } else {
-      __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
-    }
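-    // Smi::FromInt(1) is the raw value 2 (the payload shifted past the
-    // tag bit), so the add/sub above is the optimistic smi increment or
-    // decrement; overflow or a non-smi input is handled by the deferred
-    // code allocated below.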
-
-    DeferredCode* deferred = NULL;
-    if (is_postfix) {
-      deferred = new DeferredPostfixCountOperation(new_value.reg(),
-                                                   old_value.reg(),
-                                                   is_increment,
-                                                   new_value.type_info());
-    } else {
-      deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment,
-                                                  new_value.type_info());
-    }
-
-    if (new_value.is_smi()) {
-      // If the input is a smi we only need to check for overflow.
-      deferred->Branch(overflow);
-    } else {
-      // If the count operation didn't overflow and the result is a valid
-      // smi, we're done. Otherwise, we jump to the deferred slow-case
-      // code.
-      // We combine the overflow and the smi tag check if we could
-      // successfully allocate a temporary byte register.
-      if (tmp.is_valid()) {
-        __ setcc(overflow, tmp.reg());
-        __ or_(Operand(tmp.reg()), new_value.reg());
-        __ test(tmp.reg(), Immediate(kSmiTagMask));
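-        // setcc left 1 in the byte register on overflow; or-ing in the
-        // new value folds in the smi tag bit, so the test above covers
-        // both the overflow and the smi check at once.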
-        tmp.Unuse();
-        deferred->Branch(not_zero);
-      } else {
-        // Otherwise we test separately for overflow and smi tag.
-        deferred->Branch(overflow);
-        __ test(new_value.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-      }
-    }
-    deferred->BindExit();
-
-    // Postfix count operations return their input converted to
-    // number. The case when the input is already a number is covered
-    // above in the allocation code for old_value.
-    if (is_postfix && !new_value.type_info().IsNumber()) {
-      old_value.set_type_info(TypeInfo::Number());
-    }
-
-    // The result of ++ or -- is an Integer32 if the
-    // input is a smi. Otherwise it is a number.
-    if (new_value.is_smi()) {
-      new_value.set_type_info(TypeInfo::Integer32());
-    } else {
-      new_value.set_type_info(TypeInfo::Number());
-    }
-
-    // Postfix: store the old value in the allocated slot under the
-    // reference.
-    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
-    frame_->Push(&new_value);
-    // Non-constant: update the reference.
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
-  }
-
-  // Postfix: drop the new value and use the old.
-  if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
-  Token::Value op = node->op();
-  Comment cmnt(masm_, "[ Int32BinaryOperation");
-  ASSERT(in_safe_int32_mode());
-  ASSERT(safe_int32_mode_enabled());
-  ASSERT(FLAG_safe_int32_compiler);
-
-  if (op == Token::COMMA) {
-    // Discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  ASSERT(right.is_untagged_int32());
-  ASSERT(left.is_untagged_int32());
-  // Registers containing an int32 value are not multiply used.
-  ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
-  ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
-
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-    case Token::AND:
-      UNREACHABLE();
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      if (left.is_constant() || right.is_constant()) {
-        int32_t value;  // Put constant in value, non-constant in left.
-        // Constants are known to be int32 values, from static analysis,
-        // or else will be converted to int32 by implicit ECMA [[ToInt32]].
-        if (left.is_constant()) {
-          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
-          value = NumberToInt32(*left.handle());
-          left = right;
-        } else {
-          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
-          value = NumberToInt32(*right.handle());
-        }
-
-        left.ToRegister();
-        if (op == Token::BIT_OR) {
-          __ or_(Operand(left.reg()), Immediate(value));
-        } else if (op == Token::BIT_XOR) {
-          __ xor_(Operand(left.reg()), Immediate(value));
-        } else {
-          ASSERT(op == Token::BIT_AND);
-          __ and_(Operand(left.reg()), Immediate(value));
-        }
-      } else {
-        ASSERT(left.is_register());
-        ASSERT(right.is_register());
-        if (op == Token::BIT_OR) {
-          __ or_(left.reg(), Operand(right.reg()));
-        } else if (op == Token::BIT_XOR) {
-          __ xor_(left.reg(), Operand(right.reg()));
-        } else {
-          ASSERT(op == Token::BIT_AND);
-          __ and_(left.reg(), Operand(right.reg()));
-        }
-      }
-      frame_->Push(&left);
-      right.Unuse();
-      break;
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      bool test_shr_overflow = false;
-      left.ToRegister();
-      if (right.is_constant()) {
-        ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
-        int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
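-        // ECMA-262 reduces shift counts modulo 32, which matches the
-        // ia32 shift instructions, so masking with 0x1F is safe.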
-        if (op == Token::SAR) {
-          __ sar(left.reg(), shift_amount);
-        } else if (op == Token::SHL) {
-          __ shl(left.reg(), shift_amount);
-        } else {
-          ASSERT(op == Token::SHR);
-          __ shr(left.reg(), shift_amount);
-          if (shift_amount == 0) test_shr_overflow = true;
-        }
-      } else {
-        // Move right to ecx.
-        if (left.is_register() && left.reg().is(ecx)) {
-          right.ToRegister();
-          __ xchg(left.reg(), right.reg());
-          left = right;  // Left is unused here, copy of right unused by Push.
-        } else {
-          right.ToRegister(ecx);
-          left.ToRegister();
-        }
-        if (op == Token::SAR) {
-          __ sar_cl(left.reg());
-        } else if (op == Token::SHL) {
-          __ shl_cl(left.reg());
-        } else {
-          ASSERT(op == Token::SHR);
-          __ shr_cl(left.reg());
-          test_shr_overflow = true;
-        }
-      }
-      {
-        Register left_reg = left.reg();
-        frame_->Push(&left);
-        right.Unuse();
-        if (test_shr_overflow && !node->to_int32()) {
-          // Uint32 results with top bit set are not Int32 values.
-          // If they will be forced to Int32, skip the test.
-          // Test is needed because shr with shift amount 0 does not set flags.
-          __ test(left_reg, Operand(left_reg));
-          unsafe_bailout_->Branch(sign);
-        }
-      }
-      break;
-    }
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-      if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
-        int32_t value;  // Put constant in value, non-constant in left.
-        if (right.is_constant()) {
-          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
-          value = NumberToInt32(*right.handle());
-        } else {
-          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
-          value = NumberToInt32(*left.handle());
-          left = right;
-        }
-
-        left.ToRegister();
-        if (op == Token::ADD) {
-          __ add(Operand(left.reg()), Immediate(value));
-        } else if (op == Token::SUB) {
-          __ sub(Operand(left.reg()), Immediate(value));
-        } else {
-          ASSERT(op == Token::MUL);
-          __ imul(left.reg(), left.reg(), value);
-        }
-      } else {
-        left.ToRegister();
-        ASSERT(left.is_register());
-        ASSERT(right.is_register());
-        if (op == Token::ADD) {
-          __ add(left.reg(), Operand(right.reg()));
-        } else if (op == Token::SUB) {
-          __ sub(left.reg(), Operand(right.reg()));
-        } else {
-          ASSERT(op == Token::MUL);
-          // We have statically verified that a negative zero can be ignored.
-          __ imul(left.reg(), Operand(right.reg()));
-        }
-      }
-      right.Unuse();
-      frame_->Push(&left);
-      if (!node->to_int32() || op == Token::MUL) {
-        // If ToInt32 is called on the result of ADD or SUB, we don't
-        // care about overflow - the result is truncated anyway.  The
-        // result of MUL, however, may not be representable precisely as
-        // a double, so MUL always has to be checked for overflow.
-        unsafe_bailout_->Branch(overflow);
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD: {
-      if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
-        if (left.is_register() && left.reg().is(edi)) {
-          right.ToRegister(ebx);
-        } else {
-          right.ToRegister(edi);
-        }
-      }
-      left.ToRegister(eax);
-      Result edx_reg = allocator_->Allocate(edx);
-      right.ToRegister();
-      // The results are unused here because BreakTarget::Branch cannot handle
-      // live results.
-      Register right_reg = right.reg();
-      left.Unuse();
-      right.Unuse();
-      edx_reg.Unuse();
-      __ cmp(right_reg, 0);
-      // Ensure divisor is positive: no chance of non-int32 or -0 result.
-      unsafe_bailout_->Branch(less_equal);
-      __ cdq();  // Sign-extend eax into edx:eax
-      __ idiv(right_reg);
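-      // idiv divides edx:eax by the operand and leaves the quotient in
-      // eax and the remainder in edx.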
-      if (op == Token::MOD) {
-        // Negative zero can arise from a negative dividend with a zero result.
-        if (!node->no_negative_zero()) {
-          Label not_negative_zero;
-          __ test(edx, Operand(edx));
-          __ j(not_zero, &not_negative_zero);
-          __ test(eax, Operand(eax));
-          unsafe_bailout_->Branch(negative);
-          __ bind(&not_negative_zero);
-        }
-        Result edx_result(edx, TypeInfo::Integer32());
-        edx_result.set_untagged_int32(true);
-        frame_->Push(&edx_result);
-      } else {
-        ASSERT(op == Token::DIV);
-        __ test(edx, Operand(edx));
-        unsafe_bailout_->Branch(not_equal);
-        Result eax_result(eax, TypeInfo::Integer32());
-        eax_result.set_untagged_int32(true);
-        frame_->Push(&eax_result);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
-
-  // NOTE: If the left hand side produces a materialized value (not
-  // control flow), we force the right hand side to do the same. This
-  // is necessary because we assume that if we get control flow on the
-  // last path out of an expression we got it on all paths.
-  if (node->op() == Token::AND) {
-    ASSERT(!in_safe_int32_mode());
-    JumpTarget is_true;
-    ControlDestination dest(&is_true, destination()->false_target(), true);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The current false target was used as the fall-through.  If
-      // there are no dangling jumps to is_true then the left
-      // subexpression was unconditionally false.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_true.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current false target was a forward jump then we have a
-        // valid frame, we have just bound the false target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->false_target()->Unuse();
-          destination()->false_target()->Jump();
-        }
-        is_true.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have actually just jumped to or bound the current false
-        // target but the current control destination is not marked as
-        // used.
-        destination()->Use(false);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_true
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_true
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&pop_and_continue, &exit, true);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    ASSERT(!in_safe_int32_mode());
-    JumpTarget is_false;
-    ControlDestination dest(destination()->true_target(), &is_false, false);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.true_was_fall_through()) {
-      // The current true target was used as the fall-through.  If
-      // there are no dangling jumps to is_false then the left
-      // subexpression was unconditionally true.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_false.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current true target was a forward jump then we have a
-        // valid frame, we have just bound the true target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->true_target()->Unuse();
-          destination()->true_target()->Jump();
-        }
-        is_false.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have just jumped to or bound the current true target but
-        // the current control destination is not marked as used.
-        destination()->Use(true);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_false
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_false
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&exit, &pop_and_continue, false);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-  }
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else if (in_safe_int32_mode()) {
-    Visit(node->left());
-    Visit(node->right());
-    Int32BinaryOperation(node);
-  } else {
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    OverwriteMode overwrite_mode = NO_OVERWRITE;
-    if (node->left()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_LEFT;
-    } else if (node->right()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_RIGHT;
-    }
-
-    if (node->left()->IsTrivial()) {
-      Load(node->right());
-      Result right = frame_->Pop();
-      frame_->Push(node->left());
-      frame_->Push(&right);
-    } else {
-      Load(node->left());
-      Load(node->right());
-    }
-    GenericBinaryOperation(node, overwrite_mode);
-  }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  ASSERT(!in_safe_int32_mode());
-  frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  bool left_already_loaded = false;
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
-    // Load the operand and move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Result answer = frame_->Pop();
-    answer.ToRegister();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->true_target()->Branch(zero);
-      frame_->Spill(answer.reg());
-      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ cmp(answer.reg(), FACTORY->heap_number_map());
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-
-      // It can be an undetectable string object.
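-      // (Undetectable objects such as document.all must report typeof
-      // 'undefined', so they must not be classified as strings here.)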
-      Result temp = allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
-      temp.Unuse();
-      answer.Unuse();
-      destination()->Split(below);
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ cmp(answer.reg(), FACTORY->true_value());
-      destination()->true_target()->Branch(equal);
-      __ cmp(answer.reg(), FACTORY->false_value());
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ cmp(answer.reg(), FACTORY->undefined_value());
-      destination()->true_target()->Branch(equal);
-
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-
-      // It can be an undetectable object.
-      frame_->Spill(answer.reg());
-      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      answer.Unuse();
-      destination()->Split(not_zero);
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-      frame_->Spill(answer.reg());
-      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
-      destination()->true_target()->Branch(equal);
-      // Regular expressions are callable so typeof == 'function'.
-      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
-      answer.Unuse();
-      destination()->Split(equal);
-    } else if (check->Equals(HEAP->object_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-      __ cmp(answer.reg(), FACTORY->null_value());
-      destination()->true_target()->Branch(equal);
-
-      Result map = allocator()->Allocate();
-      ASSERT(map.is_valid());
-      // Regular expressions are typeof == 'function', not 'object'.
-      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
-      destination()->false_target()->Branch(equal);
-
-      // It can be an undetectable object.
-      __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      destination()->false_target()->Branch(not_zero);
-      // Do a range test for JSObject type.  We can't use
-      // MacroAssembler::IsInstanceJSObjectType, because we are using a
-      // ControlDestination, so we copy its implementation here.
-      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
-      __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
-      __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-      answer.Unuse();
-      map.Unuse();
-      destination()->Split(below_equal);
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      answer.Unuse();
-      destination()->Goto(false);
-    }
-    return;
-  } else if (op == Token::LT &&
-             right->AsLiteral() != NULL &&
-             right->AsLiteral()->handle()->IsHeapNumber()) {
-    Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
-    if (check->value() == 2147483648.0) {  // 0x80000000.
-      Load(left);
-      left_already_loaded = true;
-      Result lhs = frame_->Pop();
-      lhs.ToRegister();
-      __ test(lhs.reg(), Immediate(kSmiTagMask));
-      destination()->true_target()->Branch(zero);  // All Smis are less.
-      Result scratch = allocator()->Allocate();
-      ASSERT(scratch.is_valid());
-      __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
-      __ cmp(scratch.reg(), FACTORY->heap_number_map());
-      JumpTarget not_a_number;
-      not_a_number.Branch(not_equal, &lhs);
-      __ mov(scratch.reg(),
-             FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
-      __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
-      not_a_number.Branch(above_equal, &lhs);  // It's a negative NaN or -Inf.
-      const uint32_t borderline_exponent =
-          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
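-      // A positive double is below 2^31 exactly when its biased exponent
-      // is less than kExponentBias + 31, so comparing the high word with
-      // this threshold decides lhs < 0x80000000.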
-      scratch.Unuse();
-      lhs.Unuse();
-      destination()->true_target()->Branch(less);
-      destination()->false_target()->Jump();
-
-      not_a_number.Bind(&lhs);
-      frame_->Push(&lhs);
-    }
-  }
-
-  Condition cc = no_condition;
-  bool strict = false;
-  switch (op) {
-    case Token::EQ_STRICT:
-      strict = true;
-      // Fall through
-    case Token::EQ:
-      cc = equal;
-      break;
-    case Token::LT:
-      cc = less;
-      break;
-    case Token::GT:
-      cc = greater;
-      break;
-    case Token::LTE:
-      cc = less_equal;
-      break;
-    case Token::GTE:
-      cc = greater_equal;
-      break;
-    case Token::IN: {
-      if (!left_already_loaded) Load(left);
-      Load(right);
-      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
-      frame_->Push(&answer);  // push the result
-      return;
-    }
-    case Token::INSTANCEOF: {
-      if (!left_already_loaded) Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      Result answer = frame_->CallStub(&stub, 2);
-      answer.ToRegister();
-      __ test(answer.reg(), Operand(answer.reg()));
-      answer.Unuse();
-      destination()->Split(zero);
-      return;
-    }
-    default:
-      UNREACHABLE();
-  }
-
-  if (left->IsTrivial()) {
-    if (!left_already_loaded) {
-      Load(right);
-      Result right_result = frame_->Pop();
-      frame_->Push(left);
-      frame_->Push(&right_result);
-    } else {
-      Load(right);
-    }
-  } else {
-    if (!left_already_loaded) Load(left);
-    Load(right);
-  }
-  Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Result operand = frame_->Pop();
-  operand.ToRegister();
-  __ cmp(operand.reg(), FACTORY->null_value());
-  if (node->is_strict()) {
-    operand.Unuse();
-    destination()->Split(equal);
-  } else {
-    // The 'null' value is only equal to 'undefined' if using non-strict
-    // comparisons.
-    destination()->true_target()->Branch(equal);
-    __ cmp(operand.reg(), FACTORY->undefined_value());
-    destination()->true_target()->Branch(equal);
-    __ test(operand.reg(), Immediate(kSmiTagMask));
-    destination()->false_target()->Branch(equal);
-
-    // It can be an undetectable object.
-    // Use a scratch register in preference to spilling operand.reg().
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(),
-           FieldOperand(operand.reg(), HeapObject::kMapOffset));
-    __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    temp.Unuse();
-    operand.Unuse();
-    destination()->Split(not_zero);
-  }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
-      && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
-      && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
-      && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
-      && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceGetNamedValue(Register dst,
-                                 Register receiver,
-                                 Handle<String> name,
-                                 bool is_contextual)
-      : dst_(dst),
-        receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  if (!receiver_.is(eax)) {
-    __ mov(eax, receiver_);
-  }
-  __ Set(ecx, Immediate(name_));
-  Handle<Code> ic(masm()->isolate()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  RelocInfo::Mode mode = is_contextual_
-      ? RelocInfo::CODE_TARGET_CONTEXT
-      : RelocInfo::CODE_TARGET;
-  __ call(ic, mode);
-  // The call must be followed by:
-  // - a test eax instruction to indicate that the inobject property
-  //   case was inlined.
-  // - a mov ecx or mov edx instruction to indicate that the
-  //   contextual property load was inlined.
-  //
-  // Store the delta to the map check instruction here in the test
-  // instruction.  Use masm_-> instead of the __ macro since the
-  // latter can't return a value.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  Counters* counters = masm()->isolate()->counters();
-  if (is_contextual_) {
-    masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
-    __ IncrementCounter(counters->named_load_global_inline_miss(), 1);
-    if (is_dont_delete_) {
-      __ IncrementCounter(counters->dont_delete_hint_miss(), 1);
-    }
-  } else {
-    masm_->test(eax, Immediate(-delta_to_patch_site));
-    __ IncrementCounter(counters->named_load_inline_miss(), 1);
-  }
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetKeyedValue(Register dst,
-                                          Register receiver,
-                                          Register key)
-      : dst_(dst), receiver_(receiver), key_(key) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  if (!receiver_.is(eax)) {
-    // Register eax is available for key.
-    if (!key_.is(eax)) {
-      __ mov(eax, key_);
-    }
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-  } else if (!key_.is(edx)) {
-    // Register edx is available for receiver.
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-    if (!key_.is(eax)) {
-      __ mov(eax, key_);
-    }
-  } else {
-    __ xchg(edx, eax);
-  }
-  // Calculate the delta from the IC call instruction to the map check
-  // cmp instruction in the inlined version.  This delta is stored in
-  // a test(eax, delta) instruction after the call so that we can find
-  // it in the IC initialization code and patch the cmp instruction.
-  // This means that we cannot allow test instructions after calls to
-  // KeyedLoadIC stubs in other places.
-  Handle<Code> ic(masm()->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the __
-  // macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 Register scratch,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        scratch_(scratch),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  Register scratch_;
-  Label patch_site_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
-  // Move value_ to eax, key_ to ecx, and receiver_ to edx.
-  Register old_value = value_;
-
-  // First, move value to eax.
-  if (!value_.is(eax)) {
-    if (key_.is(eax)) {
-      // Move key_ out of eax, preferably to ecx.
-      if (!value_.is(ecx) && !receiver_.is(ecx)) {
-        __ mov(ecx, key_);
-        key_ = ecx;
-      } else {
-        __ mov(scratch_, key_);
-        key_ = scratch_;
-      }
-    }
-    if (receiver_.is(eax)) {
-      // Move receiver_ out of eax, preferably to edx.
-      if (!value_.is(edx) && !key_.is(edx)) {
-        __ mov(edx, receiver_);
-        receiver_ = edx;
-      } else {
-        // Both moves to scratch are from eax; no valid path hits both.
-        __ mov(scratch_, receiver_);
-        receiver_ = scratch_;
-      }
-    }
-    __ mov(eax, value_);
-    value_ = eax;
-  }
-
-  // Now value_ is in eax.  Move the other two to the right positions.
-  // We do not update the variables key_ and receiver_ to ecx and edx.
-  if (key_.is(ecx)) {
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-  } else if (key_.is(edx)) {
-    if (receiver_.is(ecx)) {
-      __ xchg(edx, ecx);
-    } else {
-      __ mov(ecx, key_);
-      if (!receiver_.is(edx)) {
-        __ mov(edx, receiver_);
-      }
-    }
-  } else {  // Key is not in edx or ecx.
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-    __ mov(ecx, key_);
-  }
-
-  // Call the IC stub.
-  Handle<Code> ic(masm()->isolate()->builtins()->builtin(
-      (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                    : Builtins::kKeyedStoreIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the
-  // __ macro because the macro sometimes expands into something that
-  // can't return a value.  This is encountered when doing generated
-  // code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  // Restore the value register from eax (returned by the store IC).
-  if (!old_value.is(eax)) __ mov(old_value, eax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-
-  Isolate* isolate = masm()->isolate();
-  Factory* factory = isolate->factory();
-  Counters* counters = isolate->counters();
-
-  bool contextual_load_in_builtin =
-      is_contextual &&
-      (isolate->bootstrapper()->IsActive() ||
-       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
-  Result result;
-  // Do not inline in global code or when not in a loop.
-  if (scope()->is_global_scope() ||
-      loop_nesting() == 0 ||
-      contextual_load_in_builtin) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    frame()->Push(name);
-
-    RelocInfo::Mode mode = is_contextual
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    result = frame()->CallLoadIC(mode);
-    // A test eax instruction following the call signals that the inobject
-    // property case was inlined.  Ensure that there is not a test eax
-    // instruction here.
-    __ nop();
-  } else {
-    // Inline the property load.
-    Comment cmnt(masm(), is_contextual
-                         ? "[ Inlined contextual property load"
-                         : "[ Inlined named property load");
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(),
-                                           receiver.reg(),
-                                           name,
-                                           is_contextual);
-
-    if (!is_contextual) {
-      // Check that the receiver is a heap object.
-      __ test(receiver.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(zero);
-    }
-
-    __ bind(deferred->patch_site());
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                Immediate(factory->null_value()));
-    // This branch is always a forwards branch so it's always a fixed size
-    // which allows the assert below to succeed and patching to work.
-    deferred->Branch(not_equal);
-
-    // The delta from the patch label to the actual load must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-           LoadIC::kOffsetToLoadInstruction);
-
-    if (is_contextual) {
-      // Load the (initially invalid) cell and get its value.
-      masm()->mov(result.reg(), factory->null_value());
-      if (FLAG_debug_code) {
-        __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
-               factory->global_property_cell_map());
-        __ Assert(equal, "Uninitialized inlined contextual load");
-      }
-      __ mov(result.reg(),
-             FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
-      __ cmp(result.reg(), factory->the_hole_value());
-      deferred->Branch(equal);
-      bool is_dont_delete = false;
-      if (!info_->closure().is_null()) {
-        // When doing lazy compilation we can check if the global cell
-        // already exists and use its "don't delete" status as a hint.
-        AssertNoAllocation no_gc;
-        v8::internal::GlobalObject* global_object =
-            info_->closure()->context()->global();
-        LookupResult lookup;
-        global_object->LocalLookupRealNamedProperty(*name, &lookup);
-        if (lookup.IsProperty() && lookup.type() == NORMAL) {
-          ASSERT(lookup.holder() == global_object);
-          ASSERT(global_object->property_dictionary()->ValueAt(
-              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
-          is_dont_delete = lookup.IsDontDelete();
-        }
-      }
-      deferred->set_is_dont_delete(is_dont_delete);
-      if (!is_dont_delete) {
-        __ cmp(result.reg(), factory->the_hole_value());
-        deferred->Branch(equal);
-      } else if (FLAG_debug_code) {
-        __ cmp(result.reg(), factory->the_hole_value());
-        __ Check(not_equal, "DontDelete cells can't contain the hole");
-      }
-      __ IncrementCounter(counters->named_load_global_inline(), 1);
-      if (is_dont_delete) {
-        __ IncrementCounter(counters->dont_delete_hint_hit(), 1);
-      }
-    } else {
-      // The initial (invalid) offset has to be large enough to force a 32-bit
-      // instruction encoding to allow patching with an arbitrary offset.  Use
-      // kMaxInt (minus kHeapObjectTag).
-      int offset = kMaxInt;
-      masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
-      __ IncrementCounter(counters->named_load_inline(), 1);
-    }
-
-    deferred->BindExit();
-  }
-  ASSERT(frame()->height() == original_height - 1);
-  return result;
-}
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // A test eax instruction following the call signals that the inobject
-    // property case was inlined.  Ensure that there is not a test eax
-    // instruction here.
-    __ nop();
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-    Label patch_site;
-
-    // Get the value and receiver from the stack.
-    Result value = frame()->Pop();
-    value.ToRegister();
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    // Allocate result register.
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
-    // Check that the receiver is a heap object.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    slow.Branch(zero, &value, &receiver);
-
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    __ bind(&patch_site);
-    masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                Immediate(FACTORY->null_value()));
-    // This branch is always a forwards branch so it's always a fixed size
-    // which allows the assert below to succeed and patching to work.
-    slow.Branch(not_equal, &value, &receiver);
-
-    // The delta from the patch label to the store offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
-           StoreIC::kOffsetToStoreInstruction);
-
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    __ mov(FieldOperand(receiver.reg(), offset), value.reg());
-    __ mov(result.reg(), Operand(value.reg()));
-
-    // Allocate scratch register for write barrier.
-    Result scratch = allocator()->Allocate();
-    ASSERT(scratch.is_valid());
-
-    // The write barrier clobbers all input registers, so spill the
-    // receiver and the value.
-    frame_->Spill(receiver.reg());
-    frame_->Spill(value.reg());
-
-    // If the receiver and the value share a register, allocate a new
-    // register for the receiver.
-    if (receiver.reg().is(value.reg())) {
-      receiver = allocator()->Allocate();
-      ASSERT(receiver.is_valid());
-      __ mov(receiver.reg(), Operand(value.reg()));
-    }
-
-    // Update the write barrier. To save instructions in the inlined
-    // version we do not filter smis.
-    Label skip_write_barrier;
-    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
-    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
-    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
-    if (FLAG_debug_code) {
-      __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
-    }
-    __ bind(&skip_write_barrier);
-    value.Unuse();
-    scratch.Unuse();
-    receiver.Unuse();
-    done.Jump(&result);
-
-    slow.Bind(&value, &receiver);
-    frame()->Push(&receiver);
-    frame()->Push(&value);
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // Encode the offset to the map check instruction and the offset
-    // to the write barrier store address computation in a test eax
-    // instruction.
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ test(eax,
-            Immediate((delta_to_record_write << 16) | delta_to_patch_site));
-    done.Bind(&result);
-  }
-
-  ASSERT_EQ(expected_height, frame()->height());
-  return result;
-}
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Inline array load code if inside a loop.  We do not know the
-  // receiver map yet, so we initially generate the code with a check
-  // against an invalid map.  In the inline cache code, we patch the map
-  // check if appropriate.
-  if (loop_nesting() > 0) {
-    Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    // If key and receiver are shared registers on the frame, their values will
-    // be automatically saved and restored when going to deferred code.
-    // The result is in elements, which is guaranteed non-shared.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(elements.reg(),
-                                           receiver.reg(),
-                                           key.reg());
-
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
-
-    // Check that the receiver has the expected map.
-    // Initially, use an invalid map. The map is patched in the IC
-    // initialization code.
-    __ bind(deferred->patch_site());
-    // Use masm-> here instead of the double underscore macro since extra
-    // coverage code can interfere with the patching.
-    masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-               Immediate(FACTORY->null_value()));
-    deferred->Branch(not_equal);
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ test(key.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
-    }
-
-    // Get the elements array from the receiver.
-    __ mov(elements.reg(),
-           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ AssertFastElements(elements.reg());
-
-    // Check that the key is within bounds.
-    __ cmp(key.reg(),
-           FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Load and check that the result is not the hole.
-    // Key holds a smi.
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-    __ mov(elements.reg(),
-           FieldOperand(elements.reg(),
-                        key.reg(),
-                        times_2,
-                        FixedArray::kHeaderSize));
-    result = elements;
-    __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
-    deferred->Branch(equal);
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    Comment cmnt(masm_, "[ Load from keyed Property");
-    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed load.  The explicit nop instruction is here because
-    // the push that follows might be peep-hole optimized away.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 2);
-  return result;
-}
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Generate an inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
-    // Get the receiver, key and value into registers.
-    result = frame()->Pop();
-    Result key = frame()->Pop();
-    Result receiver = frame()->Pop();
-
-    Result tmp = allocator_->Allocate();
-    ASSERT(tmp.is_valid());
-    Result tmp2 = allocator_->Allocate();
-    ASSERT(tmp2.is_valid());
-
-    // Determine whether the value is a constant before putting it in a
-    // register.
-    bool value_is_constant = result.is_constant();
-
-    // Make sure that value, key and receiver are in registers.
-    result.ToRegister();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(result.reg(),
-                                           key.reg(),
-                                           receiver.reg(),
-                                           tmp.reg(),
-                                           strict_mode_flag());
-
-    // Check that the receiver is not a smi.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ test(key.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
-    }
-
-    // Check that the receiver is a JSArray.
-    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
-    deferred->Branch(not_equal);
-
-    // Get the elements array from the receiver and check that it is not a
-    // dictionary.
-    __ mov(tmp.reg(),
-           FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
-    // Check whether it is possible to omit the write barrier.  If the
-    // elements array is in new space or the value written is a smi, we can
-    // safely update the elements array without a write barrier.
-    Label in_new_space;
-    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
-    if (!value_is_constant) {
-      __ test(result.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    }
-
-    __ bind(&in_new_space);
-    // Bind the deferred code patch site to be able to locate the fixed
-    // array map comparison.  When debugging, we patch this comparison to
-    // always fail so that we will hit the IC call in the deferred code
-    // which will allow the debugger to break for fast case stores.
-    __ bind(deferred->patch_site());
-    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-           Immediate(FACTORY->fixed_array_map()));
-    deferred->Branch(not_equal);
-
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis (because the fixed array check above ensures the
-    // elements are in fast case). Use unsigned comparison to handle negative
-    // keys.
-    __ cmp(key.reg(),
-           FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Store the value.
-    __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    result = frame()->CallKeyedStoreIC(strict_mode_flag());
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed store.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 3);
-  return result;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>::cast(raw_name->handle());
-  }
-}
-
-
-void Reference::GetValue() {
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-
-  // Record the source position for the property load.
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      if (!persist_after_get_) set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      if (persist_after_get_) cgen_->frame()->Dup();
-      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
-      if (!persist_after_get_) set_unloaded();
-      cgen_->frame()->Push(&result);
-      break;
-    }
-
-    case KEYED: {
-      if (persist_after_get_) {
-        cgen_->frame()->PushElementAt(1);
-        cgen_->frame()->PushElementAt(1);
-      }
-      Result value = cgen_->EmitKeyedLoad();
-      cgen_->frame()->Push(&value);
-      if (!persist_after_get_) set_unloaded();
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void Reference::TakeValue() {
-  // For non-constant frame-allocated slots, we invalidate the value in the
-  // slot.  For all others, we fall back on GetValue.
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(!is_illegal());
-  if (type_ != SLOT) {
-    GetValue();
-    return;
-  }
-
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-  ASSERT(slot != NULL);
-  if (slot->type() == Slot::LOOKUP ||
-      slot->type() == Slot::CONTEXT ||
-      slot->var()->mode() == Variable::CONST ||
-      slot->is_arguments()) {
-    GetValue();
-    return;
-  }
-
-  // Only non-constant, frame-allocated parameters and locals can
-  // reach here. Be careful not to use the optimizations for arguments
-  // object access since it may not have been initialized yet.
-  ASSERT(!slot->is_arguments());
-  if (slot->type() == Slot::PARAMETER) {
-    cgen_->frame()->TakeParameterAt(slot->index());
-  } else {
-    ASSERT(slot->type() == Slot::LOCAL);
-    cgen_->frame()->TakeLocalAt(slot->index());
-  }
-
-  ASSERT(persist_after_get_);
-  // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      Result answer = cgen_->EmitNamedStore(GetName(), false);
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression()->AsProperty();
-      ASSERT(property != NULL);
-
-      Result answer = cgen_->EmitKeyedStore(property->key()->type());
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case UNLOADED:
-    case ILLEGAL:
-      UNREACHABLE();
-  }
-}
-
-
-#undef __
-
 #define __ masm.
 
-
 static void MemCopyWrapper(void* dest, const void* src, size_t size) {
   memcpy(dest, src, size);
 }
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index acd651b..8f090b1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,275 +30,18 @@
 
 #include "ast.h"
 #include "ic-inl.h"
-#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class FrameRegisterState;
-class RegisterAllocator;
-class RegisterFile;
-class RuntimeCallHelper;
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue and SetValue.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important; see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  The value of the reference may be invalidated,
-  // causing subsequent attempts to read it to fail.
-  void TakeValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through.  The
-// preferred fall-through must be unbound; the other may already be
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally.  Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
-  ControlDestination(JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool true_is_fall_through)
-      : true_target_(true_target),
-        false_target_(false_target),
-        true_is_fall_through_(true_is_fall_through),
-        is_used_(false) {
-    ASSERT(true_is_fall_through ? !true_target->is_bound()
-                                : !false_target->is_bound());
-  }
-
-  // Accessors for the jump targets.  Directly jumping or branching to
-  // or binding the targets will not update the destination's state.
-  JumpTarget* true_target() const { return true_target_; }
-  JumpTarget* false_target() const { return false_target_; }
-
-  // True if the destination has been jumped to unconditionally or
-  // control has been split to both targets.  This predicate does not
-  // test whether the targets have been extracted and manipulated as
-  // raw jump targets.
-  bool is_used() const { return is_used_; }
-
-  // True if the destination is used and the true target (respectively
-  // false target) was the fall through.  If the target is backward,
-  // "fall through" included jumping unconditionally to it.
-  bool true_was_fall_through() const {
-    return is_used_ && true_is_fall_through_;
-  }
-
-  bool false_was_fall_through() const {
-    return is_used_ && !true_is_fall_through_;
-  }
-
-  // Emit a branch to one of the true or false targets, and bind the
-  // other target.  Because this binds the fall-through target, it
-  // should be emitted in tail position (as the last thing when
-  // compiling an expression).
-  void Split(Condition cc) {
-    ASSERT(!is_used_);
-    if (true_is_fall_through_) {
-      false_target_->Branch(NegateCondition(cc));
-      true_target_->Bind();
-    } else {
-      true_target_->Branch(cc);
-      false_target_->Bind();
-    }
-    is_used_ = true;
-  }
-
-  // Emit an unconditional jump in tail position, to the true target
-  // (if the argument is true) or the false target.  The "jump" will
-  // actually bind the jump target if it is forward, or jump to it if
-  // it is backward.
-  void Goto(bool where) {
-    ASSERT(!is_used_);
-    JumpTarget* target = where ? true_target_ : false_target_;
-    if (target->is_bound()) {
-      target->Jump();
-    } else {
-      target->Bind();
-    }
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Mark this jump target as used as if Goto had been called, but
-  // without generating a jump or binding a label (the control effect
-  // should have already happened).  This is used when the left
-  // subexpressions of the short-circuit boolean operators are
-  // compiled.
-  void Use(bool where) {
-    ASSERT(!is_used_);
-    ASSERT((where ? true_target_ : false_target_)->is_bound());
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Swap the true and false targets but keep the same actual label as
-  // the fall through.  This is used when compiling negated
-  // expressions, where we want to swap the targets but preserve the
-  // state.
-  void Invert() {
-    JumpTarget* temp_target = true_target_;
-    true_target_ = false_target_;
-    false_target_ = temp_target;
-
-    true_is_fall_through_ = !true_is_fall_through_;
-  }
-
- private:
-  // True and false jump targets.
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-
-  // Before using the destination: true if the true target is the
-  // preferred fall through, false if the false target is.  After
-  // using the destination: true if the true target was actually used
-  // as the fall through, false if the false target was.
-  bool true_is_fall_through_;
-
-  // True if the Split or Goto functions have been called.
-  bool is_used_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair).  It is threaded through
-// the call stack.  Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own control destination.
-  CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  ~CodeGenState();
-
-  // Accessors for the state.
-  ControlDestination* destination() const { return destination_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-  // A control destination in case the expression has a control-flow
-  // effect.
-  ControlDestination* destination_;
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode.
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
 
 // -------------------------------------------------------------------------
 // CodeGenerator
 
-class CodeGenerator: public AstVisitor {
+class CodeGenerator {
  public:
-  static bool MakeCode(CompilationInfo* info);
-
   // Printing of AST, etc. as requested by flags.
   static void MakeCodePrologue(CompilationInfo* info);
 
@@ -318,33 +61,7 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
 
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
-  bool in_spilled_code() const { return in_spilled_code_; }
-  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
-  // Return the operand for the element at |index_as_smi| + |additional_offset|
-  // in the FixedArray whose pointer is held in |array|.  |index_as_smi| is
-  // a Smi.
   static Operand FixedArrayElementOperand(Register array,
                                           Register index_as_smi,
                                           int additional_offset = 0) {
@@ -353,445 +70,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  // State
-  ControlDestination* destination() const { return state_->destination(); }
-
-  // Control of side-effect-free int32 expression compilation.
-  bool in_safe_int32_mode() { return in_safe_int32_mode_; }
-  void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
-  bool safe_int32_mode_enabled() {
-    return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
-  }
-  void set_safe_int32_mode_enabled(bool value) {
-    safe_int32_mode_enabled_ = value;
-  }
-  void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
-    unsafe_bailout_ = unsafe_bailout;
-  }
-
-  // Take the Result that is an untagged int32, and convert it to a tagged
-  // Smi or HeapNumber.  Remove the untagged_int32 flag from the result.
-  void ConvertInt32ResultToNumber(Result* value);
-  void ConvertInt32ResultToSmi(Result* value);
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Visit a statement and then spill the virtual frame if control flow can
-  // reach the end of the statement (ie, it does not exit via break,
-  // continue, return, or throw).  This function is used temporarily while
-  // the code generator is being transformed.
-  void VisitAndSpill(Statement* statement);
-
-  // Visit a list of statements and then spill the virtual frame if control
-  // flow can reach the end of the list.
-  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).
-  void GenerateReturnSequence(Result* return_value);
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  Result StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-
-  Operand SlotOperand(Slot* slot, Register tmp);
-
-  Operand ContextSlotOperandCheckExtensions(Slot* slot,
-                                            Result tmp,
-                                            JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* expr,
-                     ControlDestination* destination,
-                     bool force_control);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver();
-
-  // Generate code to push the value of an expression on top of the frame
-  // and then spill the frame fully to memory.  This function is used
-  // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression);
-
-  // Evaluate an expression and place its value on top of the frame,
-  // using, or not using, the side-effect-free expression compiler.
-  void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
-  void LoadWithSafeInt32ModeDisabled(Expression* expr);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
-  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                           TypeofState typeof_state,
-                                           JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       Result* result,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the expression stack into a slot, leaving the
-  // value in place.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Receiver is passed on the frame and consumed.
-  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // If the store is contextual, value is passed on the frame and consumed.
-  // Otherwise, receiver and value are passed on the frame and consumed.
-  Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Receiver and key are passed on the frame and consumed.
-  Result EmitKeyedLoad();
-
-  // Receiver, key, and value are passed on the frame and consumed.
-  Result EmitKeyedStore(StaticType* key_type);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Translate the value on top of the frame into control flow to the
-  // control destination.
-  void ToBoolean(ControlDestination* destination);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(BinaryOperation* expr,
-                              OverwriteMode overwrite_mode);
-
-  // Emits a code sequence that jumps to a JumpTarget if the inputs
-  // are both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  // Allocates a temporary register, possibly spilling from the frame,
-  // if it needs to check both left and right.
-  void JumpIfBothSmiUsingTypeInfo(Result* left,
-                                  Result* right,
-                                  JumpTarget* both_smi);
-
-  // Emits a code sequence that jumps to deferred code if the inputs
-  // are not both smis.  Cannot be in MacroAssembler because it takes
-  // a deferred code object.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     Register scratch,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     DeferredCode* deferred);
-
-  // Emits a code sequence that jumps to the label if the inputs
-  // are not both smis.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     Register scratch,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     Label* on_non_smi);
-
-  // If possible, combine two constant smi values using op to produce
-  // a smi result, and push it on the virtual frame, all at compile time.
-  // Returns true if it succeeds.  Otherwise it has no effect.
-  bool FoldConstantSmis(Token::Value op, int left, int right);
-
-  // Emit code to perform a binary operation on a constant
-  // smi and a likely smi.  Consumes the Result operand.
-  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                    Result* operand,
-                                    Handle<Object> constant_operand,
-                                    bool reversed,
-                                    OverwriteMode overwrite_mode);
-
-  // Emit code to perform a binary operation on two likely smis.
-  // The code to handle smi arguments is produced inline.
-  // Consumes the Results left and right.
-  Result LikelySmiBinaryOperation(BinaryOperation* expr,
-                                  Result* left,
-                                  Result* right,
-                                  OverwriteMode overwrite_mode);
-
-
-  // Emit code to perform a binary operation on two untagged int32 values.
-  // The values are on top of the frame, and the result is pushed on the frame.
-  void Int32BinaryOperation(BinaryOperation* node);
-
-
-  // Generate a stub call from the virtual frame.
-  Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                         Result* left,
-                                         Result* right);
-
-  void Comparison(AstNode* node,
-                  Condition cc,
-                  bool strict,
-                  ControlDestination* destination);
-
-  // If at least one of the sides is a constant smi, generate optimized code.
-  void ConstantSmiComparison(Condition cc,
-                             bool strict,
-                             ControlDestination* destination,
-                             Result* left_side,
-                             Result* right_side,
-                             bool left_side_constant_smi,
-                             bool right_side_constant_smi,
-                             bool is_loop_condition);
-
-  void GenerateInlineNumberComparison(Result* left_side,
-                                      Result* right_side,
-                                      Condition cc,
-                                      ControlDestination* dest);
-
-  // To prevent long attacker-controlled byte sequences, integer constants
-  // from the JavaScript source are loaded in two parts if they are larger
-  // than 17 bits.
-  static const int kMaxSmiInlinedBits = 17;
-  bool IsUnsafeSmi(Handle<Object> value);
-  // Load an integer constant x into a register target or into the stack using
-  // at most 16 bits of user-controlled data per assembly operation.
-  void MoveUnsafeSmi(Register target, Handle<Object> value);
-  void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
-  void PushUnsafeSmi(Handle<Object> value);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  Result InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                             bool pretenure);
-
-  // Support for types.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  // Construct a RegExp exec result with two in-object properties.
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements. Takes three expressions, the object and two
-  // indices. This should only be used if the indices are known to be
-  // non-negative and within bounds of the elements array at the call site.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  // Check whether two RegExps are equivalent.
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source position the code being
-  // generated corresponds to.  Source positions are collected by the
-  // assembler and emitted with the relocation information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* stmt);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.  There should
-  // be no frame-external references to (non-reserved) registers.
-  bool HasValidEntryRegisters();
-#endif
-
-  ZoneList<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  CodeGenState* state_;
-  int loop_nesting_;
-  bool in_safe_int32_mode_;
-  bool safe_int32_mode_enabled_;
-
-  // Jump targets.
-  // The target of the return from the function.
-  BreakTarget function_return_;
-  // The target of the bailout from a side-effect-free int32 subexpression.
-  BreakTarget* unsafe_bailout_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  // True when we are in code that expects the virtual frame to be fully
-  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
-  // called from spilled code, because they do not leave the virtual frame
-  // in a spilled state.
-  bool in_spilled_code_;
-
-  // A cookie that is used for JIT IMM32 encoding.  Initialized to a
-  // random number when the command-line flag
-  // FLAG_mask_constants_with_cookie is true, zero otherwise.
-  int jit_cookie_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class Result;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class LCodeGen;
-
-  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
-  friend class InlineRuntimeFunctionsTable;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 33c5251..2389948 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 3f72def..aeee584 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -231,7 +231,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       NearLabel ok;
       ExternalReference stack_limit =
           ExternalReference::address_of_stack_limit(isolate());
@@ -773,7 +773,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -801,7 +801,7 @@
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ jmp(clause->body_target()->entry_label());
+      __ jmp(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -812,7 +812,7 @@
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ jmp(clause->body_target()->entry_label());
+    __ jmp(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -822,14 +822,14 @@
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_target());
   } else {
-    __ jmp(default_clause->body_target()->entry_label());
+    __ jmp(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
@@ -3835,7 +3835,7 @@
   if (assign_type == VARIABLE) {
     PrepareForBailout(expr->expression(), TOS_REG);
   } else {
-    PrepareForBailout(expr->increment(), TOS_REG);
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
   }
 
   // Call ToNumber only if operand is not a smi.
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 48ffc73..b7af03c 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
deleted file mode 100644
index 76c0d02..0000000
--- a/src/ia32/jump-target-ia32.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (is_bound()) {
-    // Backward jump.  There is an expected frame to merge to.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else if (entry_frame_ != NULL) {
-    // Forward jump with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and jump to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else {
-    // Forward jump.  Remember the current frame and emit a jump to
-    // its merge code.
-    AddReachingFrame(cgen()->frame());
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-    __ jmp(&merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint hint) {
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-
-  if (is_bound()) {
-    ASSERT(direction_ == BIDIRECTIONAL);
-    // Backward branch.  We have an expected frame to merge to on the
-    // backward edge.
-
-    // Swap the current frame for a copy (we do the swapping to get
-    // the off-frame registers off the fall through) to use for the
-    // branch.
-    VirtualFrame* fall_through_frame = cgen()->frame();
-    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
-    RegisterFile non_frame_registers;
-    cgen()->SetFrame(branch_frame, &non_frame_registers);
-
-    // Check if we can avoid merge code.
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen()->frame()->Equals(entry_frame_)) {
-      // Branch right in to the block.
-      cgen()->DeleteFrame();
-      __ j(cc, &entry_label_, hint);
-      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-      return;
-    }
-
-    // Check if we can reuse existing merge code.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (reaching_frames_[i] != NULL &&
-          cgen()->frame()->Equals(reaching_frames_[i])) {
-        // Branch to the merge code.
-        cgen()->DeleteFrame();
-        __ j(cc, &merge_labels_[i], hint);
-        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-        return;
-      }
-    }
-
-    // To emit the merge code here, we negate the condition and branch
-    // around the merge code on the fall through path.
-    Label original_fall_through;
-    __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-    __ bind(&original_fall_through);
-
-  } else if (entry_frame_ != NULL) {
-    // Forward branch with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and branch to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    // Explicitly use the macro assembler instead of __ as forward
-    // branches are expected to be a fixed size (no inserted
-    // coverage-checking instructions please).  This is used in
-    // Reference::GetValue.
-    cgen()->masm()->j(cc, &entry_label_, hint);
-
-  } else {
-    // Forward branch.  A copy of the current frame is remembered and
-    // a branch to the merge code is emitted.  Explicitly use the
-    // macro assembler instead of __ as forward branches are expected
-    // to be a fixed size (no inserted coverage-checking instructions
-    // please).  This is used in Reference::GetValue.
-    AddReachingFrame(new VirtualFrame(cgen()->frame()));
-    cgen()->masm()->j(cc, &merge_labels_.last(), hint);
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  cgen()->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
-  target_frame->Adjust(1);
-  // We do not expect a call with a preconfigured entry frame.
-  ASSERT(entry_frame_ == NULL);
-  AddReachingFrame(target_frame);
-  __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(cgen() != NULL);
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  // Fast case: the jump target was manually configured with an entry
-  // frame to use.
-  if (entry_frame_ != NULL) {
-    // Assert no reaching frames to deal with.
-    ASSERT(reaching_frames_.is_empty());
-    ASSERT(!cgen()->has_valid_frame());
-
-    RegisterFile empty;
-    if (direction_ == BIDIRECTIONAL) {
-      // Copy the entry frame so the original can be used for a
-      // possible backward jump.
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    } else {
-      // Take ownership of the entry frame.
-      cgen()->SetFrame(entry_frame_, &empty);
-      entry_frame_ = NULL;
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (!is_linked()) {
-    ASSERT(cgen()->has_valid_frame());
-    if (direction_ == FORWARD_ONLY) {
-      // Fast case: no forward jumps and no possible backward jumps.
-      // The stack pointer can be floating above the top of the
-      // virtual frame before the bind.  Afterward, it should not.
-      VirtualFrame* frame = cgen()->frame();
-      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-      if (difference > 0) {
-        frame->stack_pointer_ -= difference;
-        __ add(Operand(esp), Immediate(difference * kPointerSize));
-      }
-    } else {
-      ASSERT(direction_ == BIDIRECTIONAL);
-      // Fast case: no forward jumps, possible backward ones.  Remove
-      // constants and copies above the watermark on the fall-through
-      // frame and use it as the entry frame.
-      cgen()->frame()->MakeMergable();
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (direction_ == FORWARD_ONLY &&
-      !cgen()->has_valid_frame() &&
-      reaching_frames_.length() == 1) {
-    // Fast case: no fall-through, a single forward jump, and no
-    // possible backward jumps.  Pick up the only reaching frame, take
-    // ownership of it, and use it for the block about to be emitted.
-    VirtualFrame* frame = reaching_frames_[0];
-    RegisterFile empty;
-    cgen()->SetFrame(frame, &empty);
-    reaching_frames_[0] = NULL;
-    __ bind(&merge_labels_[0]);
-
-    // The stack pointer can be floating above the top of the
-    // virtual frame before the bind.  Afterward, it should not.
-    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-    if (difference > 0) {
-      frame->stack_pointer_ -= difference;
-      __ add(Operand(esp), Immediate(difference * kPointerSize));
-    }
-
-    __ bind(&entry_label_);
-    return;
-  }
-
-  // If there is a current frame, record it as the fall-through.  It
-  // is owned by the reaching frames for now.
-  bool had_fall_through = false;
-  if (cgen()->has_valid_frame()) {
-    had_fall_through = true;
-    AddReachingFrame(cgen()->frame());  // Return value ignored.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-
-  // Compute the frame to use for entry to the block.
-  ComputeEntryFrame();
-
-  // Some moves required to merge to an expected frame require purely
-  // frame state changes, and do not require any code generation.
-  // Perform those first to increase the possibility of finding equal
-  // frames below.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    if (reaching_frames_[i] != NULL) {
-      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
-    }
-  }
-
-  if (is_linked()) {
-    // There were forward jumps.  Handle merging the reaching frames
-    // to the entry frame.
-
-    // Loop over the (non-null) reaching frames and process any that
-    // need merge code.  Iterate backwards through the list to handle
-    // the fall-through frame first.  Set frames that will be
-    // processed after 'i' to NULL if we want to avoid processing
-    // them.
-    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
-      VirtualFrame* frame = reaching_frames_[i];
-
-      if (frame != NULL) {
-        // Does the frame (probably) need merge code?
-        if (!frame->Equals(entry_frame_)) {
-          // We could have a valid frame as the fall through to the
-          // binding site or as the fall through from a previous merge
-          // code block.  Jump around the code we are about to
-          // generate.
-          if (cgen()->has_valid_frame()) {
-            cgen()->DeleteFrame();
-            __ jmp(&entry_label_);
-          }
-          // Pick up the frame for this block.  Assume ownership if
-          // there cannot be backward jumps.
-          RegisterFile empty;
-          if (direction_ == BIDIRECTIONAL) {
-            cgen()->SetFrame(new VirtualFrame(frame), &empty);
-          } else {
-            cgen()->SetFrame(frame, &empty);
-            reaching_frames_[i] = NULL;
-          }
-          __ bind(&merge_labels_[i]);
-
-          // Loop over the remaining (non-null) reaching frames,
-          // looking for any that can share merge code with this one.
-          for (int j = 0; j < i; j++) {
-            VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen()->frame())) {
-              // Set the reaching frame element to null to avoid
-              // processing it later, and then bind its entry label.
-              reaching_frames_[j] = NULL;
-              __ bind(&merge_labels_[j]);
-            }
-          }
-
-          // Emit the merge code.
-          cgen()->frame()->MergeTo(entry_frame_);
-        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
-          // If this is the fall through frame, and it didn't need
-          // merge code, we need to pick up the frame so we can jump
-          // around subsequent merge blocks if necessary.
-          RegisterFile empty;
-          cgen()->SetFrame(frame, &empty);
-          reaching_frames_[i] = NULL;
-        }
-      }
-    }
-
-    // The code generator may not have a current frame if there was no
-    // fall through and none of the reaching frames needed merging.
-    // In that case, clone the entry frame as the current frame.
-    if (!cgen()->has_valid_frame()) {
-      RegisterFile empty;
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    }
-
-    // There may be unprocessed reaching frames that did not need
-    // merge code.  They will have unbound merge labels.  Bind their
-    // merge labels to be the same as the entry label and deallocate
-    // them.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (!merge_labels_[i].is_bound()) {
-        reaching_frames_[i] = NULL;
-        __ bind(&merge_labels_[i]);
-      }
-    }
-
-    // There are non-NULL reaching frames with bound labels for each
-    // merge block, but only on backward targets.
-  } else {
-    // There were no forward jumps.  There must be a current frame and
-    // this must be a bidirectional target.
-    ASSERT(reaching_frames_.length() == 1);
-    ASSERT(reaching_frames_[0] != NULL);
-    ASSERT(direction_ == BIDIRECTIONAL);
-
-    // Use a copy of the reaching frame so the original can be saved
-    // for possible reuse as a backward merge block.
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
-    __ bind(&merge_labels_[0]);
-    cgen()->frame()->MergeTo(entry_frame_);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-  }
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
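
For context on why this file goes away wholesale: JumpTarget existed
because the old virtual-frame code generator let values live in
registers across control flow, so every join point had to reconcile
the incoming frame states and emit merge code where they disagreed.
Full-codegen keeps the expression stack in memory at a known height,
and Crankshaft resolves its parallel moves separately, so a bare Label
suffices and DoJump/DoBranch/DoBind have no remaining callers. A
standalone model of the merge question the deleted DoBind() loop was
answering (Frame here is a toy, not V8's VirtualFrame):

    #include <string>
    #include <vector>

    // A "frame" maps each expression-stack slot to a location, e.g.
    // "eax" or "[esp+4]" or a constant.
    using Frame = std::vector<std::string>;

    // Merge code is needed whenever a reaching frame disagrees with
    // the chosen entry frame; the deleted code emitted MergeTo() for
    // each such frame and bound shared merge labels for equal ones.
    bool NeedsMergeCode(const std::vector<Frame>& reaching,
                        const Frame& entry) {
      for (const Frame& f : reaching) {
        if (f != entry) return true;
      }
      return false;                   // all frames equal: just bind
    }

    int main() {
      Frame entry = {"eax", "[esp]"};
      Frame other = {"ebx", "[esp]"};
      return NeedsMergeCode({other, entry}, entry) ? 0 : 1;
    }
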
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 2c5541b..0f96f78 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -408,20 +408,21 @@
 }
 
 
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr,
-                        bool adjusted) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               ContextMode context_mode,
+                               SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (!adjusted) {
+  if (context_mode == RESTORE_CONTEXT) {
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   }
   __ call(code, mode);
 
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
@@ -432,25 +433,44 @@
 }
 
 
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr,
+                        ContextMode context_mode) {
+  CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
 void LCodeGen::CallRuntime(const Runtime::Function* fun,
                            int argc,
                            LInstruction* instr,
-                           bool adjusted) {
+                           ContextMode context_mode) {
   ASSERT(instr != NULL);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (!adjusted) {
+  if (context_mode == RESTORE_CONTEXT) {
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   }
   __ CallRuntime(fun, argc);
 
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode) {
   // Create the environment to bailout to. If the call has side effects
   // execution has to continue after the call otherwise execution can continue
   // from a previous bailout point repeating the call.
@@ -462,8 +482,16 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        0,
+        deoptimization_environment->deoptimization_index());
+  }
 }
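
The refactoring above trades the boolean adjusted parameter, whose
meaning was invisible at call sites, for two self-describing enums,
and routes the register-safepoint variant through CallCodeGeneric so
RegisterLazyDeoptimization can record the right safepoint kind. A
minimal sketch of the call-site effect; only the enum names are taken
from this diff, the rest is a stand-in:

    enum ContextMode { RESTORE_CONTEXT, CONTEXT_ADJUSTED };

    void CallCode(ContextMode context_mode) {
      if (context_mode == RESTORE_CONTEXT) {
        // reload esi from the caller's frame before emitting the call
      }
      // ... emit the call, then record a safepoint per SafepointMode ...
    }

    // CallCode(RESTORE_CONTEXT) reads unambiguously where the old
    // CallCode(code, mode, instr, false) did not.
    int main() { CallCode(RESTORE_CONTEXT); }
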
 
 
@@ -612,6 +640,7 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(kind == expected_safepoint_kind_);
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
@@ -697,38 +726,38 @@
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpConstructResult: {
       RegExpConstructResultStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::RegExpExec: {
       RegExpExecStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::SubString: {
       SubStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::NumberToString: {
       NumberToStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::StringAdd: {
       StringAddStub stub(NO_STRING_ADD_FLAGS);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::StringCompare: {
       StringCompareStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::TranscendentalCache: {
       TranscendentalCacheStub stub(instr->transcendental_type(),
                                    TranscendentalCacheStub::TAGGED);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     default:
@@ -1143,7 +1172,7 @@
 
 void LCodeGen::DoThrow(LThrow* instr) {
   __ push(ToOperand(instr->InputAt(0)));
-  CallRuntime(Runtime::kThrow, 1, instr, false);
+  CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
 
   if (FLAG_debug_code) {
     Comment("Unreachable code.");
@@ -1218,7 +1247,7 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 }
 
 
@@ -1330,12 +1359,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ pushad();
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ popad();
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
 
 void LCodeGen::DoGoto(LGoto* instr) {
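
DoDeferredStackCheck above shrinks from six lines to two because
CallRuntimeFromDeferred, added earlier in this diff, bundles the steps
every deferred runtime call used to repeat by hand: restore the
context register, call the runtime function saving double registers,
and record a register safepoint with no deoptimization index. A
stand-in sketch of that consolidation (toy types, real step order):

    struct DeferredCallHelper {
      void CallRuntimeFromDeferred(int function_id, int argc) {
        RestoreContext();             // __ mov(esi, Operand(ebp, ...))
        CallRuntimeSaveDoubles(function_id);
        RecordSafepointWithRegisters(argc);
      }
      void RestoreContext() {}
      void CallRuntimeSaveDoubles(int) {}
      void RecordSafepointWithRegisters(int) {}
    };

    int main() { DeferredCallHelper().CallRuntimeFromDeferred(0, 0); }
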
@@ -1837,7 +1862,7 @@
   // Object and function are in fixed registers defined by the stub.
   ASSERT(ToRegister(instr->context()).is(esi));
   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 
   NearLabel true_value, done;
   __ test(eax, Operand(eax));
@@ -1856,7 +1881,7 @@
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
   __ test(eax, Operand(eax));
   EmitBranch(true_block, false_block, zero);
 }
@@ -1928,7 +1953,7 @@
 
 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                 Label* map_check) {
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(
@@ -1939,20 +1964,24 @@
       flags | InstanceofStub::kReturnTrueFalseObject);
   InstanceofStub stub(flags);
 
-  // Get the temp register reserved by the instruction. This needs to be edi as
-  // its slot of the pushing of safepoint registers is used to communicate the
-  // offset to the location of the map check.
+  // Get the temp register reserved by the instruction. This needs to be a
+  // register that is pushed last by PushSafepointRegisters, because the
+  // top of the stack is used to pass the offset of the map check to the
+  // stub.
   Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(temp.is(edi));
+  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
   __ mov(InstanceofStub::right(), Immediate(instr->function()));
   static const int kAdditionalDelta = 16;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
   __ mov(temp, Immediate(delta));
   __ StoreToSafepointRegisterSlot(temp, temp);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RESTORE_CONTEXT,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   // Put the result value into the eax slot and restore all registers.
   __ StoreToSafepointRegisterSlot(eax, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -1980,7 +2009,7 @@
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 
   Condition condition = ComputeCompareCondition(op);
   if (op == Token::GT || op == Token::LTE) {
@@ -2003,7 +2032,7 @@
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 
   // The compare stub expects compare condition and the input operands
   // reversed for GT and LTE.
@@ -2051,7 +2080,7 @@
   RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                                RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, mode, instr);
+  CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2079,8 +2108,10 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
 }
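
This hunk, and the matching DoStoreNamedGeneric and DoStoreKeyedGeneric
hunks below, pick the store IC per instruction through the new
strict_mode() accessors added to lithium-ia32.h in this diff, rather
than from the compilation-wide info_, so each store carries the
strictness of the function it came from. A standalone sketch of the
selection (the enum names are illustrative stand-ins):

    enum StoreStub { kStoreIC, kStoreIC_Strict };

    // Per-instruction strictness: the lithium store instruction reads
    // the bit from its hydrogen counterpart.
    StoreStub PickStoreStub(bool instr_strict_mode) {
      return instr_strict_mode ? kStoreIC_Strict : kStoreIC;
    }

    int main() { return PickStoreStub(true) == kStoreIC_Strict ? 0 : 1; }
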
 
 
@@ -2146,7 +2177,7 @@
     ASSERT(instr->hydrogen()->need_generic());
     __ mov(ecx, name);
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+    CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else {
     NearLabel done;
     for (int i = 0; i < map_count - 1; ++i) {
@@ -2168,7 +2199,7 @@
       __ bind(&generic);
       __ mov(ecx, name);
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+      CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
     } else {
       DeoptimizeIf(not_equal, instr->environment());
       EmitLoadField(result, object, map, name);
@@ -2185,7 +2216,7 @@
 
   __ mov(ecx, instr->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2348,7 +2379,7 @@
   ASSERT(ToRegister(instr->key()).is(eax));
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2536,7 +2567,7 @@
   }
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
@@ -2558,7 +2589,7 @@
   Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   Label negative;
   __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2579,10 +2610,8 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
 
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(eax)) __ mov(tmp, eax);
 
@@ -2598,7 +2627,6 @@
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
 
   __ bind(&done);
-  __ PopSafepointRegisters();
 }
 
 
@@ -2820,7 +2848,7 @@
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 }
 
 
@@ -2828,7 +2856,7 @@
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 }
 
 
@@ -2873,7 +2901,7 @@
   int arity = instr->arity();
   Handle<Code> ic = isolate()->stub_cache()->
       ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2885,7 +2913,7 @@
   Handle<Code> ic = isolate()->stub_cache()->
       ComputeCallInitialize(arity, NOT_IN_LOOP);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2895,7 +2923,7 @@
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
   __ Drop(1);
 }
 
@@ -2908,7 +2936,7 @@
   Handle<Code> ic = isolate()->stub_cache()->
       ComputeCallInitialize(arity, NOT_IN_LOOP);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2926,12 +2954,12 @@
 
   Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
   __ Set(eax, Immediate(instr->arity()));
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
 }
 
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  CallRuntime(instr->function(), instr->arity(), instr, false);
+  CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
 }
 
 
@@ -2971,10 +2999,10 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -3071,10 +3099,10 @@
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -3192,7 +3220,7 @@
   // contained in the register pointer map.
   __ Set(result, Immediate(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3205,16 +3233,12 @@
     __ SmiTag(index);
     __ push(index);
   }
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(eax);
   }
   __ SmiUntag(eax);
   __ StoreToSafepointRegisterSlot(result, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3257,14 +3281,11 @@
   // contained in the register pointer map.
   __ Set(result, Immediate(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ SmiTag(char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(result, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3311,7 +3332,7 @@
   Register tmp = reg.is(eax) ? ecx : eax;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   // There was overflow, so bits 30 and 31 of the original integer
   // disagree. Try to allocate a heap number in new space and store
@@ -3333,10 +3354,7 @@
   // integer value.
   __ StoreToSafepointRegisterSlot(reg, Immediate(0));
 
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   if (!reg.is(eax)) __ mov(reg, eax);
 
   // Done. Put the value in xmm0 into the value of the allocated heap
@@ -3344,7 +3362,6 @@
   __ bind(&done);
   __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
   __ StoreToSafepointRegisterSlot(reg, reg);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3380,13 +3397,9 @@
   Register reg = ToRegister(instr->result());
   __ Set(reg, Immediate(0));
 
-  __ PushSafepointRegisters();
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   __ StoreToSafepointRegisterSlot(reg, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3801,16 +3814,16 @@
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
   } else {
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   }
 }
 
@@ -3832,9 +3845,12 @@
 
   // Pick the right runtime function to call.
   if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
   } else {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+    CallRuntime(Runtime::kCreateObjectLiteralShallow,
+                4,
+                instr,
+                CONTEXT_ADJUSTED);
   }
 }
 
@@ -3842,7 +3858,7 @@
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   ASSERT(ToRegister(instr->InputAt(0)).is(eax));
   __ push(eax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
+  CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -3867,7 +3883,7 @@
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ push(Immediate(instr->hydrogen()->pattern()));
   __ push(Immediate(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -3879,7 +3895,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -3907,14 +3923,14 @@
     FastNewClosureStub stub(
         shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ push(Immediate(shared_info));
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else {
     __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(Immediate(shared_info));
     __ push(Immediate(pretenure
                       ? factory()->true_value()
                       : factory()->false_value()));
-    CallRuntime(Runtime::kNewClosure, 3, instr, false);
+    CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
   }
 }
 
@@ -3926,7 +3942,7 @@
   } else {
     __ push(ToOperand(input));
   }
-  CallRuntime(Runtime::kTypeof, 1, instr, false);
+  CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
 }
 
 
@@ -4129,7 +4145,7 @@
   __ j(above_equal, &done);
 
   StackCheckStub stub;
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   __ bind(&done);
 }
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 4414e6a..6d42cd7 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -61,7 +61,8 @@
         deferred_(8),
         osr_pc_offset_(-1),
         deoptimization_reloc_size(),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -129,7 +130,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -164,16 +165,44 @@
   bool GenerateRelocPadding();
   bool GenerateSafepointTable();
 
-  void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
-                bool adjusted = true);
-  void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
-                   bool adjusted = true);
-  void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
-                   bool adjusted = true) {
+  enum ContextMode {
+    RESTORE_CONTEXT,
+    CONTEXT_ADJUSTED
+  };
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr,
+                ContextMode context_mode);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       ContextMode context_mode,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* fun,
+                   int argc,
+                   LInstruction* instr,
+                   ContextMode context_mode);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int argc,
+                   LInstruction* instr,
+                   ContextMode context_mode) {
     const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, argc, instr, adjusted);
+    CallRuntime(function, argc, instr, context_mode);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -182,7 +211,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode);
+
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -281,6 +312,27 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    }
+
+    ~PushSafepointRegistersScope() {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
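
PushSafepointRegistersScope turns the previously hand-paired
PushSafepointRegisters / PopSafepointRegisters calls into an RAII
pair, and the expected_safepoint_kind_ handshake backs the
ASSERT(kind == expected_safepoint_kind_) added to RecordSafepoint
earlier in this diff: a safepoint can only be recorded in the mode the
surrounding code actually set up. A standalone model of the pattern:

    #include <cassert>

    enum Kind { kSimple, kWithRegisters };

    struct Codegen {
      Kind expected = kSimple;
      void PushSafepointRegisters() {}
      void PopSafepointRegisters() {}
    };

    class PushScope {                 // models the real scope class
     public:
      explicit PushScope(Codegen* cg) : cg_(cg) {
        assert(cg_->expected == kSimple);        // scopes do not nest
        cg_->PushSafepointRegisters();
        cg_->expected = kWithRegisters;
      }
      ~PushScope() {                  // runs on every exit path
        assert(cg_->expected == kWithRegisters);
        cg_->PopSafepointRegisters();
        cg_->expected = kSimple;
      }
     private:
      Codegen* cg_;
    };

    int main() {
      Codegen cg;
      { PushScope scope(&cg); }       // pop happens at the brace
      return cg.expected == kSimple ? 0 : 1;
    }
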
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 29e1424..9ccd189 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1120,9 +1120,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
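
HConstant::ToBoolean() folds the old IsTrue()/IsFalse() handle checks
into one total predicate, so constants other than the two boolean
literals (numbers, strings) now fold to a plain LGoto as well instead
of falling through both branches. A standalone sketch of the totality
point, assuming JavaScript-like truthiness rules:

    #include <string>
    #include <variant>

    using Constant = std::variant<bool, double, std::string>;

    // Total: every constant yields an answer, unlike the old
    // IsTrue()/IsFalse() pair, which covered only the two literals.
    bool ToBoolean(const Constant& c) {
      if (const bool* b = std::get_if<bool>(&c)) return *b;
      if (const double* d = std::get_if<double>(&c))
        return *d != 0 && *d == *d;   // 0, -0 and NaN are falsy
      return !std::get<std::string>(c).empty();
    }

    int main() { return ToBoolean(Constant(1.0)) ? 0 : 1; }
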
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index fe7681b..9ace8f8 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1346,6 +1346,7 @@
   LOperand* global_object() { return InputAt(1); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(2); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1694,6 +1695,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1755,6 +1757,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -1762,6 +1765,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 4055498..ad567bc 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 #include "serialize.h"
@@ -1030,19 +1030,6 @@
 }
 
 
-void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
-                                      Register result,
-                                      Register op,
-                                      JumpTarget* then_target) {
-  JumpTarget ok;
-  test(result, Operand(result));
-  ok.Branch(not_zero, taken);
-  test(op, Operand(op));
-  then_target->Branch(sign, not_taken);
-  ok.Bind();
-}
-
-
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 946022a..6909272 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,7 +50,6 @@
 typedef Operand MemOperand;
 
 // Forward declaration.
-class JumpTarget;
 class PostCallGenerator;
 
 // MacroAssembler implements a collection of frequently used macros.
@@ -424,12 +423,6 @@
   // Check if result is zero and op is negative.
   void NegativeZeroTest(Register result, Register op, Label* then_label);
 
-  // Check if result is zero and op is negative in code using jump targets.
-  void NegativeZeroTest(CodeGenerator* cgen,
-                        Register result,
-                        Register op,
-                        JumpTarget* then_target);
-
   // Check if result is zero and any of op1 and op2 are negative.
   // Register scratch is destroyed, and it must be different from op2.
   void NegativeZeroTest(Register result, Register op1, Register op2,
@@ -642,6 +635,10 @@
                                            Register scratch2,
                                            Label* on_not_flat_ascii_strings);
 
+  static int SafepointRegisterStackIndex(Register reg) {
+    return SafepointRegisterStackIndex(reg.code());
+  }
+
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
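
The static Register overload above is what lets
DoDeferredLInstanceOfKnownGlobal replace its hard-coded
ASSERT(temp.is(edi)) with
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0): the
real requirement is "whichever register sits on top of the
safepoint-register area", not edi by name. A standalone model of the
indexing, assuming eight registers pushed in code order so the last
one pushed (edi, code 7 on ia32) lands at index 0:

    // Model only; the register count and top-of-stack indexing are
    // assumptions about the ia32 pushad layout, not quoted from V8.
    constexpr int kNumSafepointRegisters = 8;

    constexpr int SafepointRegisterStackIndex(int reg_code) {
      return kNumSafepointRegisters - reg_code - 1;
    }

    static_assert(SafepointRegisterStackIndex(7) == 0,
                  "the last register pushed is on top");

    int main() { return 0; }
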
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
deleted file mode 100644
index 99ae6eb..0000000
--- a/src/ia32/register-allocator-ia32-inl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  // The code for this test relies on the order of register codes.
-  return reg.code() >= esp.code() && reg.code() <= esi.code();
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.  The mapping is:
-
-// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // eax
-    2,   // ecx
-    3,   // edx
-    1,   // ebx
-    -1,  // esp
-    -1,  // ebp
-    -1,  // esi
-    4    // edi
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-  // The non-reserved edi register is live on JS function entry.
-  Use(edi);  // JS function.
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
deleted file mode 100644
index 6db13d4..0000000
--- a/src/ia32/register-allocator-ia32.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  ASSERT(is_valid());
-  if (is_constant()) {
-    CodeGenerator* code_generator =
-        CodeGeneratorScope::Current(Isolate::Current());
-    Result fresh = code_generator->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    if (is_untagged_int32()) {
-      fresh.set_untagged_int32(true);
-      if (handle()->IsSmi()) {
-      code_generator->masm()->Set(
-          fresh.reg(),
-          Immediate(Smi::cast(*handle())->value()));
-      } else if (handle()->IsHeapNumber()) {
-        double double_value = HeapNumber::cast(*handle())->value();
-        int32_t value = DoubleToInt32(double_value);
-        if (double_value == 0 && signbit(double_value)) {
-          // Negative zero must not be converted to an int32 unless
-          // the context allows it.
-          code_generator->unsafe_bailout_->Branch(equal);
-          code_generator->unsafe_bailout_->Branch(not_equal);
-        } else if (double_value == value) {
-          code_generator->masm()->Set(fresh.reg(), Immediate(value));
-        } else {
-          code_generator->unsafe_bailout_->Branch(equal);
-          code_generator->unsafe_bailout_->Branch(not_equal);
-        }
-      } else {
-        // Constant is not a number.  This was not predicted by AST analysis.
-        code_generator->unsafe_bailout_->Branch(equal);
-        code_generator->unsafe_bailout_->Branch(not_equal);
-      }
-    } else if (code_generator->IsUnsafeSmi(handle())) {
-      code_generator->MoveUnsafeSmi(fresh.reg(), handle());
-    } else {
-      code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
-    }
-    // This result becomes a copy of the fresh one.
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  }
-  ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
-  CodeGenerator* code_generator =
-      CodeGeneratorScope::Current(Isolate::Current());
-  ASSERT(is_valid());
-  if (!is_register() || !reg().is(target)) {
-    Result fresh = code_generator->allocator()->Allocate(target);
-    ASSERT(fresh.is_valid());
-    if (is_register()) {
-      code_generator->masm()->mov(fresh.reg(), reg());
-    } else {
-      ASSERT(is_constant());
-      if (is_untagged_int32()) {
-        if (handle()->IsSmi()) {
-          code_generator->masm()->Set(
-              fresh.reg(),
-              Immediate(Smi::cast(*handle())->value()));
-        } else {
-          ASSERT(handle()->IsHeapNumber());
-          double double_value = HeapNumber::cast(*handle())->value();
-          int32_t value = DoubleToInt32(double_value);
-          if (double_value == 0 && signbit(double_value)) {
-            // Negative zero must not be converted to an int32 unless
-            // the context allows it.
-            code_generator->unsafe_bailout_->Branch(equal);
-            code_generator->unsafe_bailout_->Branch(not_equal);
-          } else if (double_value == value) {
-            code_generator->masm()->Set(fresh.reg(), Immediate(value));
-          } else {
-            code_generator->unsafe_bailout_->Branch(equal);
-            code_generator->unsafe_bailout_->Branch(not_equal);
-          }
-        }
-      } else {
-        if (code_generator->IsUnsafeSmi(handle())) {
-          code_generator->MoveUnsafeSmi(fresh.reg(), handle());
-        } else {
-          code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
-        }
-      }
-    }
-    fresh.set_type_info(type_info());
-    fresh.set_untagged_int32(is_untagged_int32());
-    *this = fresh;
-  } else if (is_register() && reg().is(target)) {
-    ASSERT(code_generator->has_valid_frame());
-    code_generator->frame()->Spill(target);
-    ASSERT(code_generator->allocator()->count(target) == 1);
-  }
-  ASSERT(is_register());
-  ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  Result result = AllocateWithoutSpilling();
-  // Check that the register is a byte register.  If not, unuse the
-  // register if valid and return an invalid result.
-  if (result.is_valid() && !result.reg().is_byte_register()) {
-    result.Unuse();
-    return Result();
-  }
-  return result;
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/register-allocator-ia32.h b/src/ia32/register-allocator-ia32.h
deleted file mode 100644
index e7ce91f..0000000
--- a/src/ia32/register-allocator-ia32.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  static const int kNumRegisters = 5;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 380d38f..27d2886 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
deleted file mode 100644
index 0304c32..0000000
--- a/src/ia32/virtual-frame-ia32.cc
+++ /dev/null
@@ -1,1366 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  // Emit code to write elements below the stack pointer to their
-  // (already allocated) stack address.
-  ASSERT(index <= stack_pointer_);
-  FrameElement element = elements_[index];
-  ASSERT(!element.is_synced());
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      break;
-
-    case FrameElement::MEMORY:
-      // This function should not be called with synced elements.
-      // (Memory elements are always synced.)
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ mov(Operand(ebp, fp_relative(index)), element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      if (cgen()->IsUnsafeSmi(element.handle())) {
-        cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
-      } else {
-        __ Set(Operand(ebp, fp_relative(index)),
-               Immediate(element.handle()));
-      }
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing_element = elements_[backing_index];
-      if (backing_element.is_memory()) {
-        Result temp = cgen()->allocator()->Allocate();
-        ASSERT(temp.is_valid());
-        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
-        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
-      } else {
-        ASSERT(backing_element.is_register());
-        __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
-  // Sync an element of the frame that is just above the stack pointer
-  // by pushing it.
-  ASSERT(index == stack_pointer_ + 1);
-  stack_pointer_++;
-  FrameElement element = elements_[index];
-
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      __ push(Immediate(Smi::FromInt(0)));
-      break;
-
-    case FrameElement::MEMORY:
-      // No memory elements exist above the stack pointer.
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ push(element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      if (cgen()->IsUnsafeSmi(element.handle())) {
-        cgen()->PushUnsafeSmi(element.handle());
-      } else {
-        __ push(Immediate(element.handle()));
-      }
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing = elements_[backing_index];
-      ASSERT(backing.is_memory() || backing.is_register());
-      if (backing.is_memory()) {
-        __ push(Operand(ebp, fp_relative(backing_index)));
-      } else {
-        __ push(backing.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
-  ASSERT(begin >= 0);
-  ASSERT(end < element_count());
-  // Sync elements below the range if they have not been materialized
-  // on the stack.
-  int start = Min(begin, stack_pointer_ + 1);
-
-  // Emit normal push instructions for elements above the stack pointer
-  // and use mov instructions for elements at or below it.
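-  // Example: with stack_pointer_ == 3, SyncRange(1, 5) stores the
-  // unsynced elements among indices 1..3 with movs and pushes elements
-  // 4 and 5, advancing stack_pointer_ to 5.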
-  for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) {
-      if (i <= stack_pointer_) {
-        SyncElementBelowStackPointer(i);
-      } else {
-        SyncElementByPushing(i);
-      }
-    }
-  }
-}
-
-
-void VirtualFrame::MakeMergable() {
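-  // Eliminate constants and copies so that an arbitrary frame of the
-  // same height can later be merged to this one without inspecting it.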
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement element = elements_[i];
-
-    // All number type information is reset to unknown for a mergable frame
-    // because of incoming back edges.
-    if (element.is_constant() || element.is_copy()) {
-      if (element.is_synced()) {
-        // Just spill.
-        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
-      } else {
-        // Allocate to a register.
-        FrameElement backing_element;  // Invalid if not a copy.
-        if (element.is_copy()) {
-          backing_element = elements_[element.index()];
-        }
-        Result fresh = cgen()->allocator()->Allocate();
-        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
-        elements_[i] =
-            FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED,
-                                          TypeInfo::Unknown());
-        Use(fresh.reg(), i);
-
-        // Emit a move.
-        if (element.is_constant()) {
-          if (cgen()->IsUnsafeSmi(element.handle())) {
-            cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
-          } else {
-            __ Set(fresh.reg(), Immediate(element.handle()));
-          }
-        } else {
-          ASSERT(element.is_copy());
-          // Copies are only backed by register or memory locations.
-          if (backing_element.is_register()) {
-            // The backing store may have been spilled by allocating,
-            // but that's OK.  If it was, the value is right where we
-            // want it.
-            if (!fresh.reg().is(backing_element.reg())) {
-              __ mov(fresh.reg(), backing_element.reg());
-            }
-          } else {
-            ASSERT(backing_element.is_memory());
-            __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
-          }
-        }
-      }
-      // No need to set the copied flag --- there are no copies.
-    } else {
-      // Clear the copy flag of non-constant, non-copy elements.
-      // They cannot be copied because copies are not allowed.
-      // The copy flag is not relied on before the end of this loop,
-      // including when registers are spilled.
-      elements_[i].clear_copied();
-      elements_[i].set_type_info(TypeInfo::Unknown());
-    }
-  }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm(), "[ Merge frame");
-  // We should always be merging the code generator's current frame to an
-  // expected frame.
-  ASSERT(cgen()->frame() == this);
-
-  // Adjust the stack pointer upward (toward the top of the virtual
-  // frame) if necessary.
-  if (stack_pointer_ < expected->stack_pointer_) {
-    int difference = expected->stack_pointer_ - stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ sub(Operand(esp), Immediate(difference * kPointerSize));
-  }
-
-  MergeMoveRegistersToMemory(expected);
-  MergeMoveRegistersToRegisters(expected);
-  MergeMoveMemoryToRegisters(expected);
-
-  // Adjust the stack pointer downward if necessary.
-  if (stack_pointer_ > expected->stack_pointer_) {
-    int difference = stack_pointer_ - expected->stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ add(Operand(esp), Immediate(difference * kPointerSize));
-  }
-
-  // At this point, the frames should be identical.
-  ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  // Move registers, constants, and copies to memory.  Perform moves
-  // from the top downward in the frame in order to leave the backing
-  // stores of copies in registers.
-  //
-  // Moving memory-backed copies to memory requires a spare register
-  // for the memory-to-memory moves.  Since we are performing a merge,
-  // we use esi (which is already saved in the frame).  We keep track
-  // of the index of the frame element esi is caching or kIllegalIndex
-  // if esi has not been disturbed.
-  int esi_caches = kIllegalIndex;
-  for (int i = element_count() - 1; i >= 0; i--) {
-    FrameElement target = expected->elements_[i];
-    if (target.is_register()) continue;  // Handle registers later.
-    if (target.is_memory()) {
-      FrameElement source = elements_[i];
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          // Not a legal merge move.
-          UNREACHABLE();
-          break;
-
-        case FrameElement::MEMORY:
-          // Already in place.
-          break;
-
-        case FrameElement::REGISTER:
-          Unuse(source.reg());
-          if (!source.is_synced()) {
-            __ mov(Operand(ebp, fp_relative(i)), source.reg());
-          }
-          break;
-
-        case FrameElement::CONSTANT:
-          if (!source.is_synced()) {
-            if (cgen()->IsUnsafeSmi(source.handle())) {
-              esi_caches = i;
-              cgen()->MoveUnsafeSmi(esi, source.handle());
-              __ mov(Operand(ebp, fp_relative(i)), esi);
-            } else {
-              __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
-            }
-          }
-          break;
-
-        case FrameElement::COPY:
-          if (!source.is_synced()) {
-            int backing_index = source.index();
-            FrameElement backing_element = elements_[backing_index];
-            if (backing_element.is_memory()) {
-              // If we have to spill a register, we spill esi.
-              if (esi_caches != backing_index) {
-                esi_caches = backing_index;
-                __ mov(esi, Operand(ebp, fp_relative(backing_index)));
-              }
-              __ mov(Operand(ebp, fp_relative(i)), esi);
-            } else {
-              ASSERT(backing_element.is_register());
-              __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
-            }
-          }
-          break;
-      }
-    }
-    elements_[i] = target;
-  }
-
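-  // If esi was commandeered as a scratch register above, restore the
-  // context register from its slot in the frame.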
-  if (esi_caches != kIllegalIndex) {
-    __ mov(esi, Operand(ebp, fp_relative(context_index())));
-  }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
-  // We have already done X-to-memory moves.
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    // Move the right value into register i if it is currently in a register.
-    int index = expected->register_location(i);
-    int use_index = register_location(i);
-    // Skip if register i is unused in the target or else if source is
-    // not a register (this is not a register-to-register move).
-    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
-    Register target = RegisterAllocator::ToRegister(i);
-    Register source = elements_[index].reg();
-    if (index != use_index) {
-      if (use_index == kIllegalIndex) {  // Target is currently unused.
-        // Copy the contents of source to target.
-        // Set frame element register to target.
-        Use(target, index);
-        Unuse(source);
-        __ mov(target, source);
-      } else {
-        // Exchange contents of registers source and target.
-        // Nothing except the register backing use_index has changed.
-        elements_[use_index].set_reg(source);
-        set_register_location(target, index);
-        set_register_location(source, use_index);
-        __ xchg(source, target);
-      }
-    }
-
-    if (!elements_[index].is_synced() &&
-        expected->elements_[index].is_synced()) {
-      __ mov(Operand(ebp, fp_relative(index)), target);
-    }
-    elements_[index] = expected->elements_[index];
-  }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
-  // Move memory, constants, and copies to registers.  This is the
-  // final step and since it is not done from the bottom up, but in
-  // register code order, we have special code to ensure that the backing
-  // elements of copies are in their correct locations when we
-  // encounter the copies.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int index = expected->register_location(i);
-    if (index != kIllegalIndex) {
-      FrameElement source = elements_[index];
-      FrameElement target = expected->elements_[index];
-      Register target_reg = RegisterAllocator::ToRegister(i);
-      ASSERT(target.reg().is(target_reg));
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          UNREACHABLE();
-          break;
-        case FrameElement::REGISTER:
-          ASSERT(source.Equals(target));
-          // Go to next iteration.  Skips Use(target_reg) and syncing
-          // below.  It is safe to skip syncing because a target
-          // register frame element would only be synced if all source
-          // elements were.
-          continue;
-          break;
-        case FrameElement::MEMORY:
-          ASSERT(index <= stack_pointer_);
-          __ mov(target_reg, Operand(ebp, fp_relative(index)));
-          break;
-
-        case FrameElement::CONSTANT:
-          if (cgen()->IsUnsafeSmi(source.handle())) {
-            cgen()->MoveUnsafeSmi(target_reg, source.handle());
-          } else {
-            __ Set(target_reg, Immediate(source.handle()));
-          }
-          break;
-
-        case FrameElement::COPY: {
-          int backing_index = source.index();
-          FrameElement backing = elements_[backing_index];
-          ASSERT(backing.is_memory() || backing.is_register());
-          if (backing.is_memory()) {
-            ASSERT(backing_index <= stack_pointer_);
-            // Code optimization if backing store should also move
-            // to a register: move backing store to its register first.
-            if (expected->elements_[backing_index].is_register()) {
-              FrameElement new_backing = expected->elements_[backing_index];
-              Register new_backing_reg = new_backing.reg();
-              ASSERT(!is_used(new_backing_reg));
-              elements_[backing_index] = new_backing;
-              Use(new_backing_reg, backing_index);
-              __ mov(new_backing_reg,
-                     Operand(ebp, fp_relative(backing_index)));
-              __ mov(target_reg, new_backing_reg);
-            } else {
-              __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
-            }
-          } else {
-            __ mov(target_reg, backing.reg());
-          }
-        }
-      }
-      // Ensure the proper sync state.
-      if (target.is_synced() && !source.is_synced()) {
-        __ mov(Operand(ebp, fp_relative(index)), target_reg);
-      }
-      Use(target_reg, index);
-      elements_[index] = target;
-    }
-  }
-}
-
-
-void VirtualFrame::Enter() {
-  // Registers live on entry: esp, ebp, esi, edi.
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  if (FLAG_debug_code) {
-    // Verify that edi contains a JS function.  The following code
-    // relies on eax being available for use.
-    __ test(edi, Immediate(kSmiTagMask));
-    __ Check(not_zero,
-             "VirtualFrame::Enter - edi is not a function (smi check).");
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-    __ Check(equal,
-             "VirtualFrame::Enter - edi is not a function (map check).");
-  }
-#endif
-
-  EmitPush(ebp);
-
-  __ mov(ebp, Operand(esp));
-
-  // Store the context in the frame.  The context is kept in esi and a
-  // copy is stored in the frame.  The external reference to esi
-  // remains.
-  EmitPush(esi);
-
-  // Store the function in the frame.  The frame owns the register
-  // reference now (ie, it can keep it in edi or spill it later).
-  Push(edi);
-  SyncElementAt(element_count() - 1);
-  cgen()->allocator()->Unuse(edi);
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Avoid using the leave instruction here, because it is too
-  // short. We need the return sequence to be at least the size of a
-  // call instruction to support patching the exit code in the
-  // debugger. See VisitReturnStatement for the full return sequence.
-  __ mov(esp, Operand(ebp));
-  stack_pointer_ = frame_pointer();
-  for (int i = element_count() - 1; i > stack_pointer_; i--) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      Unuse(last.reg());
-    }
-  }
-
-  EmitPop(ebp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    // The locals are initialized to a constant (the undefined value), but
-    // we sync them with the actual frame to allocate space for spilling
-    // them later.  First sync everything above the stack pointer so we can
-    // use pushes to allocate and initialize the locals.
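-    // Three strategies follow: a single push for one local, an
-    // unrolled sequence of register pushes for fewer than
-    // kLocalVarBound locals, and a counted push loop otherwise.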
-    SyncRange(stack_pointer_ + 1, element_count() - 1);
-    Handle<Object> undefined = FACTORY->undefined_value();
-    FrameElement initial_value =
-        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    if (count == 1) {
-      __ push(Immediate(undefined));
-    } else if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ Set(temp.reg(), Immediate(undefined));
-      for (int i = 0; i < count; i++) {
-        __ push(temp.reg());
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      Result cnt = cgen()->allocator()->Allocate();
-      Result tmp = cgen()->allocator()->Allocate();
-      ASSERT(cnt.is_valid());
-      ASSERT(tmp.is_valid());
-      __ mov(cnt.reg(), Immediate(count));
-      __ mov(tmp.reg(), Immediate(undefined));
-      __ bind(&alloc_locals_loop);
-      __ push(tmp.reg());
-      __ dec(cnt.reg());
-      __ j(not_zero, &alloc_locals_loop);
-    }
-    for (int i = 0; i < count; i++) {
-      elements_.Add(initial_value);
-      stack_pointer_++;
-    }
-  }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ mov(Operand(ebp, fp_relative(context_index())), esi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ mov(esi, Operand(ebp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  Result temp = cgen()->allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ lea(temp.reg(), ParameterAt(-1));
-  Push(&temp);
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  FrameElement original = elements_[index];
-
-  // Is this element the backing store of any copies?
-  int new_backing_index = kIllegalIndex;
-  if (original.is_copied()) {
-    // Verify it is copied, and find first copy.
-    for (int i = index + 1; i < element_count(); i++) {
-      if (elements_[i].is_copy() && elements_[i].index() == index) {
-        new_backing_index = i;
-        break;
-      }
-    }
-  }
-
-  if (new_backing_index == kIllegalIndex) {
-    // No copies found, return kIllegalIndex.
-    if (original.is_register()) {
-      Unuse(original.reg());
-    }
-    elements_[index] = FrameElement::InvalidElement();
-    return kIllegalIndex;
-  }
-
-  // This is the backing store of copies.
-  Register backing_reg;
-  if (original.is_memory()) {
-    Result fresh = cgen()->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    Use(fresh.reg(), new_backing_index);
-    backing_reg = fresh.reg();
-    __ mov(backing_reg, Operand(ebp, fp_relative(index)));
-  } else {
-    // The original was in a register.
-    backing_reg = original.reg();
-    set_register_location(backing_reg, new_backing_index);
-  }
-  // Invalidate the element at index.
-  elements_[index] = FrameElement::InvalidElement();
-  // Set the new backing element.
-  if (elements_[new_backing_index].is_synced()) {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::SYNCED,
-                                      original.type_info());
-  } else {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::NOT_SYNCED,
-                                      original.type_info());
-  }
-  // Update the other copies.
-  for (int i = new_backing_index + 1; i < element_count(); i++) {
-    if (elements_[i].is_copy() && elements_[i].index() == index) {
-      elements_[i].set_index(new_backing_index);
-      elements_[new_backing_index].set_copied();
-    }
-  }
-  return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  int new_backing_store_index = InvalidateFrameSlotAt(index);
-  if (new_backing_store_index != kIllegalIndex) {
-    elements_.Add(CopyElementAt(new_backing_store_index));
-    return;
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY: {
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      Use(fresh.reg(), element_count());
-      elements_.Add(new_element);
-      __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
-      break;
-    }
-    case FrameElement::REGISTER:
-      Use(original.reg(), element_count());
-      // Fall through.
-    case FrameElement::CONSTANT:
-    case FrameElement::COPY:
-      original.clear_sync();
-      elements_.Add(original);
-      break;
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  // Store the value on top of the frame to the virtual frame slot at
-  // a given index.  The value on top of the frame is left in place.
-  // This is a duplicating operation, so it can create copies.
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  int top_index = element_count() - 1;
-  FrameElement top = elements_[top_index];
-  FrameElement original = elements_[index];
-  if (top.is_copy() && top.index() == index) return;
-  ASSERT(top.is_valid());
-
-  InvalidateFrameSlotAt(index);
-
-  // InvalidateFrameSlotAt can potentially change any frame element, due
-  // to spilling registers to allocate temporaries in order to preserve
-  // the copy-on-write semantics of aliased elements.  Reload top from
-  // the frame.
-  top = elements_[top_index];
-
-  if (top.is_copy()) {
-    // There are two cases based on the relative positions of the
-    // stored-to slot and the backing slot of the top element.
-    int backing_index = top.index();
-    ASSERT(backing_index != index);
-    if (backing_index < index) {
-      // 1. The top element is a copy of a slot below the stored-to
-      // slot.  The stored-to slot becomes an unsynced copy of that
-      // same backing slot.
-      elements_[index] = CopyElementAt(backing_index);
-    } else {
-      // 2. The top element is a copy of a slot above the stored-to
-      // slot.  The stored-to slot becomes the new (unsynced) backing
-      // slot and both the top element and the element at the former
-      // backing slot become copies of it.  The sync state of the top
-      // and former backing elements is preserved.
-      FrameElement backing_element = elements_[backing_index];
-      ASSERT(backing_element.is_memory() || backing_element.is_register());
-      if (backing_element.is_memory()) {
-        // Because sets of copies are canonicalized to be backed by
-        // their lowest frame element, and because memory frame
-        // elements are backed by the corresponding stack address, we
-        // have to move the actual value down in the stack.
-        //
-        // TODO(209): consider allocating the stored-to slot to the
-        // temp register.  Alternatively, allow copies to appear in
-        // any order in the frame and lazily move the value down to
-        // the slot.
-        Result temp = cgen()->allocator()->Allocate();
-        ASSERT(temp.is_valid());
-        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
-        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
-      } else {
-        set_register_location(backing_element.reg(), index);
-        if (backing_element.is_synced()) {
-          // If the element is a register, we will not actually move
-          // anything on the stack but only update the virtual frame
-          // element.
-          backing_element.clear_sync();
-        }
-      }
-      elements_[index] = backing_element;
-
-      // The old backing element becomes a copy of the new backing
-      // element.
-      FrameElement new_element = CopyElementAt(index);
-      elements_[backing_index] = new_element;
-      if (backing_element.is_synced()) {
-        elements_[backing_index].set_sync();
-      }
-
-      // All the copies of the old backing element (including the top
-      // element) become copies of the new backing element.
-      for (int i = backing_index + 1; i < element_count(); i++) {
-        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
-          elements_[i].set_index(index);
-        }
-      }
-    }
-    return;
-  }
-
-  // Move the top element to the stored-to slot and replace it (the
-  // top element) with a copy.
-  elements_[index] = top;
-  if (top.is_memory()) {
-    // TODO(209): consider allocating the stored-to slot to the temp
-    // register.  Alternatively, allow copies to appear in any order
-    // in the frame and lazily move the value down to the slot.
-    FrameElement new_top = CopyElementAt(index);
-    new_top.set_sync();
-    elements_[top_index] = new_top;
-
-    // The sync state of the former top element is correct (synced).
-    // Emit code to move the value down in the frame.
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), Operand(esp, 0));
-    __ mov(Operand(ebp, fp_relative(index)), temp.reg());
-  } else if (top.is_register()) {
-    set_register_location(top.reg(), index);
-    // The stored-to slot has the (unsynced) register reference and
-    // the top element becomes a copy.  The sync state of the top is
-    // preserved.
-    FrameElement new_top = CopyElementAt(index);
-    if (top.is_synced()) {
-      new_top.set_sync();
-      elements_[index].clear_sync();
-    }
-    elements_[top_index] = new_top;
-  } else {
-    // The stored-to slot holds the same value as the top but
-    // unsynced.  (We do not have copies of constants yet.)
-    ASSERT(top.is_constant());
-    elements_[index].clear_sync();
-  }
-}
-
-
-void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  if (original.is_copy()) {
-    index = original.index();
-    original = elements_[index];
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY:
-    case FrameElement::REGISTER: {
-      Label done;
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      Register fresh_reg = fresh.reg();
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh_reg,
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      new_element.set_untagged_int32(true);
-      Use(fresh_reg, element_count());
-      fresh.Unuse();  // BreakTarget does not handle a live Result well.
-      elements_.Add(new_element);
-      if (original.is_register()) {
-        __ mov(fresh_reg, original.reg());
-      } else {
-        ASSERT(original.is_memory());
-        __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
-      }
-      // Now convert the value to int32, or bail out.
-      if (original.type_info().IsSmi()) {
-        __ SmiUntag(fresh_reg);
-        // Pushing the element is completely done.
-      } else {
-        __ test(fresh_reg, Immediate(kSmiTagMask));
-        Label not_smi;
-        __ j(not_zero, &not_smi);
-        __ SmiUntag(fresh_reg);
-        __ jmp(&done);
-
-        __ bind(&not_smi);
-        if (!original.type_info().IsNumber()) {
-          __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
-                 FACTORY->heap_number_map());
-          cgen()->unsafe_bailout_->Branch(not_equal);
-        }
-
-        if (!CpuFeatures::IsSupported(SSE2)) {
-          UNREACHABLE();
-        } else {
-          CpuFeatures::Scope use_sse2(SSE2);
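-          // Convert the heap number to int32 and back; if the round
-          // trip does not reproduce the original value (or compares
-          // unordered, i.e. NaN), the value has no int32 form and we
-          // bail out.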
-          __ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
-          __ cvttsd2si(fresh_reg, Operand(xmm0));
-          __ cvtsi2sd(xmm1, Operand(fresh_reg));
-          __ ucomisd(xmm0, xmm1);
-          cgen()->unsafe_bailout_->Branch(not_equal);
-          cgen()->unsafe_bailout_->Branch(parity_even);  // NaN.
-          // Test for negative zero.
-          __ test(fresh_reg, Operand(fresh_reg));
-          __ j(not_zero, &done);
-          __ movmskpd(fresh_reg, xmm0);
-          __ and_(fresh_reg, 0x1);
-          cgen()->unsafe_bailout_->Branch(not_equal);
-        }
-        __ bind(&done);
-      }
-      break;
-    }
-    case FrameElement::CONSTANT:
-      elements_.Add(CopyElementAt(index));
-      elements_[element_count() - 1].set_untagged_int32(true);
-      break;
-    case FrameElement::COPY:
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  // Grow the expression stack by handler size less one (the return
-  // address is already pushed by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallStub(stub);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
-  PrepareForCall(0, 0);
-  arg->ToRegister(eax);
-  arg->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
-  PrepareForCall(0, 0);
-
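-  // The stub expects arg0 in edx and arg1 in eax.  Route each value to
-  // its register without clobbering the other.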
-  if (arg0->is_register() && arg0->reg().is(eax)) {
-    if (arg1->is_register() && arg1->reg().is(edx)) {
-      // Wrong registers.
-      __ xchg(eax, edx);
-    } else {
-      // Register edx is free for arg0, which frees eax for arg1.
-      arg0->ToRegister(edx);
-      arg1->ToRegister(eax);
-    }
-  } else {
-    // Register eax is free for arg1, which guarantees edx is free for
-    // arg0.
-    arg1->ToRegister(eax);
-    arg0->ToRegister(edx);
-  }
-
-  arg0->Unuse();
-  arg1->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
-  Result function = Pop();
-
-  // InvokeFunction requires function in edi.  Move it in there.
-  function.ToRegister(edi);
-  function.Unuse();
-
-  // +1 for receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(edi, count, CALL_FUNCTION);
-  RestoreContextRegister();
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  PrepareForCall(0, 0);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
-                                   int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ InvokeBuiltin(id, flag);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ call(code, rmode);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
-                                          Result* b,
-                                          Register a_reg,
-                                          Register b_reg) {
-  if (a->is_register() && a->reg().is(a_reg)) {
-    b->ToRegister(b_reg);
-  } else if (!cgen()->allocator()->is_used(a_reg)) {
-    a->ToRegister(a_reg);
-    b->ToRegister(b_reg);
-  } else if (cgen()->allocator()->is_used(b_reg)) {
-    // a must be in b_reg, b in a_reg.
-    __ xchg(a_reg, b_reg);
-    // Results a and b will be invalidated, so it is ok if they are switched.
-  } else {
-    b->ToRegister(b_reg);
-    a->ToRegister(a_reg);
-  }
-  a->Unuse();
-  b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
-  // Name and receiver are on the top of the frame.  The IC expects
-  // name in ecx and receiver in eax.
-  Result name = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);  // No stack arguments.
-  MoveResultsToRegisters(&name, &receiver, ecx, eax);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame. Put them in eax and edx.
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&key, &receiver, eax, edx);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
-                                 bool is_contextual,
-                                 StrictModeFlag strict_mode) {
-  // Value and (if not contextual) receiver are on top of the frame.
-  // The IC expects name in ecx, value in eax, and receiver in edx.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-
-  Result value = Pop();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    PrepareForCall(0, 0);
-    value.ToRegister(eax);
-    __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    value.Unuse();
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    Result receiver = Pop();
-    PrepareForCall(0, 0);
-    MoveResultsToRegisters(&value, &receiver, eax, edx);
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ mov(ecx, name);
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in eax, key in ecx, and receiver in edx.
-  Result value = Pop();
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  if (!cgen()->allocator()->is_used(eax) ||
-      (value.is_register() && value.reg().is(eax))) {
-    if (!cgen()->allocator()->is_used(eax)) {
-      value.ToRegister(eax);
-    }
-    MoveResultsToRegisters(&key, &receiver, ecx, edx);
-    value.Unuse();
-  } else if (!cgen()->allocator()->is_used(ecx) ||
-             (key.is_register() && key.reg().is(ecx))) {
-    if (!cgen()->allocator()->is_used(ecx)) {
-      key.ToRegister(ecx);
-    }
-    MoveResultsToRegisters(&value, &receiver, eax, edx);
-    key.Unuse();
-  } else if (!cgen()->allocator()->is_used(edx) ||
-             (receiver.is_register() && receiver.reg().is(edx))) {
-    if (!cgen()->allocator()->is_used(edx)) {
-      receiver.ToRegister(edx);
-    }
-    MoveResultsToRegisters(&key, &value, ecx, eax);
-    receiver.Unuse();
-  } else {
-    // All three registers are used, and no value is in the correct place.
-    // We have one of the two circular permutations of eax, ecx, edx.
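-    // Worked case: value in ecx, key in edx, receiver in eax.  After
-    // xchg(eax, edx): eax holds the key, edx the receiver; after
-    // xchg(eax, ecx): eax holds the value, ecx the key, as required.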
-    ASSERT(value.is_register());
-    if (value.reg().is(ecx)) {
-      __ xchg(eax, edx);
-      __ xchg(eax, ecx);
-    } else {
-      __ xchg(eax, ecx);
-      __ xchg(eax, edx);
-    }
-    value.Unuse();
-    key.Unuse();
-    receiver.Unuse();
-  }
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
-                                int arg_count,
-                                int loop_nesting) {
-  // Function name, arguments, and receiver are on top of the frame.
-  // The IC expects the name in ecx and the rest on the stack and
-  // drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic = Isolate::Current()->stub_cache()->ComputeCallInitialize(
-      arg_count, in_loop);
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  Result name = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Arguments + receiver.
-  name.ToRegister(ecx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
-                                     int arg_count,
-                                     int loop_nesting) {
-  // Function name, arguments, and receiver are on top of the frame.
-  // The IC expects the name in ecx and the rest on the stack and
-  // drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      Isolate::Current()->stub_cache()->ComputeKeyedCallInitialize(arg_count,
-                                                                   in_loop);
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  Result name = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Arguments + receiver.
-  name.ToRegister(ecx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
-  // Arguments, receiver, and function are on top of the frame.  The
-  // IC expects arg count in eax, function in edi, and the arguments
-  // and receiver on the stack.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  // Duplicate the function before preparing the frame.
-  PushElementAt(arg_count);
-  Result function = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill function and args.
-  function.ToRegister(edi);
-
-  // Constructors are called with the number of arguments in register
-  // eax for now. Another option would be to have separate construct
-  // call trampolines for the different argument counts encountered.
-  Result num_args = cgen()->allocator()->Allocate(eax);
-  ASSERT(num_args.is_valid());
-  __ Set(num_args.reg(), Immediate(arg_count));
-
-  function.Unuse();
-  num_args.Unuse();
-  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
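-  // Elements above the stack pointer live only in the virtual frame
-  // and are dropped for free; only materialized elements require
-  // adjusting esp.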
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
-  }
-
-  // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
-}
-
-
-Result VirtualFrame::Pop() {
-  FrameElement element = elements_.RemoveLast();
-  int index = element_count();
-  ASSERT(element.is_valid());
-  ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
-
-  // Get number type information of the result.
-  TypeInfo info;
-  if (!element.is_copy()) {
-    info = element.type_info();
-  } else {
-    info = elements_[element.index()].type_info();
-  }
-
-  bool pop_needed = (stack_pointer_ == index);
-  if (pop_needed) {
-    stack_pointer_--;
-    if (element.is_memory()) {
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ pop(temp.reg());
-      temp.set_type_info(info);
-      temp.set_untagged_int32(element.is_untagged_int32());
-      return temp;
-    }
-
-    __ add(Operand(esp), Immediate(kPointerSize));
-  }
-  ASSERT(!element.is_memory());
-
-  // The top element is a register, constant, or a copy.  Unuse
-  // registers and follow copies to their backing store.
-  if (element.is_register()) {
-    Unuse(element.reg());
-  } else if (element.is_copy()) {
-    ASSERT(!element.is_untagged_int32());
-    ASSERT(element.index() < index);
-    index = element.index();
-    element = elements_[index];
-  }
-  ASSERT(!element.is_copy());
-
-  // The element is memory, a register, or a constant.
-  if (element.is_memory()) {
-    // Memory elements could only be the backing store of a copy.
-    // Allocate the original to a register.
-    ASSERT(index <= stack_pointer_);
-    ASSERT(!element.is_untagged_int32());
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    Use(temp.reg(), index);
-    FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(),
-                                      FrameElement::SYNCED,
-                                      element.type_info());
-    // Preserve the copy flag on the element.
-    if (element.is_copied()) new_element.set_copied();
-    elements_[index] = new_element;
-    __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
-    return Result(temp.reg(), info);
-  } else if (element.is_register()) {
-    Result return_value(element.reg(), info);
-    return_value.set_untagged_int32(element.is_untagged_int32());
-    return return_value;
-  } else {
-    ASSERT(element.is_constant());
-    Result return_value(element.handle());
-    return_value.set_untagged_int32(element.is_untagged_int32());
-    return return_value;
-  }
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(Operand operand) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(immediate);
-}
-
-
-void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
-  ASSERT(!ConstantPoolOverflowed());
-  elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
-  elements_[element_count() - 1].set_untagged_int32(true);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
-  ASSERT(expr->IsTrivial());
-
-  Literal* lit = expr->AsLiteral();
-  if (lit != NULL) {
-    Push(lit->handle());
-    return;
-  }
-
-  VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL) {
-    Slot* slot = proxy->var()->AsSlot();
-    if (slot->type() == Slot::LOCAL) {
-      PushLocalAt(slot->index());
-      return;
-    }
-    if (slot->type() == Slot::PARAMETER) {
-      PushParameterAt(slot->index());
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
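-  // If the constant pool has overflowed, materialize the value in a
-  // register instead of recording another constant frame element.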
-  if (ConstantPoolOverflowed()) {
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ Set(temp.reg(), Immediate(value));
-    Push(&temp);
-  } else {
-    FrameElement element =
-        FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-    elements_.Add(element);
-  }
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
deleted file mode 100644
index 504a8fc..0000000
--- a/src/ia32/virtual-frame-ia32.h
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
-#define V8_IA32_VIRTUAL_FRAME_IA32_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
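-// Each frame element is in one of five states: INVALID, MEMORY (backed
-// by its stack slot and always synced), REGISTER, CONSTANT, or COPY (an
-// alias of a lower frame element that acts as its backing store).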
-class VirtualFrame: public ZoneObject {
- public:
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
-  // generator is being transformed.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
-      ASSERT(cgen()->has_valid_frame());
-      cgen()->frame()->SpillAll();
-      cgen()->set_in_spilled_code(true);
-    }
-
-    ~SpilledScope() {
-      cgen()->set_in_spilled_code(previous_state_);
-    }
-
-   private:
-    bool previous_state_;
-
-    CodeGenerator* cgen() {
-      return CodeGeneratorScope::Current(Isolate::Current());
-    }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  CodeGenerator* cgen() {
-    return CodeGeneratorScope::Current(Isolate::Current());
-  }
-
-  MacroAssembler* masm() { return cgen()->masm(); }
-
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-    TypeInfo info = TypeInfo::Uninitialized());
-
-  // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
-
-  // The height of the virtual expression stack.
-  int height() { return element_count() - expression_base_index(); }
-
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  inline int register_location(Register reg);
-
-  inline void set_register_location(Register reg, int index);
-
-  bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
-  }
-
-  inline bool is_used(Register reg);
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget count elements from the top of the frame, all of them
-  // in-memory (including synced ones), and adjust the stack pointer
-  // downward to match an external frame effect (examples include a
-  // call removing its arguments, and exiting a try/catch removing an
-  // exception handler).  No code will be emitted.
-  void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    ForgetElements(count);
-  }
-
-  // Forget count elements from the top of the frame without adjusting
-  // the stack pointer downward.  This is used, for example, before
-  // merging frames at break, continue, and return targets.
-  void ForgetElements(int count);
-
-  // Spill all values from the frame to memory.
-  inline void SpillAll();
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
-  }
-
-  // Make the two registers distinct and spill them.  Returns the second
-  // register.  If the registers were not distinct then it returns the new
-  // second register.
-  Result MakeDistinctAndSpilled(Result* left, Result* right) {
-    Spill(left->reg());
-    Spill(right->reg());
-    if (left->reg().is(right->reg())) {
-      RegisterAllocator* allocator = cgen()->allocator();
-      Result fresh = allocator->Allocate();
-      ASSERT(fresh.is_valid());
-      masm()->mov(fresh.reg(), right->reg());
-      return fresh;
-    }
-    return *right;
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references).
-  Register SpillAnyRegister();
-
-  // Spill the top element of the frame.
-  void SpillTop() { SpillElementAt(element_count() - 1); }
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Make this frame so that an arbitrary frame of the same height can
-  // be merged to it.  Copies and constants are removed from the frame.
-  void MakeMergable();
-
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected);
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Use(i);
-    }
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by spilling locals.  This
-  // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
-  inline void PrepareForReturn();
-
-  // Number of local variables after which we use a loop for allocating.
-  static const int kLocalVarBound = 10;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // An element of the expression stack as an assembly operand.
-  Operand ElementAt(int index) const {
-    return Operand(esp, index * kPointerSize);
-  }
-
-  // Random-access store to a frame-top relative frame element.  The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant.  The index is frame-top relative.
-  inline void SetElementAt(int index, Handle<Object> value);
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
-  void StoreToElementAt(int index) {
-    StoreToFrameSlotAt(element_count() - index - 1);
-  }
-
-  // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return Operand(ebp, kLocal0Offset - index * kPointerSize);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void UntaggedPushLocalAt(int index) {
-    UntaggedPushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot.  The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // Push the function on top of the frame.
-  void PushFunction() {
-    PushFrameSlotAt(function_index());
-  }
-
-  // Save the value of the esi register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the esi register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
-
-  // A parameter as an assembly operand.
-  Operand ParameterAt(int index) {
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count());
-    return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void UntaggedPushParameterAt(int index) {
-    UntaggedPushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a parameter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
-
-  // The receiver frame slot.
-  Operand Receiver() {
-    return ParameterAt(-1);
-  }
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline Result CallStub(CodeStub* stub, int arg_count);
-
-  // Call stub that takes a single argument passed in eax.  The
-  // argument is given as a result which does not have to be eax or
-  // even a register.  The argument is consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg);
-
-  // Call stub that takes a pair of arguments passed in edx (arg0) and
-  // eax (arg1).  The arguments are given as results which do not have
-  // to be in the proper registers or even in registers.  The
-  // arguments are consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  Result CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  Result CallRuntime(const Runtime::Function* f, int arg_count);
-  Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
-
-  // Call load IC.  Name and receiver are found on top of the frame.
-  // Both are dropped.
-  Result CallLoadIC(RelocInfo::Mode mode);
-
-  // Call keyed load IC.  Key and receiver are found on top of the
-  // frame.  Both are dropped.
-  Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
-  // Call store IC.  If the load is contextual, value is found on top of the
-  // frame.  If not, value and receiver are on the frame.  Both are dropped.
-  Result CallStoreIC(Handle<String> name, bool is_contextual,
-                     StrictModeFlag strict_mode);
-
-  // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame.  All three are dropped.
-  Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call call IC.  Function name, arguments, and receiver are found on top
-  // of the frame and dropped by the call.  The argument count does not
-  // include the receiver.
-  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Call keyed call IC.  Same calling convention as CallCallIC.
-  Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Allocate and call JS function as constructor.  Arguments,
-  // receiver (global object), and function are found on top of the
-  // frame.  Function is not dropped.  The argument count does not
-  // include the receiver.
-  Result CallConstructor(int arg_count);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // except possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() {
-    Drop(1);
-  }
-
-  // Duplicate the top element of the frame.
-  void Dup() {
-    PushFrameSlotAt(element_count() - 1);
-  }
-
-  // Pop an element from the top of the expression stack.  Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  void EmitPop(Operand operand);
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Operand operand,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Immediate immediate,
-                TypeInfo info = TypeInfo::Unknown());
-
-  inline bool ConstantPoolOverflowed();
-
-  // Push an element on the virtual frame.
-  void Push(Handle<Object> value);
-  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
-  inline void Push(Smi* value);
-
-  void PushUntaggedElement(Handle<Object> value);
-
-  // Pushing a result invalidates it (its contents become owned by the
-  // frame).
-  void Push(Result* result) {
-    // This assert will trigger if you try to push the same value twice.
-    ASSERT(result->is_valid());
-    if (result->is_register()) {
-      Push(result->reg(), result->type_info());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    if (cgen()->in_safe_int32_mode()) {
-      ASSERT(result->is_untagged_int32());
-      elements_[element_count() - 1].set_untagged_int32(true);
-    }
-    result->Unuse();
-  }
-
-  // Pushing an expression expects that the expression is trivial (according
-  // to Expression::IsTrivial).
-  void Push(Expression* expr);
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  inline void Nip(int num_dropped);
-
-  // Check that the frame contains no untagged int32 elements.
-  bool HasNoUntaggedInt32Elements() {
-    for (int i = 0; i < element_count(); ++i) {
-      if (elements_[i].is_untagged_int32()) return false;
-    }
-    return true;
-  }
-
-  // Update the type information of a variable frame element directly.
-  inline void SetTypeForLocalAt(int index, TypeInfo info);
-  inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  ZoneList<FrameElement> elements_;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the esp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count();
-
-  inline int local_count();
-
-  // The index of the element that is at the processor's frame pointer
-  // (the ebp register).  The parameters, receiver, and return address
-  // are below the frame pointer.
-  int frame_pointer() {
-    return parameter_count() + 2;
-  }
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() {
-    return 1;
-  }
-
-  // The index of the context slot in the frame.  It is immediately
-  // above the frame pointer.
-  int context_index() {
-    return frame_pointer() + 1;
-  }
-
-  // The index of the function slot in the frame.  It is above the frame
-  // pointer and the context slot.
-  int function_index() {
-    return frame_pointer() + 2;
-  }
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lie the context and the function.
-  int local0_index() {
-    return frame_pointer() + 3;
-  }
-
-  // The index of the base of the expression stack.
-  int expression_base_index() {
-    return local0_index() + local_count();
-  }
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
-
-  // Record an occurrence of a register in the virtual frame.  This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame.  This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
-
-  // Spill the element at a particular index---write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index.  If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame, as an untagged int32 value.  Bails out if the value is not
-  // an int32.
-  void UntaggedPushFrameSlotAt(int index);
-
-  // Push the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
-
-  // This function assumes that a and b are the only results that could be in
-  // the registers a_reg or b_reg.  Other results can be live, but must not
-  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
-  void MoveResultsToRegisters(Result* a,
-                              Result* b,
-                              Register a_reg,
-                              Register b_reg);
-
-  // Call a code stub that has already been prepared for calling (via
-  // PrepareForCall).
-  Result RawCallStub(CodeStub* stub);
-
-  // Calls a code object which has already been prepared for calling
-  // (via PrepareForCall).
-  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
-  inline bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class FrameRegisterState;
-  friend class JumpTarget;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_VIRTUAL_FRAME_IA32_H_
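
The virtual-frame header deleted above expresses the entire ia32 stack layout as index arithmetic (param0_index, frame_pointer, local0_index, fp_relative, and friends). A minimal standalone model of that arithmetic, with assumed example parameter and local counts (none of these names are V8's):

#include <cstdio>

// frame index -> byte offset from ebp, mirroring the deleted fp_relative().
static int FpRelative(int frame_pointer, int index, int pointer_size) {
  return (frame_pointer - index) * pointer_size;
}

int main() {
  const int kPointerSize = 4;        // ia32
  const int parameter_count = 2;     // assumed example function
  const int local_count = 3;

  const int param0_index = 1;                     // receiver sits at index 0
  const int frame_pointer = parameter_count + 2;  // return address in between
  const int context_index = frame_pointer + 1;
  const int local0_index = frame_pointer + 3;     // context, function first
  const int expression_base_index = local0_index + local_count;

  std::printf("receiver  at ebp%+d\n",
              FpRelative(frame_pointer, 0, kPointerSize));
  std::printf("param 0   at ebp%+d\n",
              FpRelative(frame_pointer, param0_index, kPointerSize));
  std::printf("context   at ebp%+d\n",
              FpRelative(frame_pointer, context_index, kPointerSize));
  std::printf("local 0   at ebp%+d\n",
              FpRelative(frame_pointer, local0_index, kPointerSize));
  std::printf("expr base at ebp%+d\n",
              FpRelative(frame_pointer, expression_base_index, kPointerSize));
}

With parameter_count == 2 this reproduces the offsets implied by ParameterAt() and LocalAt() above: receiver at ebp+16, parameter 0 at ebp+12, context at ebp-4, local 0 at ebp-12.
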
diff --git a/src/ic.cc b/src/ic.cc
index dd4d25b..99eb21f 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -282,7 +282,6 @@
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
-    case Code::BINARY_OP_IC:
     case Code::TYPE_RECORDING_BINARY_OP_IC:
     case Code::COMPARE_IC:
       // Clearing these is tricky and does not
@@ -1979,147 +1978,6 @@
 }
 
 
-void BinaryOpIC::patch(Code* code) {
-  set_target(code);
-}
-
-
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
-  switch (type_info) {
-    case UNINIT_OR_SMI: return "UninitOrSmi";
-    case DEFAULT: return "Default";
-    case GENERIC: return "Generic";
-    case HEAP_NUMBERS: return "HeapNumbers";
-    case STRINGS: return "Strings";
-    default: return "Invalid";
-  }
-}
-
-
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
-  switch (type_info) {
-    case UNINIT_OR_SMI:
-      return UNINITIALIZED;
-    case DEFAULT:
-    case HEAP_NUMBERS:
-    case STRINGS:
-      return MONOMORPHIC;
-    case GENERIC:
-      return MEGAMORPHIC;
-  }
-  UNREACHABLE();
-  return UNINITIALIZED;
-}
-
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
-                                             Object* right) {
-  if (left->IsSmi() && right->IsSmi()) {
-    // If we have two smi inputs we can reach here because
-    // of an overflow. Enter default state.
-    return DEFAULT;
-  }
-
-  if (left->IsNumber() && right->IsNumber()) {
-    return HEAP_NUMBERS;
-  }
-
-  if (left->IsString() || right->IsString()) {
-    // Patching for fast string ADD makes sense even if only one of the
-    // arguments is a string.
-    return STRINGS;
-  }
-
-  return GENERIC;
-}
-
-
-// defined in code-stubs-<arch>.cc
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
-
-
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
-  ASSERT(args.length() == 5);
-
-  HandleScope scope(isolate);
-  Handle<Object> left = args.at<Object>(0);
-  Handle<Object> right = args.at<Object>(1);
-  int key = Smi::cast(args[2])->value();
-  Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
-  BinaryOpIC::TypeInfo previous_type =
-      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-
-  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
-  Handle<Code> code = GetBinaryOpStub(key, type);
-  if (!code.is_null()) {
-    BinaryOpIC ic(isolate);
-    ic.patch(*code);
-    if (FLAG_trace_ic) {
-      PrintF("[BinaryOpIC (%s->%s)#%s]\n",
-             BinaryOpIC::GetName(previous_type),
-             BinaryOpIC::GetName(type),
-             Token::Name(op));
-    }
-  }
-
-  Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
-      isolate->thread_local_top()->context_->builtins(), isolate);
-  Object* builtin = NULL;  // Initialization calms down the compiler.
-  switch (op) {
-    case Token::ADD:
-      builtin = builtins->javascript_builtin(Builtins::ADD);
-      break;
-    case Token::SUB:
-      builtin = builtins->javascript_builtin(Builtins::SUB);
-      break;
-    case Token::MUL:
-      builtin = builtins->javascript_builtin(Builtins::MUL);
-      break;
-    case Token::DIV:
-      builtin = builtins->javascript_builtin(Builtins::DIV);
-      break;
-    case Token::MOD:
-      builtin = builtins->javascript_builtin(Builtins::MOD);
-      break;
-    case Token::BIT_AND:
-      builtin = builtins->javascript_builtin(Builtins::BIT_AND);
-      break;
-    case Token::BIT_OR:
-      builtin = builtins->javascript_builtin(Builtins::BIT_OR);
-      break;
-    case Token::BIT_XOR:
-      builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
-      break;
-    case Token::SHR:
-      builtin = builtins->javascript_builtin(Builtins::SHR);
-      break;
-    case Token::SAR:
-      builtin = builtins->javascript_builtin(Builtins::SAR);
-      break;
-    case Token::SHL:
-      builtin = builtins->javascript_builtin(Builtins::SHL);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  Handle<JSFunction> builtin_function(JSFunction::cast(builtin),
-                                      isolate);
-
-  bool caught_exception;
-  Object** builtin_args[] = { right.location() };
-  Handle<Object> result = Execution::Call(builtin_function,
-                                          left,
-                                          ARRAY_SIZE(builtin_args),
-                                          builtin_args,
-                                          &caught_exception);
-  if (caught_exception) {
-    return Failure::Exception();
-  }
-  return *result;
-}
-
-
 void TRBinaryOpIC::patch(Code* code) {
   set_target(code);
 }
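
For reference, the state selection of the BinaryOpIC being deleted here (it is superseded by TRBinaryOpIC) boils down to one small decision function. A standalone model, with a stand-in Value struct in place of V8's Object* tag checks:

#include <cstdio>

enum TypeInfo { UNINIT_OR_SMI, DEFAULT, HEAP_NUMBERS, STRINGS, GENERIC };

struct Value { bool is_smi, is_number, is_string; };  // stand-in for Object*

TypeInfo GetTypeInfo(const Value& left, const Value& right) {
  // Two smi inputs only reach the patch path on overflow: stay DEFAULT.
  if (left.is_smi && right.is_smi) return DEFAULT;
  if (left.is_number && right.is_number) return HEAP_NUMBERS;
  // Fast string ADD pays off even if only one operand is a string.
  if (left.is_string || right.is_string) return STRINGS;
  return GENERIC;
}

int main() {
  Value smi = {true, true, false};
  Value str = {false, false, true};
  std::printf("%d\n", GetTypeInfo(smi, str));  // 3 == STRINGS
}
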
diff --git a/src/ic.h b/src/ic.h
index bb8a981..911cbd8 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -53,7 +53,6 @@
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)                       \
-  ICU(BinaryOp_Patch)                                 \
   ICU(TypeRecordingBinaryOp_Patch)                    \
   ICU(CompareIC_Miss)
 //
@@ -577,30 +576,6 @@
 };
 
 
-class BinaryOpIC: public IC {
- public:
-
-  enum TypeInfo {
-    UNINIT_OR_SMI,
-    DEFAULT,  // Initial state. When first executed, patches to one
-              // of the following states depending on the operand types.
-    HEAP_NUMBERS,  // Both arguments are HeapNumbers.
-    STRINGS,  // At least one of the arguments is a String.
-    GENERIC   // Non-specialized case (processes any type combination).
-  };
-
-  explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
-  void patch(Code* code);
-
-  static const char* GetName(TypeInfo type_info);
-
-  static State ToState(TypeInfo type_info);
-
-  static TypeInfo GetTypeInfo(Object* left, Object* right);
-};
-
-
 // Type Recording BinaryOpIC, that records the types of the inputs and outputs.
 class TRBinaryOpIC: public IC {
  public:
diff --git a/src/isolate.cc b/src/isolate.cc
index cc9bc37..b8a7fb7 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -701,6 +701,33 @@
 }
 
 
+void Isolate::PropagatePendingExceptionToExternalTryCatch() {
+  ASSERT(has_pending_exception());
+
+  bool external_caught = IsExternallyCaught();
+  thread_local_top_.external_caught_exception_ = external_caught;
+
+  if (!external_caught) return;
+
+  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+    // Do not propagate the OOM exception: we should kill the VM ASAP.
+  } else if (thread_local_top_.pending_exception_ ==
+             heap()->termination_exception()) {
+    try_catch_handler()->can_continue_ = false;
+    try_catch_handler()->exception_ = heap()->null_value();
+  } else {
+    // At this point all non-object (failure) exceptions have
+    // been dealt with, so this shouldn't fail.
+    ASSERT(!pending_exception()->IsFailure());
+    try_catch_handler()->can_continue_ = true;
+    try_catch_handler()->exception_ = pending_exception();
+    if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+      try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
+    }
+  }
+}
+
+
 bool Isolate::Init(Deserializer* des) {
   ASSERT(state_ != INITIALIZED);
 
diff --git a/src/isolate.h b/src/isolate.h
index 7c95738..dd0a1fe 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -179,7 +179,6 @@
   int thread_id_;
   MaybeObject* pending_exception_;
   bool has_pending_message_;
-  const char* pending_message_;
   Object* pending_message_obj_;
   Script* pending_message_script_;
   int pending_message_start_pos_;
@@ -189,6 +188,9 @@
   // unify them later.
   MaybeObject* scheduled_exception_;
   bool external_caught_exception_;
+  // True if an unhandled message is currently being reported by
+  // MessageHandler::ReportMessage.
+  bool in_exception_reporting_;
   SaveContext* save_context_;
   v8::TryCatch* catcher_;
 
@@ -495,6 +497,9 @@
   bool external_caught_exception() {
     return thread_local_top_.external_caught_exception_;
   }
+  void set_external_caught_exception(bool value) {
+    thread_local_top_.external_caught_exception_ = value;
+  }
   void set_pending_exception(MaybeObject* exception) {
     thread_local_top_.pending_exception_ = exception;
   }
@@ -509,7 +514,6 @@
   }
   void clear_pending_message() {
     thread_local_top_.has_pending_message_ = false;
-    thread_local_top_.pending_message_ = NULL;
     thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
     thread_local_top_.pending_message_script_ = NULL;
   }
@@ -522,6 +526,18 @@
   bool* external_caught_exception_address() {
     return &thread_local_top_.external_caught_exception_;
   }
+  bool in_exception_reporting() {
+    return thread_local_top_.in_exception_reporting_;
+  }
+  void set_in_exception_reporting(bool value) {
+    thread_local_top_.in_exception_reporting_ = value;
+  }
+  v8::TryCatch* catcher() {
+    return thread_local_top_.catcher_;
+  }
+  void set_catcher(v8::TryCatch* catcher) {
+    thread_local_top_.catcher_ = catcher;
+  }
 
   MaybeObject** scheduled_exception_address() {
     return &thread_local_top_.scheduled_exception_;
@@ -592,6 +608,27 @@
   // JavaScript code.  If an exception is scheduled true is returned.
   bool OptionalRescheduleException(bool is_bottom_call);
 
+  class ExceptionScope {
+   public:
+    explicit ExceptionScope(Isolate* isolate) :
+      // Scope currently can only be used for regular exceptions, not
+      // failures like OOM or termination exception.
+      isolate_(isolate),
+      pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
+      catcher_(isolate_->catcher())
+    { }
+
+    ~ExceptionScope() {
+      isolate_->set_catcher(catcher_);
+      isolate_->set_pending_exception(*pending_exception_);
+    }
+
+   private:
+    Isolate* isolate_;
+    Handle<Object> pending_exception_;
+    v8::TryCatch* catcher_;
+  };
+
   void SetCaptureStackTraceForUncaughtExceptions(
       bool capture,
       int frame_limit,
@@ -636,9 +673,7 @@
 
   // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
   Failure* PromoteScheduledException();
-  void DoThrow(MaybeObject* exception,
-               MessageLocation* location,
-               const char* message);
+  void DoThrow(MaybeObject* exception, MessageLocation* location);
   // Checks if exception should be reported and finds out if it's
   // caught externally.
   bool ShouldReportException(bool* can_be_caught_externally,
@@ -1023,6 +1058,8 @@
 
   void FillCache();
 
+  void PropagatePendingExceptionToExternalTryCatch();
+
   int stack_trace_nesting_level_;
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
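
The new Isolate::ExceptionScope is a plain RAII save/restore of the pending exception and the active catcher, so nested work (such as reporting a message) cannot clobber either. A standalone model of that contract; FakeIsolate is a stand-in, and unlike this toy the real scope can only hold regular exceptions, not OOM/termination failures:

#include <cassert>

struct FakeIsolate {
  int pending_exception;  // 0 means "no pending exception"
  void* catcher;
};

class ExceptionScope {
 public:
  explicit ExceptionScope(FakeIsolate* isolate)
      : isolate_(isolate),
        pending_exception_(isolate->pending_exception),
        catcher_(isolate->catcher) {}
  ~ExceptionScope() {
    // Restore both on exit, exactly like the real scope above.
    isolate_->catcher = catcher_;
    isolate_->pending_exception = pending_exception_;
  }

 private:
  FakeIsolate* isolate_;
  int pending_exception_;
  void* catcher_;
};

int main() {
  FakeIsolate iso = {42, 0};
  {
    ExceptionScope scope(&iso);
    iso.pending_exception = 0;  // nested work clobbers it...
  }
  assert(iso.pending_exception == 42);  // ...but the scope restores it
}
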
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 3ed5a7e..b9b2f60 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1447,7 +1447,7 @@
 
 class OffsetsVector {
  public:
-  inline OffsetsVector(int num_registers)
+  explicit inline OffsetsVector(int num_registers)
       : offsets_vector_length_(num_registers) {
     if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
       vector_ = NewArray<int>(offsets_vector_length_);
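
The jsregexp.h change above only adds `explicit`, but it closes off an easy mistake: without it, a bare int converts silently into an OffsetsVector wherever one is expected. A minimal illustration; OffsetsVectorLike and SumLength are invented for the example, not V8 code:

// Stand-in type; only the constructor signature matters here.
struct OffsetsVectorLike {
  explicit OffsetsVectorLike(int num_registers) : length_(num_registers) {}
  int length_;
};

static int SumLength(const OffsetsVectorLike& v) { return v.length_; }

int main() {
  int ok = SumLength(OffsetsVectorLike(8));  // fine: conversion is spelled out
  // int bad = SumLength(8);  // no longer compiles once the ctor is explicit
  return ok == 8 ? 0 : 1;
}
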
diff --git a/src/jump-target-heavy-inl.h b/src/jump-target-heavy-inl.h
deleted file mode 100644
index 0a2a569..0000000
--- a/src/jump-target-heavy-inl.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_INL_H_
-#define V8_JUMP_TARGET_HEAVY_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
-  FrameElement* element = &entry_frame_->elements_[index];
-  element->clear_copied();
-  if (target->is_register()) {
-    entry_frame_->set_register_location(target->reg(), index);
-  } else if (target->is_copy()) {
-    entry_frame_->elements_[target->index()].set_copied();
-  }
-  if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
-    element->set_type_info(TypeInfo::Unknown());
-  }
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_HEAVY_INL_H_
diff --git a/src/jump-target-heavy.cc b/src/jump-target-heavy.cc
deleted file mode 100644
index f73e027..0000000
--- a/src/jump-target-heavy.cc
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-void JumpTarget::Jump(Result* arg) {
-  ASSERT(cgen()->has_valid_frame());
-
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-#ifdef DEBUG
-  Result::Type arg_type = arg->type();
-  Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
-
-  cgen()->frame()->Push(arg);
-  DoBranch(cc, hint);
-  *arg = cgen()->frame()->Pop();
-
-  ASSERT(arg->type() == arg_type);
-  ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-#ifdef DEBUG
-  Result::Type arg0_type = arg0->type();
-  Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg;
-  Result::Type arg1_type = arg1->type();
-  Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg;
-#endif
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  DoBranch(cc, hint);
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-
-  ASSERT(arg0->type() == arg0_type);
-  ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg));
-  ASSERT(arg1->type() == arg1_type);
-  ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg));
-}
-
-
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    Jump(arg);  // May emit merge code here.
-    fall_through.Bind();
-  } else {
-#ifdef DEBUG
-    Result::Type arg_type = arg->type();
-    Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
-    cgen()->frame()->Push(arg);
-    DoBranch(cc, hint);
-    *arg = cgen()->frame()->Pop();
-    ASSERT(arg->type() == arg_type);
-    ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
-  }
-}
-
-
-void JumpTarget::Bind(Result* arg) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0, Result* arg1) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg0);
-    cgen()->frame()->Push(arg1);
-  }
-  DoBind();
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::ComputeEntryFrame() {
-  // Given: a collection of frames reaching by forward CFG edges and
-  // the directionality of the block.  Compute: an entry frame for the
-  // block.
-
-  Isolate::Current()->counters()->compute_entry_frame()->Increment();
-#ifdef DEBUG
-  if (Isolate::Current()->jump_target_compiling_deferred_code()) {
-    ASSERT(reaching_frames_.length() > 1);
-    VirtualFrame* frame = reaching_frames_[0];
-    bool all_identical = true;
-    for (int i = 1; i < reaching_frames_.length(); i++) {
-      if (!frame->Equals(reaching_frames_[i])) {
-        all_identical = false;
-        break;
-      }
-    }
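-    // NOTE: the assert below is intentionally vacuous; the strict check
-    // that all reaching frames are identical appears to be disabled.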
-    ASSERT(!all_identical || all_identical);
-  }
-#endif
-
-  // Choose an initial frame.
-  VirtualFrame* initial_frame = reaching_frames_[0];
-
-  // A list of pointers to frame elements in the entry frame.  NULL
-  // indicates that the element has not yet been determined.
-  int length = initial_frame->element_count();
-  ZoneList<FrameElement*> elements(length);
-
-  // Initially populate the list of elements based on the initial
-  // frame.
-  for (int i = 0; i < length; i++) {
-    FrameElement element = initial_frame->elements_[i];
-    // We do not allow copies or constants in bidirectional frames.
-    if (direction_ == BIDIRECTIONAL) {
-      if (element.is_constant() || element.is_copy()) {
-        elements.Add(NULL);
-        continue;
-      }
-    }
-    elements.Add(&initial_frame->elements_[i]);
-  }
-
-  // Compute elements based on the other reaching frames.
-  if (reaching_frames_.length() > 1) {
-    for (int i = 0; i < length; i++) {
-      FrameElement* element = elements[i];
-      for (int j = 1; j < reaching_frames_.length(); j++) {
-        // Element computation is monotonic: new information will not
-        // change our decision about undetermined or invalid elements.
-        if (element == NULL || !element->is_valid()) break;
-
-        FrameElement* other = &reaching_frames_[j]->elements_[i];
-        element = element->Combine(other);
-        if (element != NULL && !element->is_copy()) {
-          ASSERT(other != NULL);
-          // We overwrite the number information of one of the incoming frames.
-          // This is safe because we only use the frame for emitting merge code.
-          // The number information of incoming frames is not used anymore.
-          element->set_type_info(TypeInfo::Combine(element->type_info(),
-                                                   other->type_info()));
-        }
-      }
-      elements[i] = element;
-    }
-  }
-
-  // Build the new frame.  A freshly allocated frame has memory elements
-  // for the parameters and some platform-dependent elements (e.g.,
-  // return address).  Replace those first.
-  entry_frame_ = new VirtualFrame();
-  int index = 0;
-  for (; index < entry_frame_->element_count(); index++) {
-    FrameElement* target = elements[index];
-    // If the element is determined, set it now.  Count registers.  Mark
-    // elements as copied exactly when they have a copy.  Undetermined
-    // elements are initially recorded as if in memory.
-    if (target != NULL) {
-      entry_frame_->elements_[index] = *target;
-      InitializeEntryElement(index, target);
-    }
-  }
-  // Then fill in the rest of the frame with new elements.
-  for (; index < length; index++) {
-    FrameElement* target = elements[index];
-    if (target == NULL) {
-      entry_frame_->elements_.Add(
-          FrameElement::MemoryElement(TypeInfo::Uninitialized()));
-    } else {
-      entry_frame_->elements_.Add(*target);
-      InitializeEntryElement(index, target);
-    }
-  }
-
-  // Allocate any still-undetermined frame elements to registers or
-  // memory, from the top down.
-  for (int i = length - 1; i >= 0; i--) {
-    if (elements[i] == NULL) {
-      // Loop over all the reaching frames to check whether the element
-      // is synced on all frames and to count the registers it occupies.
-      bool is_synced = true;
-      RegisterFile candidate_registers;
-      int best_count = kMinInt;
-      int best_reg_num = RegisterAllocator::kInvalidRegister;
-      TypeInfo info = TypeInfo::Uninitialized();
-
-      for (int j = 0; j < reaching_frames_.length(); j++) {
-        FrameElement element = reaching_frames_[j]->elements_[i];
-        if (direction_ == BIDIRECTIONAL) {
-          info = TypeInfo::Unknown();
-        } else if (!element.is_copy()) {
-          info = TypeInfo::Combine(info, element.type_info());
-        } else {
-          // New elements will not be copies, so get number information from
-          // backing element in the reaching frame.
-          info = TypeInfo::Combine(info,
-            reaching_frames_[j]->elements_[element.index()].type_info());
-        }
-        is_synced = is_synced && element.is_synced();
-        if (element.is_register() && !entry_frame_->is_used(element.reg())) {
-          // Count the register occurrence and remember it if better
-          // than the previous best.
-          int num = RegisterAllocator::ToNumber(element.reg());
-          candidate_registers.Use(num);
-          if (candidate_registers.count(num) > best_count) {
-            best_count = candidate_registers.count(num);
-            best_reg_num = num;
-          }
-        }
-      }
-
-      // We must have number type information now (not for copied elements).
-      ASSERT(entry_frame_->elements_[i].is_copy()
-             || !info.IsUninitialized());
-
-      // If the value is synced on all frames, put it in memory.  This
-      // costs nothing at the merge code but will incur a
-      // memory-to-register move when the value is needed later.
-      if (is_synced) {
-        // Already recorded as a memory element.
-        // Set combined number info.
-        entry_frame_->elements_[i].set_type_info(info);
-        continue;
-      }
-
-      // Try to put it in a register.  If there was no best choice
-      // consider any free register.
-      if (best_reg_num == RegisterAllocator::kInvalidRegister) {
-        for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
-          if (!entry_frame_->is_used(j)) {
-            best_reg_num = j;
-            break;
-          }
-        }
-      }
-
-      if (best_reg_num != RegisterAllocator::kInvalidRegister) {
-        // If there was a register choice, use it.  Preserve the copied
-        // flag on the element.
-        bool is_copied = entry_frame_->elements_[i].is_copied();
-        Register reg = RegisterAllocator::ToRegister(best_reg_num);
-        entry_frame_->elements_[i] =
-            FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
-                                          TypeInfo::Uninitialized());
-        if (is_copied) entry_frame_->elements_[i].set_copied();
-        entry_frame_->set_register_location(reg, i);
-      }
-      // Set combined number info.
-      entry_frame_->elements_[i].set_type_info(info);
-    }
-  }
-
-  // If we have incoming backward edges, assert we forget all number information.
-#ifdef DEBUG
-  if (direction_ == BIDIRECTIONAL) {
-    for (int i = 0; i < length; ++i) {
-      if (!entry_frame_->elements_[i].is_copy()) {
-        ASSERT(entry_frame_->elements_[i].type_info().IsUnknown());
-      }
-    }
-  }
-#endif
-
-  // The stack pointer is at the highest synced element or the base of
-  // the expression stack.
-  int stack_pointer = length - 1;
-  while (stack_pointer >= entry_frame_->expression_base_index() &&
-         !entry_frame_->elements_[stack_pointer].is_synced()) {
-    stack_pointer--;
-  }
-  entry_frame_->stack_pointer_ = stack_pointer;
-}
-
-
-FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
-  // Copy the register locations from the code generator's frame.
-  // These are the registers that will be spilled on entry to the
-  // deferred code and restored on exit.
-  int sp_offset = frame->fp_relative(frame->stack_pointer_);
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int loc = frame->register_location(i);
-    if (loc == VirtualFrame::kIllegalIndex) {
-      registers_[i] = kIgnore;
-    } else if (frame->elements_[loc].is_synced()) {
-      // Needs to be restored on exit but not saved on entry.
-      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
-    } else {
-      int offset = frame->fp_relative(loc);
-      registers_[i] = (offset < sp_offset) ? kPush : offset;
-    }
-  }
-}
-
-
-void JumpTarget::Unuse() {
-  reaching_frames_.Clear();
-  merge_labels_.Clear();
-  entry_frame_ = NULL;
-  entry_label_.Unuse();
-}
-
-
-void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
-  ASSERT(reaching_frames_.length() == merge_labels_.length());
-  ASSERT(entry_frame_ == NULL);
-  Label fresh;
-  merge_labels_.Add(fresh);
-  reaching_frames_.Add(frame);
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-void BreakTarget::set_direction(Directionality direction) {
-  JumpTarget::set_direction(direction);
-  ASSERT(cgen()->has_valid_frame());
-  expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::CopyTo(BreakTarget* destination) {
-  ASSERT(destination != NULL);
-  destination->direction_ = direction_;
-  destination->reaching_frames_.Rewind(0);
-  destination->reaching_frames_.AddAll(reaching_frames_);
-  destination->merge_labels_.Rewind(0);
-  destination->merge_labels_.AddAll(merge_labels_);
-  destination->entry_frame_ = entry_frame_;
-  destination->entry_label_ = entry_label_;
-  destination->expected_height_ = expected_height_;
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    Jump();  // May emit merge code here.
-    fall_through.Bind();
-  } else {
-    DoBranch(cc, hint);
-  }
-}
-
-
-DeferredCode::DeferredCode()
-    : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
-      statement_position_(masm_->positions_recorder()->
-                          current_statement_position()),
-      position_(masm_->positions_recorder()->current_position()),
-      frame_state_(CodeGeneratorScope::Current(Isolate::Current())->frame()) {
-  ASSERT(statement_position_ != RelocInfo::kNoPosition);
-  ASSERT(position_ != RelocInfo::kNoPosition);
-
-  CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
-#ifdef DEBUG
-  comment_ = "";
-#endif
-}
-
-} }  // namespace v8::internal
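
The core of the deleted ComputeEntryFrame() is the per-element placement decision: an element synced in every reaching frame stays in memory (the merge then costs nothing), otherwise the register most reaching frames already agree on wins. A standalone model of just that decision; Element and the register count of 8 are assumptions of the sketch:

#include <cstdio>
#include <vector>

struct Element { bool synced; int reg; };  // reg < 0: not held in a register

// Returns -1 to keep the element in memory, otherwise the chosen register.
int ChooseLocation(const std::vector<Element>& reaching) {
  bool all_synced = true;
  int counts[8] = {0};
  int best_reg = -1;
  int best_count = 0;
  for (const Element& e : reaching) {
    all_synced = all_synced && e.synced;
    if (e.reg >= 0 && ++counts[e.reg] > best_count) {
      best_count = counts[e.reg];
      best_reg = e.reg;
    }
  }
  // Synced everywhere: leave in memory; this is free at the merge and costs
  // only a later memory-to-register move if the value is needed again.
  if (all_synced) return -1;
  return best_reg;  // may still be -1: caller falls back to any free register
}

int main() {
  std::vector<Element> frames = {{false, 2}, {true, 2}, {false, 3}};
  std::printf("chosen register: %d\n", ChooseLocation(frames));  // 2
}
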
diff --git a/src/jump-target-heavy.h b/src/jump-target-heavy.h
deleted file mode 100644
index bf97756..0000000
--- a/src/jump-target-heavy.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_H_
-#define V8_JUMP_TARGET_HEAVY_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-class VirtualFrame;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code.  It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths.  When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame.  For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
- public:
-  // Forward-only jump targets can only be reached by forward CFG edges.
-  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
-  // Construct a jump target used to generate code and to provide
-  // access to a current frame.
-  explicit JumpTarget(Directionality direction)
-      : direction_(direction),
-        reaching_frames_(0),
-        merge_labels_(0),
-        entry_frame_(NULL) {
-  }
-
-  // Construct a jump target.
-  JumpTarget()
-      : direction_(FORWARD_ONLY),
-        reaching_frames_(0),
-        merge_labels_(0),
-        entry_frame_(NULL) {
-  }
-
-  virtual ~JumpTarget() {}
-
-  // Set the direction of the jump target.
-  virtual void set_direction(Directionality direction) {
-    direction_ = direction;
-  }
-
-  // Treat the jump target as a fresh one.  The state is reset.
-  void Unuse();
-
-  inline CodeGenerator* cgen();
-
-  Label* entry_label() { return &entry_label_; }
-
-  VirtualFrame* entry_frame() const { return entry_frame_; }
-  void set_entry_frame(VirtualFrame* frame) {
-    entry_frame_ = frame;
-  }
-
-  // Predicates testing the state of the encapsulated label.
-  bool is_bound() const { return entry_label_.is_bound(); }
-  bool is_linked() const {
-    return !is_bound() && !reaching_frames_.is_empty();
-  }
-  bool is_unused() const {
-    // This is !is_bound() && !is_linked().
-    return !is_bound() && reaching_frames_.is_empty();
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-  virtual void Jump(Result* arg);
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.  The arg is a result that is live both at
-  // the target and the fall-through.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-  void Branch(Condition cc,
-              Result* arg0,
-              Result* arg1,
-              Hint hint = no_hint);
-
-  // Bind a jump target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-  virtual void Bind(Result* arg);
-  void Bind(Result* arg0, Result* arg1);
-
-  // Emit a call to a jump target.  There must be a current frame at
-  // the call.  The frame at the target is the same as the current
-  // frame except for an extra return address on top of it.  The frame
-  // after the call is the same as the frame before the call.
-  void Call();
-
- protected:
-  // Directionality flag set at initialization time.
-  Directionality direction_;
-
-  // A list of frames reaching this block via forward jumps.
-  ZoneList<VirtualFrame*> reaching_frames_;
-
-  // A parallel list of labels for merge code.
-  ZoneList<Label> merge_labels_;
-
-  // The frame used on entry to the block and expected at backward
-  // jumps to the block.  Set when the jump target is bound, but may
-  // or may not be set for forward-only blocks.
-  VirtualFrame* entry_frame_;
-
-  // The actual entry label of the block.
-  Label entry_label_;
-
-  // Implementations of Jump, Branch, and Bind with all arguments and
-  // return values using the virtual frame.
-  void DoJump();
-  void DoBranch(Condition cc, Hint hint);
-  void DoBind();
-
- private:
-  // Add a virtual frame reaching this labeled block via a forward jump,
-  // and a corresponding merge code label.
-  void AddReachingFrame(VirtualFrame* frame);
-
-  // Perform initialization required during entry frame computation
-  // after setting the virtual frame element at index in frame to be
-  // target.
-  inline void InitializeEntryElement(int index, FrameElement* target);
-
-  // Compute a frame to use for entry to this block.
-  void ComputeEntryFrame();
-
-  DISALLOW_COPY_AND_ASSIGN(JumpTarget);
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally).  They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
-  // Construct a break target.
-  BreakTarget() {}
-  explicit BreakTarget(JumpTarget::Directionality direction)
-    : JumpTarget(direction) { }
-
-  virtual ~BreakTarget() {}
-
-  // Set the direction of the break target.
-  virtual void set_direction(Directionality direction);
-
-  // Copy the state of this break target to the destination.  The
-  // lists of forward-reaching frames and merge-point labels are
-  // copied.  All virtual frame pointers are copied, not the
-  // pointed-to frames.  The previous state of the destination is
-  // overwritten, without deallocating pointed-to virtual frames.
-  void CopyTo(BreakTarget* destination);
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-  virtual void Jump(Result* arg);
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-
-  // Bind a break target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-  virtual void Bind(Result* arg);
-
-  // Setter for expected height.
-  void set_expected_height(int expected) { expected_height_ = expected; }
-
- private:
-  // The expected height of the expression stack where the target will
-  // be bound, statically known at initialization time.
-  int expected_height_;
-
-  DISALLOW_COPY_AND_ASSIGN(BreakTarget);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_HEAVY_H_
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
deleted file mode 100644
index 545328c..0000000
--- a/src/jump-target-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_INL_H_
-#define V8_JUMP_TARGET_INL_H_
-
-#include "virtual-frame-inl.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "jump-target-heavy-inl.h"
-#else
-#include "jump-target-light-inl.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-CodeGenerator* JumpTarget::cgen() {
-  return CodeGeneratorScope::Current(Isolate::Current());
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_INL_H_
diff --git a/src/jump-target-light-inl.h b/src/jump-target-light-inl.h
deleted file mode 100644
index e8f1a5f..0000000
--- a/src/jump-target-light-inl.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_INL_H_
-#define V8_JUMP_TARGET_LIGHT_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Construct a jump target.
-JumpTarget::JumpTarget(Directionality direction)
-    : entry_frame_set_(false),
-      direction_(direction),
-      entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-JumpTarget::JumpTarget()
-    : entry_frame_set_(false),
-      direction_(FORWARD_ONLY),
-      entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-
-BreakTarget::BreakTarget() { }
-BreakTarget::BreakTarget(JumpTarget::Directionality direction)
-  : JumpTarget(direction) { }
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/src/jump-target-light.cc b/src/jump-target-light.cc
deleted file mode 100644
index 1d89474..0000000
--- a/src/jump-target-light.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-DeferredCode::DeferredCode()
-    : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
-      statement_position_(masm_->positions_recorder()->
-                          current_statement_position()),
-      position_(masm_->positions_recorder()->current_position()),
-      frame_state_(*CodeGeneratorScope::Current(Isolate::Current())->frame()) {
-  ASSERT(statement_position_ != RelocInfo::kNoPosition);
-  ASSERT(position_ != RelocInfo::kNoPosition);
-
-  CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
-
-#ifdef DEBUG
-  comment_ = "";
-#endif
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-
-void BreakTarget::SetExpectedHeight() {
-  expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::Jump() {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    cgen()->frame()->Drop(count);
-  }
-  DoJump();
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
-  if (cc == al) {
-    Jump();
-    return;
-  }
-
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    // Emit merge code.
-    cgen()->frame()->Drop(count);
-    DoJump();
-    fall_through.Bind();
-  } else {
-    DoBranch(cc, hint);
-  }
-}
-
-
-void BreakTarget::Bind() {
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    if (count > 0) {
-      cgen()->frame()->Drop(count);
-    }
-  }
-  DoBind();
-}
-
-} }  // namespace v8::internal
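
The deleted BreakTarget methods above share one idiom: before control leaves a statement, drop whatever that statement left on the virtual frame so the frame height matches the height statically recorded for the target. A minimal sketch of the idiom, assuming hypothetical VirtualFrame and Goto helpers:

    void JumpOut(VirtualFrame* frame, int expected_height, Label* target) {
      // Drop state pushed by nested statements (for/in, try/finally, ...)
      // so the frame height matches the height recorded for the target.
      int excess = frame->height() - expected_height;
      if (excess > 0) frame->Drop(excess);
      Goto(target);  // hypothetical unconditional jump emitter
    }
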
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
deleted file mode 100644
index 0d65306..0000000
--- a/src/jump-target-light.h
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_H_
-#define V8_JUMP_TARGET_LIGHT_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-#include "virtual-frame.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code.  It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths.  When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame.  For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
- public:
-  // Forward-only jump targets can only be reached by forward CFG edges.
-  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
-  // Construct a jump target.
-  explicit inline JumpTarget(Directionality direction);
-
-  inline JumpTarget();
-
-  virtual ~JumpTarget() {}
-
-  void Unuse() {
-    entry_frame_set_ = false;
-    entry_label_.Unuse();
-  }
-
-  inline CodeGenerator* cgen();
-
-  Label* entry_label() { return &entry_label_; }
-
-  const VirtualFrame* entry_frame() const {
-    return entry_frame_set_ ? &entry_frame_ : NULL;
-  }
-
-  void set_entry_frame(VirtualFrame* frame) {
-    entry_frame_ = *frame;
-    entry_frame_set_ = true;
-  }
-
-  // Predicates testing the state of the encapsulated label.
-  bool is_bound() const { return entry_label_.is_bound(); }
-  bool is_linked() const { return entry_label_.is_linked(); }
-  bool is_unused() const { return entry_label_.is_unused(); }
-
-  // Copy the state of this jump target to the destination.
-  inline void CopyTo(JumpTarget* destination) {
-    *destination = *this;
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-
-  // Bind a jump target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-
-  // Emit a call to a jump target.  There must be a current frame at
-  // the call.  The frame at the target is the same as the current
-  // frame except for an extra return address on top of it.  The frame
-  // after the call is the same as the frame before the call.
-  void Call();
-
- protected:
-  // Has an entry frame been found?
-  bool entry_frame_set_;
-
-  // Can we branch backwards to this label?
-  Directionality direction_;
-
-  // The frame used on entry to the block and expected at backward
-  // jumps to the block.  Set the first time something branches to this
-  // jump target.
-  VirtualFrame entry_frame_;
-
-  // The actual entry label of the block.
-  Label entry_label_;
-
-  // Implementations of Jump, Branch, and Bind with all arguments and
-  // return values using the virtual frame.
-  void DoJump();
-  void DoBranch(Condition cc, Hint hint);
-  void DoBind();
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally).  They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
-  // Construct a break target.
-  inline BreakTarget();
-
-  inline BreakTarget(JumpTarget::Directionality direction);
-
-  virtual ~BreakTarget() {}
-
-  // Copy the state of this jump target to the destination.
-  inline void CopyTo(BreakTarget* destination) {
-    *destination = *this;
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-
-  // Bind a break target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-
-  // Setter for expected height.
-  void set_expected_height(int expected) { expected_height_ = expected; }
-
-  // Uses the current frame to set the expected height.
-  void SetExpectedHeight();
-
- private:
-  // The expected height of the expression stack where the target will
-  // be bound, statically known at initialization time.
-  int expected_height_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_LIGHT_H_
diff --git a/src/jump-target.cc b/src/jump-target.cc
deleted file mode 100644
index 72aada8..0000000
--- a/src/jump-target.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-void JumpTarget::Jump() {
-  DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Hint hint) {
-  DoBranch(cc, hint);
-}
-
-
-void JumpTarget::Bind() {
-  DoBind();
-}
-
-
-// -------------------------------------------------------------------------
-// ShadowTarget implementation.
-
-ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
-  ASSERT(shadowed != NULL);
-  other_target_ = shadowed;
-
-#ifdef DEBUG
-  is_shadowing_ = true;
-#endif
-  // While shadowing this shadow target saves the state of the original.
-  shadowed->CopyTo(this);
-
-  // The original's state is reset.
-  shadowed->Unuse();
-  ASSERT(cgen()->has_valid_frame());
-  shadowed->set_expected_height(cgen()->frame()->height());
-}
-
-
-void ShadowTarget::StopShadowing() {
-  ASSERT(is_shadowing_);
-
-  // The states of this target, which was shadowed, and the original
-  // target, which was shadowing, are swapped.
-  BreakTarget temp;
-  other_target_->CopyTo(&temp);
-  CopyTo(other_target_);
-  temp.CopyTo(this);
-  temp.Unuse();
-
-#ifdef DEBUG
-  is_shadowing_ = false;
-#endif
-}
-
-} }  // namespace v8::internal
diff --git a/src/jump-target.h b/src/jump-target.h
deleted file mode 100644
index a0d2686..0000000
--- a/src/jump-target.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_H_
-#define V8_JUMP_TARGET_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_X64
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_ARM
-#include "jump-target-light.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "jump-target-light.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Shadow break targets
-//
-// A shadow break target represents a break target that is temporarily
-// shadowed by another one (represented by the original during
-// shadowing).  They are used to catch jumps to labels in certain
-// contexts, e.g. try blocks.  After shadowing ends, the formerly
-// shadowed target is again represented by the original and the
-// ShadowTarget can be used as a jump target in its own right,
-// representing the formerly shadowing target.
-
-class ShadowTarget : public BreakTarget {
- public:
-  // Construct a shadow jump target.  After construction the shadow
-  // target object holds the state of the original target, and the
-  // original target is actually a fresh one that intercepts control
-  // flow intended for the shadowed one.
-  explicit ShadowTarget(BreakTarget* shadowed);
-
-  virtual ~ShadowTarget() {}
-
-  // End shadowing.  After shadowing ends, the original jump target
-  // again gives access to the formerly shadowed target and the shadow
-  // target object gives access to the formerly shadowing target.
-  void StopShadowing();
-
-  // During shadowing, the currently shadowing target.  After
-  // shadowing, the target that was shadowed.
-  BreakTarget* other_target() const { return other_target_; }
-
- private:
-  // During shadowing, the currently shadowing target.  After
-  // shadowing, the target that was shadowed.
-  BreakTarget* other_target_;
-
-#ifdef DEBUG
-  bool is_shadowing_;
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_H_
diff --git a/src/log.cc b/src/log.cc
index 5e8c738..3ce2072 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1316,7 +1316,6 @@
       case Code::FUNCTION:
       case Code::OPTIMIZED_FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
-      case Code::BINARY_OP_IC:  // fall through
       case Code::TYPE_RECORDING_BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
       case Code::STUB:
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 73bf2f2..bd36459 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1217,13 +1217,14 @@
   List<ObjectGroup*>* object_groups =
       heap()->isolate()->global_handles()->object_groups();
 
+  int last = 0;
   for (int i = 0; i < object_groups->length(); i++) {
     ObjectGroup* entry = object_groups->at(i);
-    if (entry == NULL) continue;
+    ASSERT(entry != NULL);
 
-    List<Object**>& objects = entry->objects_;
+    Object*** objects = entry->objects_;
     bool group_marked = false;
-    for (int j = 0; j < objects.length(); j++) {
+    for (size_t j = 0; j < entry->length_; j++) {
       Object* object = *objects[j];
       if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
         group_marked = true;
@@ -1231,21 +1232,24 @@
       }
     }
 
-    if (!group_marked) continue;
+    if (!group_marked) {
+      (*object_groups)[last++] = entry;
+      continue;
+    }
 
-    // An object in the group is marked, so mark as gray all white heap
-    // objects in the group.
-    for (int j = 0; j < objects.length(); ++j) {
+    // An object in the group is marked, so mark all heap objects in
+    // the group.
+    for (size_t j = 0; j < entry->length_; ++j) {
       if ((*objects[j])->IsHeapObject()) {
         MarkObject(HeapObject::cast(*objects[j]));
       }
     }
 
-    // Once the entire group has been colored gray, set the object group
-    // to NULL so it won't be processed again.
-    delete entry;
-    object_groups->at(i) = NULL;
+    // Once the entire group has been marked, dispose of it because it is
+    // no longer needed.
+    entry->Dispose();
   }
+  object_groups->Rewind(last);
 }
 
 
@@ -1253,26 +1257,29 @@
   List<ImplicitRefGroup*>* ref_groups =
       heap()->isolate()->global_handles()->implicit_ref_groups();
 
+  int last = 0;
   for (int i = 0; i < ref_groups->length(); i++) {
     ImplicitRefGroup* entry = ref_groups->at(i);
-    if (entry == NULL) continue;
+    ASSERT(entry != NULL);
 
-    if (!entry->parent_->IsMarked()) continue;
+    if (!(*entry->parent_)->IsMarked()) {
+      (*ref_groups)[last++] = entry;
+      continue;
+    }
 
-    List<Object**>& children = entry->children_;
-    // A parent object is marked, so mark as gray all child white heap
-    // objects.
-    for (int j = 0; j < children.length(); ++j) {
+    Object*** children = entry->children_;
+    // A parent object is marked, so mark all child heap objects.
+    for (size_t j = 0; j < entry->length_; ++j) {
       if ((*children[j])->IsHeapObject()) {
         MarkObject(HeapObject::cast(*children[j]));
       }
     }
 
-    // Once the entire group has been colored gray, set the  group
-    // to NULL so it won't be processed again.
-    delete entry;
-    ref_groups->at(i) = NULL;
+    // Once the entire group has been marked, dispose of it because it is
+    // no longer needed.
+    entry->Dispose();
   }
+  ref_groups->Rewind(last);
 }
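
Both loops in mark-compact.cc now use the same in-place filtering pattern: entries to keep are compacted toward the front with a write index and the list is truncated with Rewind(), instead of leaving NULL holes that every later pass has to skip. A self-contained sketch of the pattern, with std::vector standing in for V8's List:

    #include <vector>

    // Keep the entries for which retain() returns true, disposing the
    // rest; preserves order and leaves no holes in the container.
    template <typename T, typename Pred>
    void RetainWhere(std::vector<T*>* items, Pred retain) {
      size_t last = 0;
      for (size_t i = 0; i < items->size(); ++i) {
        T* entry = (*items)[i];
        if (retain(entry)) {
          (*items)[last++] = entry;  // compact survivor toward the front
        } else {
          delete entry;              // processed entry is no longer needed
        }
      }
      items->resize(last);           // plays the role of List::Rewind(last)
    }
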
 
 
diff --git a/src/messages.cc b/src/messages.cc
index cab982c..0cc8251 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -56,11 +56,6 @@
 }
 
 
-void MessageHandler::ReportMessage(const char* msg) {
-  PrintF("%s\n", msg);
-}
-
-
 Handle<JSMessageObject> MessageHandler::MakeMessageObject(
     const char* type,
     MessageLocation* loc,
@@ -106,14 +101,34 @@
 }
 
 
-void MessageHandler::ReportMessage(MessageLocation* loc,
+void MessageHandler::ReportMessage(Isolate* isolate,
+                                   MessageLocation* loc,
                                    Handle<Object> message) {
+  // If we are in the process of reporting a message, ignore any further
+  // requests to report one; they stem from unhandled exceptions thrown in
+  // message callbacks.
+  if (isolate->in_exception_reporting()) {
+    PrintF("uncaught exception thrown while reporting\n");
+    return;
+  }
+  isolate->set_in_exception_reporting(true);
+
+  // We are calling into the embedder's code, which can throw exceptions.
+  // Thus we need to save the current exception state, reset it to a clean
+  // one, and ignore any scheduled exceptions the callbacks may throw.
+  Isolate::ExceptionScope exception_scope(isolate);
+  isolate->clear_pending_exception();
+  isolate->set_external_caught_exception(false);
+
   v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
 
   v8::NeanderArray global_listeners(FACTORY->message_listeners());
   int global_length = global_listeners.length();
   if (global_length == 0) {
     DefaultMessageReport(loc, message);
+    if (isolate->has_scheduled_exception()) {
+      isolate->clear_scheduled_exception();
+    }
   } else {
     for (int i = 0; i < global_length; i++) {
       HandleScope scope;
@@ -124,8 +139,13 @@
           FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
       Handle<Object> callback_data(listener.get(1));
       callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+      if (isolate->has_scheduled_exception()) {
+        isolate->clear_scheduled_exception();
+      }
     }
   }
+
+  isolate->set_in_exception_reporting(false);
 }
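
The in_exception_reporting flag added above guards against a message callback throwing, which would otherwise route control straight back into ReportMessage. The shape of the guard, reduced to a sketch around a hypothetical Reporter type:

    #include <cstdio>

    struct Reporter {
      bool in_exception_reporting = false;

      void Report(const char* msg) {
        // A listener below may throw, which ends up back here; drop the
        // nested report instead of recursing.
        if (in_exception_reporting) {
          std::printf("uncaught exception thrown while reporting\n");
          return;
        }
        in_exception_reporting = true;
        InvokeListeners(msg);  // may trigger a nested Report()
        in_exception_reporting = false;
      }

      void InvokeListeners(const char* msg) {
        (void)msg;  // stand-in: would call registered listeners, may throw
      }
    };
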
 
 
diff --git a/src/messages.h b/src/messages.h
index 48f3244..fc2162d 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -89,9 +89,6 @@
 // of message listeners registered in an environment
 class MessageHandler {
  public:
-  // Report a message (w/o JS heap allocation).
-  static void ReportMessage(const char* msg);
-
   // Returns a message object for the API to use.
   static Handle<JSMessageObject> MakeMessageObject(
       const char* type,
@@ -101,7 +98,9 @@
       Handle<JSArray> stack_frames);
 
   // Report a formatted message (needs JS allocation).
-  static void ReportMessage(MessageLocation* loc, Handle<Object> message);
+  static void ReportMessage(Isolate* isolate,
+                            MessageLocation* loc,
+                            Handle<Object> message);
 
   static void DefaultMessageReport(const MessageLocation* loc,
                                    Handle<Object> message_obj);
diff --git a/src/messages.js b/src/messages.js
index 3eb056f..e657fc0 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -190,6 +190,7 @@
       property_desc_object:         ["Property description must be an object: ", "%0"],
       redefine_disallowed:          ["Cannot redefine property: ", "%0"],
       define_disallowed:            ["Cannot define property, object is not extensible: ", "%0"],
+      non_extensible_proto:         ["%0", " is not extensible"],
       // RangeError
       invalid_array_length:         ["Invalid array length"],
       stack_overflow:               ["Maximum call stack size exceeded"],
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index be8b74e..cf30b09 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -106,7 +106,7 @@
   inline VirtualFrame();
 
   // Construct an invalid virtual frame, used by JumpTargets.
-  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
+  explicit inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
 
   // Construct a virtual frame as a clone of an existing one.
   explicit inline VirtualFrame(VirtualFrame* original);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 37c51d7..ebdf0a0 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1774,7 +1774,7 @@
 void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
   desc->Init(GetKey(descriptor_number),
              GetValue(descriptor_number),
-             GetDetails(descriptor_number));
+             PropertyDetails(GetDetails(descriptor_number)));
 }
 
 
@@ -2573,7 +2573,6 @@
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
-         kind() == BINARY_OP_IC ||
          kind() == TYPE_RECORDING_BINARY_OP_IC ||
          kind() == COMPARE_IC);
   return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
@@ -2582,7 +2581,6 @@
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
-         kind() == BINARY_OP_IC ||
          kind() == TYPE_RECORDING_BINARY_OP_IC ||
          kind() == COMPARE_IC);
   ASSERT(0 <= major && major < 256);
@@ -2691,18 +2689,6 @@
 }
 
 
-byte Code::binary_op_type() {
-  ASSERT(is_binary_op_stub());
-  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
-}
-
-
-void Code::set_binary_op_type(byte value) {
-  ASSERT(is_binary_op_stub());
-  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
-}
-
-
 byte Code::type_recording_binary_op_type() {
   ASSERT(is_type_recording_binary_op_stub());
   return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
@@ -3953,6 +3939,15 @@
   set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
 }
 
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::SetEntry(int entry,
+                                      Object* key,
+                                      Object* value) {
+  SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
+}
+
+
 template<typename Shape, typename Key>
 void Dictionary<Shape, Key>::SetEntry(int entry,
                                       Object* key,
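
The new three-argument SetEntry simply forwards to the four-argument form with empty details. It matters because PropertyDetails(Smi*) becomes explicit in this change (see objects.h below), so call sites that merely clear an entry can no longer lean on an implicit conversion:

    // Before: relied on the implicit Smi* -> PropertyDetails conversion.
    //   SetEntry(entry, null, null, Smi::FromInt(0));   // now rejected
    // After: either spell the conversion out ...
    //   SetEntry(entry, null, null, PropertyDetails(Smi::FromInt(0)));
    // ... or use the new forwarding overload (as objects.cc below does):
    //   SetEntry(entry, null, null);
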
diff --git a/src/objects.cc b/src/objects.cc
index 9a5357a..d211d62 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2518,7 +2518,7 @@
 
   DescriptorArray* descs = map_of_this->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details = descs->GetDetails(i);
+    PropertyDetails details(descs->GetDetails(i));
     switch (details.type()) {
       case CONSTANT_FUNCTION: {
         PropertyDetails d =
@@ -6491,7 +6491,6 @@
     case KEYED_EXTERNAL_ARRAY_STORE_IC: return "KEYED_EXTERNAL_ARRAY_STORE_IC";
     case CALL_IC: return "CALL_IC";
     case KEYED_CALL_IC: return "KEYED_CALL_IC";
-    case BINARY_OP_IC: return "BINARY_OP_IC";
     case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
     case COMPARE_IC: return "COMPARE_IC";
   }
@@ -6866,6 +6865,22 @@
   // SpiderMonkey behaves this way.
   if (!value->IsJSObject() && !value->IsNull()) return value;
 
+  // From 8.6.2 Object Internal Methods
+  // ...
+  // In addition, if [[Extensible]] is false the value of the [[Class]] and
+  // [[Prototype]] internal properties of the object may not be modified.
+  // ...
+  // Implementation specific extensions that modify [[Class]], [[Prototype]]
+  // or [[Extensible]] must not violate the invariants defined in the preceding
+  // paragraph.
+  if (!this->map()->is_extensible()) {
+    HandleScope scope;
+    Handle<Object> handle(this, heap->isolate());
+    return heap->isolate()->Throw(
+        *FACTORY->NewTypeError("non_extensible_proto",
+                               HandleVector<Object>(&handle, 1)));
+  }
+
   // Before we can set the prototype we need to be sure
   // prototype cycles are prevented.
   // It is sufficient to validate that the receiver is not in the new prototype
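
The new guard makes the ES5 invariant observable from script: once an object is non-extensible, assigning to its __proto__ throws a TypeError built from the non_extensible_proto template added in messages.js, instead of silently rewriting the prototype. Illustrative behavior (the exact rendering of the receiver in the error text is version-dependent):

    // var o = {};
    // Object.preventExtensions(o);
    // o.__proto__ = {};     // throws TypeError: <o> is not extensible
    // Reading o.__proto__, and assigning __proto__ on extensible
    // objects, are unaffected.
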
@@ -8086,7 +8101,7 @@
     DescriptorArray* descs = map()->instance_descriptors();
     int result = 0;
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      PropertyDetails details = descs->GetDetails(i);
+      PropertyDetails details(descs->GetDetails(i));
       if (details.IsProperty() && (details.attributes() & filter) == 0) {
         result++;
       }
@@ -9653,7 +9668,7 @@
     if (key->IsNumber()) {
       uint32_t number = static_cast<uint32_t>(key->Number());
       if (from <= number && number < to) {
-        SetEntry(i, sentinel, sentinel, Smi::FromInt(0));
+        SetEntry(i, sentinel, sentinel);
         removed_entries++;
       }
     }
@@ -9673,7 +9688,7 @@
   if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
     return heap->false_value();
   }
-  SetEntry(entry, heap->null_value(), heap->null_value(), Smi::FromInt(0));
+  SetEntry(entry, heap->null_value(), heap->null_value());
   HashTable<Shape, Key>::ElementRemoved();
   return heap->true_value();
 }
diff --git a/src/objects.h b/src/objects.h
index 874dcbc..449a210 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -167,7 +167,7 @@
   }
 
   // Conversion for storing details as Object*.
-  inline PropertyDetails(Smi* smi);
+  explicit inline PropertyDetails(Smi* smi);
   inline Smi* AsSmi();
 
   PropertyType type() { return TypeField::decode(value_); }
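
Making the Smi* constructor explicit (here, and likewise for StringHasher and the string input buffers later in this file) disables accidental implicit conversions while leaving direct initialization intact:

    Smi* raw = GetDetailsSlot();            // hypothetical source of a Smi*
    PropertyDetails fine(raw);              // OK: direct initialization
    PropertyDetails also_fine = PropertyDetails(raw);
    // PropertyDetails bad = raw;           // no longer compiles
    // TakePropertyDetails(raw);            // implicit conversion rejected
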
@@ -1293,14 +1293,9 @@
   // is a mixture of sign, exponent and mantissa.  Our current platforms are all
   // little endian apart from non-EABI arm which is little endian with big
   // endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
   static const int kMantissaOffset = kValueOffset;
   static const int kExponentOffset = kValueOffset + 4;
-#else
-  static const int kMantissaOffset = kValueOffset + 4;
-  static const int kExponentOffset = kValueOffset;
-# define BIG_ENDIAN_FLOATING_POINT 1
-#endif
+
   static const int kSize = kValueOffset + kDoubleSize;
   static const uint32_t kSignMask = 0x80000000u;
   static const uint32_t kExponentMask = 0x7ff00000u;
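
With pre-EABI ARM support gone, every remaining platform stores doubles with the mantissa word first, so the two offset cases collapse into one. For reference, the IEEE 754 split that the surviving constants describe:

    // Little-endian word order within a 64-bit double:
    //   word at kMantissaOffset: low 32 mantissa bits
    //   word at kExponentOffset: sign (1) | exponent (11) | mantissa (20)
    // kSignMask     = 0x80000000u  -> sign bit of the exponent word
    // kExponentMask = 0x7ff00000u  -> the 11 exponent bits
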
@@ -2576,6 +2571,9 @@
   // Sets the entry to (key, value) pair.
   inline void SetEntry(int entry,
                        Object* key,
+                       Object* value);
+  inline void SetEntry(int entry,
+                       Object* key,
                        Object* value,
                        PropertyDetails details);
 
@@ -3239,7 +3237,6 @@
     STORE_IC,
     KEYED_STORE_IC,
     KEYED_EXTERNAL_ARRAY_STORE_IC,
-    BINARY_OP_IC,
     TYPE_RECORDING_BINARY_OP_IC,
     COMPARE_IC,
     // No more than 16 kinds. The value currently encoded in four bits in
@@ -3308,7 +3305,6 @@
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
   inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
-  inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
   inline bool is_type_recording_binary_op_stub() {
     return kind() == TYPE_RECORDING_BINARY_OP_IC;
   }
@@ -3366,10 +3362,6 @@
   inline ExternalArrayType external_array_type();
   inline void set_external_array_type(ExternalArrayType value);
 
-  // [binary op type]: For all BINARY_OP_IC.
-  inline byte binary_op_type();
-  inline void set_binary_op_type(byte value);
-
   // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
   inline byte type_recording_binary_op_type();
   inline void set_type_recording_binary_op_type(byte value);
@@ -5158,7 +5150,7 @@
 
 class StringHasher {
  public:
-  inline StringHasher(int length);
+  explicit inline StringHasher(int length);
 
   // Returns true if the hash of this string can be computed without
   // looking at the contents.
@@ -5905,7 +5897,7 @@
  public:
   virtual void Seek(unsigned pos);
   inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
-  inline StringInputBuffer(String* backing):
+  explicit inline StringInputBuffer(String* backing):
       unibrow::InputBuffer<String, String*, 1024>(backing) {}
 };
 
@@ -5916,7 +5908,7 @@
   virtual void Seek(unsigned pos);
   inline SafeStringInputBuffer()
       : unibrow::InputBuffer<String, String**, 256>() {}
-  inline SafeStringInputBuffer(String** backing)
+  explicit inline SafeStringInputBuffer(String** backing)
       : unibrow::InputBuffer<String, String**, 256>(backing) {}
 };
 
diff --git a/src/parser.cc b/src/parser.cc
index 22d4d3f..4fad6e4 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,7 +42,6 @@
 #include "string-stream.h"
 
 #include "ast-inl.h"
-#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -463,7 +462,7 @@
 // Parser's scope stack. The constructor sets the parser's top scope
 // to the incoming scope, and the destructor resets it.
 //
-// Additionlaly, it stores transient information used during parsing.
+// Additionally, it stores transient information used during parsing.
 // These scopes are not kept around after parsing or referenced by syntax
 // trees so they can be stack-allocated and hence used by the pre-parser.
 
@@ -497,9 +496,6 @@
   void AddProperty() { expected_property_count_++; }
   int expected_property_count() { return expected_property_count_; }
 
-  void AddLoop() { loop_count_++; }
-  bool ContainsLoops() const { return loop_count_ > 0; }
-
  private:
   // Captures the number of literals that need materialization in the
   // function.  Includes regexp literals, and boilerplate for object
@@ -514,15 +510,13 @@
   bool only_simple_this_property_assignments_;
   Handle<FixedArray> this_property_assignments_;
 
-  // Captures the number of loops inside the scope.
-  int loop_count_;
-
   // Bookkeeping
   Parser* parser_;
   // Previous values
   LexicalScope* lexical_scope_parent_;
   Scope* previous_scope_;
   int previous_with_nesting_level_;
+  unsigned previous_ast_node_id_;
 };
 
 
@@ -531,14 +525,15 @@
     expected_property_count_(0),
     only_simple_this_property_assignments_(false),
     this_property_assignments_(isolate->factory()->empty_fixed_array()),
-    loop_count_(0),
     parser_(parser),
     lexical_scope_parent_(parser->lexical_scope_),
     previous_scope_(parser->top_scope_),
-    previous_with_nesting_level_(parser->with_nesting_level_) {
+    previous_with_nesting_level_(parser->with_nesting_level_),
+    previous_ast_node_id_(isolate->ast_node_id()) {
   parser->top_scope_ = scope;
   parser->lexical_scope_ = this;
   parser->with_nesting_level_ = 0;
+  isolate->set_ast_node_id(AstNode::kFunctionEntryId + 1);
 }
 
 
@@ -547,6 +542,7 @@
   parser_->top_scope_ = previous_scope_;
   parser_->lexical_scope_ = lexical_scope_parent_;
   parser_->with_nesting_level_ = previous_with_nesting_level_;
+  parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
 }
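
LexicalScope now also saves and restores the isolate-wide AST node id, so each function literal is numbered from a fresh AstNode::kFunctionEntryId + 1 and the enclosing function's numbering resumes when the scope unwinds. The save/restore idiom in isolation (a sketch; the accessor names mirror the hunk above):

    class AstNodeIdScope {
     public:
      AstNodeIdScope(Isolate* isolate, unsigned first_id)
          : isolate_(isolate), previous_id_(isolate->ast_node_id()) {
        isolate_->set_ast_node_id(first_id);  // fresh numbering inside
      }
      ~AstNodeIdScope() {
        isolate_->set_ast_node_id(previous_id_);  // resume outer numbering
      }

     private:
      Isolate* isolate_;
      unsigned previous_id_;
    };
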
 
 
@@ -664,8 +660,7 @@
           0,
           0,
           source->length(),
-          false,
-          lexical_scope.ContainsLoops());
+          false);
     } else if (stack_overflow_) {
       isolate()->StackOverflow();
     }
@@ -1909,7 +1904,7 @@
                           bool is_catch_block,
                           bool* ok) {
   // Parse the statement and collect escaping labels.
-  ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
+  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
   TargetCollector collector(target_list);
   Statement* stat;
   { Target target(&this->target_stack_, &collector);
@@ -2055,7 +2050,7 @@
 
   Expect(Token::TRY, CHECK_OK);
 
-  ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
+  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
   TargetCollector collector(target_list);
   Block* try_block;
 
@@ -2078,7 +2073,7 @@
   // then we will need to collect jump targets from the catch block. Since
   // we don't know yet if there will be a finally block, we always collect
   // the jump targets.
-  ZoneList<BreakTarget*>* catch_target_list = new ZoneList<BreakTarget*>(0);
+  ZoneList<Label*>* catch_target_list = new ZoneList<Label*>(0);
   TargetCollector catch_collector(catch_target_list);
   bool has_catch = false;
   if (tok == Token::CATCH) {
@@ -2163,7 +2158,6 @@
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  lexical_scope_->AddLoop();
   DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
@@ -2178,7 +2172,6 @@
   }
 
   Expression* cond = ParseExpression(true, CHECK_OK);
-  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
 
   // Allow do-statements to be terminated with and without
@@ -2196,14 +2189,12 @@
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  lexical_scope_->AddLoop();
   WhileStatement* loop = new(zone()) WhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   Expression* cond = ParseExpression(true, CHECK_OK);
-  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
@@ -2216,7 +2207,6 @@
   // ForStatement ::
   //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
 
-  lexical_scope_->AddLoop();
   Statement* init = NULL;
 
   Expect(Token::FOR, CHECK_OK);
@@ -2285,7 +2275,6 @@
   Expression* cond = NULL;
   if (peek() != Token::SEMICOLON) {
     cond = ParseExpression(true, CHECK_OK);
-    if (cond != NULL) cond->set_is_loop_condition(true);
   }
   Expect(Token::SEMICOLON, CHECK_OK);
 
@@ -2593,9 +2582,10 @@
     }
 
     int position = scanner().location().beg_pos;
-    IncrementOperation* increment =
-        new(zone()) IncrementOperation(op, expression);
-    return new(zone()) CountOperation(true /* prefix */, increment, position);
+    return new(zone()) CountOperation(op,
+                                      true /* prefix */,
+                                      expression,
+                                      position);
 
   } else {
     return ParsePostfixExpression(ok);
@@ -2627,10 +2617,11 @@
 
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
-    IncrementOperation* increment =
-        new(zone()) IncrementOperation(next, expression);
     expression =
-        new(zone()) CountOperation(false /* postfix */, increment, position);
+        new(zone()) CountOperation(next,
+                                   false /* postfix */,
+                                   expression,
+                                   position);
   }
   return expression;
 }
@@ -3532,16 +3523,22 @@
   }
 
   int num_parameters = 0;
+  Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+  ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
+  int materialized_literal_count;
+  int expected_property_count;
+  int start_pos;
+  int end_pos;
+  bool only_simple_this_property_assignments;
+  Handle<FixedArray> this_property_assignments;
   // Parse function body.
-  { Scope* scope =
-        NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
-    LexicalScope lexical_scope(this, scope, isolate());
+  { LexicalScope lexical_scope(this, scope, isolate());
     top_scope_->SetScopeName(name);
 
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
-    int start_pos = scanner().location().beg_pos;
+    start_pos = scanner().location().beg_pos;
     Scanner::Location name_loc = Scanner::NoLocation();
     Scanner::Location dupe_loc = Scanner::NoLocation();
     Scanner::Location reserved_loc = Scanner::NoLocation();
@@ -3578,7 +3575,6 @@
     Expect(Token::RPAREN, CHECK_OK);
 
     Expect(Token::LBRACE, CHECK_OK);
-    ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
 
     // If we have a named function expression, we add a local variable
     // declaration to the body of the function with the name of the
@@ -3606,11 +3602,6 @@
     parenthesized_function_ = false;  // The bit was set for this function only.
 
     int function_block_pos = scanner().location().beg_pos;
-    int materialized_literal_count;
-    int expected_property_count;
-    int end_pos;
-    bool only_simple_this_property_assignments;
-    Handle<FixedArray> this_property_assignments;
     if (is_lazily_compiled && pre_data() != NULL) {
       FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
       if (!entry.is_valid()) {
@@ -3685,25 +3676,24 @@
       }
       CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
     }
-
-    FunctionLiteral* function_literal =
-        new(zone()) FunctionLiteral(name,
-                            top_scope_,
-                            body,
-                            materialized_literal_count,
-                            expected_property_count,
-                            only_simple_this_property_assignments,
-                            this_property_assignments,
-                            num_parameters,
-                            start_pos,
-                            end_pos,
-                            function_name->length() > 0,
-                            lexical_scope.ContainsLoops());
-    function_literal->set_function_token_position(function_token_position);
-
-    if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
-    return function_literal;
   }
+
+  FunctionLiteral* function_literal =
+      new(zone()) FunctionLiteral(name,
+                                  scope,
+                                  body,
+                                  materialized_literal_count,
+                                  expected_property_count,
+                                  only_simple_this_property_assignments,
+                                  this_property_assignments,
+                                  num_parameters,
+                                  start_pos,
+                                  end_pos,
+                                  (function_name->length() > 0));
+  function_literal->set_function_token_position(function_token_position);
+
+  if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
+  return function_literal;
 }
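
The restructuring hoists the collected results out of the block so the FunctionLiteral is constructed only after the LexicalScope destructor has run and restored the parser's state, including the AST node id it now tracks. The general shape, with hypothetical names:

    Result a, b;                    // slots for results gathered inside
    {
      StateScope guard(parser);     // saves state; restores in ~StateScope
      a = ParsePartA(parser);
      b = ParsePartB(parser);
    }                               // parser state restored here
    Node* node = MakeNode(a, b);    // built outside the guarded region
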
 
 
@@ -3945,7 +3935,7 @@
 }
 
 
-void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
+void Parser::RegisterTargetUse(Label* target, Target* stop) {
   // Register that a break target found at the given stop in the
   // target stack has been used from the top of the target stack. Add
   // the break target to any TargetCollectors passed on the stack.
diff --git a/src/parser.h b/src/parser.h
index 78faea1..e8cd102 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -655,7 +655,7 @@
   BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
   IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
 
-  void RegisterTargetUse(BreakTarget* target, Target* stop);
+  void RegisterTargetUse(Label* target, Target* stop);
 
   // Factory methods.
 
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 043ad1c..c777ab4 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -376,11 +376,6 @@
 }
 
 
-void PrettyPrinter::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
 void PrettyPrinter::VisitCountOperation(CountOperation* node) {
   Print("(");
   if (node->is_prefix()) Print("%s", Token::String(node->op()));
@@ -609,16 +604,6 @@
   IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
       : ast_printer_(printer) {
     ast_printer_->PrintIndented(txt);
-    if (node != NULL && node->AsExpression() != NULL) {
-      Expression* expr = node->AsExpression();
-      bool printed_first = false;
-      if ((expr->type() != NULL) && (expr->type()->IsKnown())) {
-        ast_printer_->Print(" (type = ");
-        ast_printer_->Print(StaticType::Type2String(expr->type()));
-        printed_first = true;
-      }
-      if (printed_first) ast_printer_->Print(")");
-    }
     ast_printer_->Print("\n");
     ast_printer_->inc_indent();
   }
@@ -664,18 +649,13 @@
 
 void AstPrinter::PrintLiteralWithModeIndented(const char* info,
                                               Variable* var,
-                                              Handle<Object> value,
-                                              StaticType* type) {
+                                              Handle<Object> value) {
   if (var == NULL) {
     PrintLiteralIndented(info, value, true);
   } else {
     EmbeddedVector<char, 256> buf;
     int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
                            Variable::Mode2String(var->mode()));
-    if (type->IsKnown()) {
-      pos += OS::SNPrintF(buf + pos, ", type = %s",
-                          StaticType::Type2String(type));
-    }
     OS::SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
@@ -732,8 +712,7 @@
     IndentedScope indent(this, "PARAMS");
     for (int i = 0; i < scope->num_parameters(); i++) {
       PrintLiteralWithModeIndented("VAR", scope->parameter(i),
-                                   scope->parameter(i)->name(),
-                                   scope->parameter(i)->type());
+                                   scope->parameter(i)->name());
     }
   }
 }
@@ -777,8 +756,7 @@
     // var or const declarations
     PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
                                  node->proxy()->AsVariable(),
-                                 node->proxy()->name(),
-                                 node->proxy()->AsVariable()->type());
+                                 node->proxy()->name());
   } else {
     // function declarations
     PrintIndented("FUNCTION ");
@@ -996,8 +974,7 @@
 
 
 void AstPrinter::VisitVariableProxy(VariableProxy* node) {
-  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
-                               node->type());
+  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name());
   Variable* var = node->var();
   if (var != NULL && var->rewrite() != NULL) {
     IndentedScope indent(this);
@@ -1056,22 +1033,10 @@
 }
 
 
-void AstPrinter::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
 void AstPrinter::VisitCountOperation(CountOperation* node) {
   EmbeddedVector<char, 128> buf;
-  if (node->type()->IsKnown()) {
-    OS::SNPrintF(buf, "%s %s (type = %s)",
-                 (node->is_prefix() ? "PRE" : "POST"),
-                 Token::Name(node->op()),
-                 StaticType::Type2String(node->type()));
-  } else {
-    OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
-                 Token::Name(node->op()));
-  }
+  OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
+               Token::Name(node->op()));
   PrintIndentedVisit(buf.start(), node->expression());
 }
 
@@ -1461,11 +1426,6 @@
 }
 
 
-void JsonAstBuilder::VisitIncrementOperation(IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
   TagScope tag(this, "CountOperation");
   {
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 284a93f..451b17e 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -104,8 +104,7 @@
   void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
   void PrintLiteralWithModeIndented(const char* info,
                                     Variable* var,
-                                    Handle<Object> value,
-                                    StaticType* type);
+                                    Handle<Object> value);
   void PrintLabelsIndented(const char* info, ZoneStringList* labels);
 
   void inc_indent() { indent_++; }
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index fd3268d..4cf62e2 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2295,7 +2295,7 @@
     ObjectGroup* group = groups->at(i);
     if (group->info_ == NULL) continue;
     List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
-    for (int j = 0; j < group->objects_.length(); ++j) {
+    for (size_t j = 0; j < group->length_; ++j) {
       HeapObject* obj = HeapObject::cast(*group->objects_[j]);
       list->Add(obj);
       in_groups_.Insert(obj);
diff --git a/src/property.h b/src/property.h
index fa3916e..ee95ca2 100644
--- a/src/property.h
+++ b/src/property.h
@@ -185,6 +185,13 @@
     number_ = number;
   }
 
+  void DescriptorResult(JSObject* holder, Smi* details, int number) {
+    lookup_type_ = DESCRIPTOR_TYPE;
+    holder_ = holder;
+    details_ = PropertyDetails(details);
+    number_ = number;
+  }
+
   void ConstantResult(JSObject* holder) {
     lookup_type_ = CONSTANT_TYPE;
     holder_ = holder;
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
deleted file mode 100644
index 5a68ab0..0000000
--- a/src/register-allocator-inl.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_INL_H_
-#define V8_REGISTER_ALLOCATOR_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Result::Result(const Result& other) {
-  other.CopyTo(this);
-}
-
-
-Result& Result::operator=(const Result& other) {
-  if (this != &other) {
-    Unuse();
-    other.CopyTo(this);
-  }
-  return *this;
-}
-
-
-Result::~Result() {
-  if (is_register()) {
-    CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
-  }
-}
-
-
-void Result::Unuse() {
-  if (is_register()) {
-    CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
-  }
-  invalidate();
-}
-
-
-void Result::CopyTo(Result* destination) const {
-  destination->value_ = value_;
-  if (is_register()) {
-    CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg());
-  }
-}
-
-
-bool RegisterAllocator::is_used(Register reg) {
-  return registers_.is_used(ToNumber(reg));
-}
-
-
-int RegisterAllocator::count(Register reg) {
-  return registers_.count(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Use(Register reg) {
-  registers_.Use(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Unuse(Register reg) {
-  registers_.Unuse(ToNumber(reg));
-}
-
-
-TypeInfo Result::type_info() const {
-  ASSERT(is_valid());
-  return TypeInfo::FromInt(TypeInfoField::decode(value_));
-}
-
-
-void Result::set_type_info(TypeInfo info) {
-  ASSERT(is_valid());
-  value_ &= ~TypeInfoField::mask();
-  value_ |= TypeInfoField::encode(info.ToInt());
-}
-
-
-bool Result::is_number() const {
-  return type_info().IsNumber();
-}
-
-
-bool Result::is_smi() const {
-  return type_info().IsSmi();
-}
-
-
-bool Result::is_integer32() const {
-  return type_info().IsInteger32();
-}
-
-
-bool Result::is_double() const {
-  return type_info().IsDouble();
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
deleted file mode 100644
index cb5e35f..0000000
--- a/src/register-allocator.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-
-Result::Result(Register reg, TypeInfo info) {
-  ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
-  CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg);
-  value_ = TypeField::encode(REGISTER)
-      | TypeInfoField::encode(info.ToInt())
-      | DataField::encode(reg.code_);
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-
-Result RegisterAllocator::AllocateWithoutSpilling() {
-  // Return the first free register, if any.
-  int num = registers_.ScanForFreeRegister();
-  if (num == RegisterAllocator::kInvalidRegister) {
-    return Result();
-  }
-  return Result(RegisterAllocator::ToRegister(num));
-}
-
-
-Result RegisterAllocator::Allocate() {
-  Result result = AllocateWithoutSpilling();
-  if (!result.is_valid()) {
-    // Ask the current frame to spill a register.
-    ASSERT(cgen_->has_valid_frame());
-    Register free_reg = cgen_->frame()->SpillAnyRegister();
-    if (free_reg.is_valid()) {
-      ASSERT(!is_used(free_reg));
-      return Result(free_reg);
-    }
-  }
-  return result;
-}
-
-
-Result RegisterAllocator::Allocate(Register target) {
-  // If the target is not referenced, it can simply be allocated.
-  if (!is_used(RegisterAllocator::ToNumber(target))) {
-    return Result(target);
-  }
-  // If the target is only referenced in the frame, it can be spilled and
-  // then allocated.
-  ASSERT(cgen_->has_valid_frame());
-  if (cgen_->frame()->is_used(RegisterAllocator::ToNumber(target)) &&
-      count(target) == 1)  {
-    cgen_->frame()->Spill(target);
-    ASSERT(!is_used(RegisterAllocator::ToNumber(target)));
-    return Result(target);
-  }
-  // Otherwise (if it's referenced outside the frame) we cannot allocate it.
-  return Result();
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/register-allocator.h b/src/register-allocator.h
deleted file mode 100644
index f0ef9c3..0000000
--- a/src/register-allocator.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_H_
-#define V8_REGISTER_ALLOCATOR_H_
-
-#include "macro-assembler.h"
-#include "type-info.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Results
-//
-// Results encapsulate the compile-time values manipulated by the code
-// generator.  They can represent registers or constants.
-
-class Result BASE_EMBEDDED {
- public:
-  enum Type {
-    INVALID,
-    REGISTER,
-    CONSTANT
-  };
-
-  // Construct an invalid result.
-  Result() { invalidate(); }
-
-  // Construct a register Result.
-  explicit Result(Register reg, TypeInfo info = TypeInfo::Unknown());
-
-  // Construct a Result whose value is a compile-time constant.
-  explicit Result(Handle<Object> value) {
-    ZoneObjectList* constant_list = Isolate::Current()->result_constant_list();
-    TypeInfo info = TypeInfo::TypeFromValue(value);
-    value_ = TypeField::encode(CONSTANT)
-        | TypeInfoField::encode(info.ToInt())
-        | IsUntaggedInt32Field::encode(false)
-        | DataField::encode(constant_list->length());
-    constant_list->Add(value);
-  }
-
-  // The copy constructor and assignment operators could each create a new
-  // register reference.
-  inline Result(const Result& other);
-
-  inline Result& operator=(const Result& other);
-
-  inline ~Result();
-
-  inline void Unuse();
-
-  Type type() const { return TypeField::decode(value_); }
-
-  void invalidate() { value_ = TypeField::encode(INVALID); }
-
-  inline TypeInfo type_info() const;
-  inline void set_type_info(TypeInfo info);
-  inline bool is_number() const;
-  inline bool is_smi() const;
-  inline bool is_integer32() const;
-  inline bool is_double() const;
-
-  bool is_valid() const { return type() != INVALID; }
-  bool is_register() const { return type() == REGISTER; }
-  bool is_constant() const { return type() == CONSTANT; }
-
-  // An untagged int32 Result contains a signed int32 in a register
-  // or as a constant.  These are only allowed in a side-effect-free
-  // int32 calculation, and if a non-int32 input shows up or an overflow
-  // occurs, we bail out and drop all the int32 values.  Constants are
-  // not converted to int32 until they are loaded into a register.
-  bool is_untagged_int32() const {
-    return IsUntaggedInt32Field::decode(value_);
-  }
-  void set_untagged_int32(bool value) {
-    value_ &= ~IsUntaggedInt32Field::mask();
-    value_ |= IsUntaggedInt32Field::encode(value);
-  }
-
-  Register reg() const {
-    ASSERT(is_register());
-    uint32_t reg = DataField::decode(value_);
-    Register result;
-    result.code_ = reg;
-    return result;
-  }
-
-  Handle<Object> handle() const {
-    ASSERT(type() == CONSTANT);
-    return Isolate::Current()->result_constant_list()->
-        at(DataField::decode(value_));
-  }
-
-  // Move this result to an arbitrary register.  The register is not
-  // necessarily spilled from the frame or even singly-referenced outside
-  // it.
-  void ToRegister();
-
-  // Move this result to a specified register.  The register is spilled from
-  // the frame, and the register is singly-referenced (by this result)
-  // outside the frame.
-  void ToRegister(Register reg);
-
- private:
-  uint32_t value_;
-
-  // Declare BitFields with template parameters <type, start, size>.
-  class TypeField: public BitField<Type, 0, 2> {};
-  class TypeInfoField : public BitField<int, 2, 6> {};
-  class IsUntaggedInt32Field : public BitField<bool, 8, 1> {};
-  class DataField: public BitField<uint32_t, 9, 32 - 9> {};
-
-  inline void CopyTo(Result* destination) const;
-
-  friend class CodeGeneratorScope;
-};
-
-
-// -------------------------------------------------------------------------
-// Register file
-//
-// The register file tracks reference counts for the processor registers.
-// It is used by both the register allocator and the virtual frame.
-
-class RegisterFile BASE_EMBEDDED {
- public:
-  RegisterFile() { Reset(); }
-
-  void Reset() {
-    for (int i = 0; i < kNumRegisters; i++) {
-      ref_counts_[i] = 0;
-    }
-  }
-
-  // Predicates and accessors for the reference counts.
-  bool is_used(int num) {
-    ASSERT(0 <= num && num < kNumRegisters);
-    return ref_counts_[num] > 0;
-  }
-
-  int count(int num) {
-    ASSERT(0 <= num && num < kNumRegisters);
-    return ref_counts_[num];
-  }
-
-  // Record a use of a register by incrementing its reference count.
-  void Use(int num) {
-    ASSERT(0 <= num && num < kNumRegisters);
-    ref_counts_[num]++;
-  }
-
-  // Record that a register will no longer be used by decrementing its
-  // reference count.
-  void Unuse(int num) {
-    ASSERT(is_used(num));
-    ref_counts_[num]--;
-  }
-
-  // Copy the reference counts from this register file to the other.
-  void CopyTo(RegisterFile* other) {
-    for (int i = 0; i < kNumRegisters; i++) {
-      other->ref_counts_[i] = ref_counts_[i];
-    }
-  }
-
- private:
-  // C++ doesn't like zero length arrays, so we make the array length 1 even if
-  // we don't need it.
-  static const int kNumRegisters =
-      (RegisterAllocatorConstants::kNumRegisters == 0) ?
-      1 : RegisterAllocatorConstants::kNumRegisters;
-
-  int ref_counts_[kNumRegisters];
-
-  // Very fast inlined loop to find a free register.  Used in
-  // RegisterAllocator::AllocateWithoutSpilling.  Returns
-  // kInvalidRegister if no free register found.
-  int ScanForFreeRegister() {
-    for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
-      if (!is_used(i)) return i;
-    }
-    return RegisterAllocatorConstants::kInvalidRegister;
-  }
-
-  friend class RegisterAllocator;
-};
-
-
-// -------------------------------------------------------------------------
-// Register allocator
-//
-
-class RegisterAllocator BASE_EMBEDDED {
- public:
-  static const int kNumRegisters =
-      RegisterAllocatorConstants::kNumRegisters;
-  static const int kInvalidRegister =
-      RegisterAllocatorConstants::kInvalidRegister;
-
-  explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
-
-  // True if the register is reserved by the code generator, false if it
-  // can be freely used by the allocator.  Defined in the
-  // platform-specific XXX-inl.h files.
-  static inline bool IsReserved(Register reg);
-
-  // Convert between (unreserved) assembler registers and allocator
-  // numbers.  Defined in the platform-specific XXX-inl.h files.
-  static inline int ToNumber(Register reg);
-  static inline Register ToRegister(int num);
-
-  // Predicates and accessors for the registers' reference counts.
-  bool is_used(int num) { return registers_.is_used(num); }
-  inline bool is_used(Register reg);
-
-  int count(int num) { return registers_.count(num); }
-  inline int count(Register reg);
-
-  // Explicitly record a reference to a register.
-  void Use(int num) { registers_.Use(num); }
-  inline void Use(Register reg);
-
-  // Explicitly record that a register will no longer be used.
-  void Unuse(int num) { registers_.Unuse(num); }
-  inline void Unuse(Register reg);
-
-  // Reset the register reference counts to free all non-reserved registers.
-  void Reset() { registers_.Reset(); }
-
-  // Initialize the register allocator for entry to a JS function.  On
-  // entry, the (non-reserved) registers used by the JS calling
-  // convention are referenced and the other (non-reserved) registers
-  // are free.
-  inline void Initialize();
-
-  // Allocate a free register and return a register result if possible or
-  // fail and return an invalid result.
-  Result Allocate();
-
-  // Allocate a specific register if possible, spilling it from the
-  // current frame if necessary, or else fail and return an invalid
-  // result.
-  Result Allocate(Register target);
-
-  // Allocate a free register without spilling any from the current
-  // frame or fail and return an invalid result.
-  Result AllocateWithoutSpilling();
-
-  // Allocate a free byte register without spilling any from the current
-  // frame or fail and return an invalid result.
-  Result AllocateByteRegisterWithoutSpilling();
-
-  // Copy the internal state to a register file, to be restored later by
-  // RestoreFrom.
-  void SaveTo(RegisterFile* register_file) {
-    registers_.CopyTo(register_file);
-  }
-
-  // Restore the internal state.
-  void RestoreFrom(RegisterFile* register_file) {
-    register_file->CopyTo(&registers_);
-  }
-
- private:
-  CodeGenerator* cgen_;
-  RegisterFile registers_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_REGISTER_ALLOCATOR_H_
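
These three deleted files implemented register tracking for the old template-based code generator; optimized code now gets its registers from the Lithium allocator instead. The heart of the removed code is an RAII reference-counting pattern: every live Result holding a register keeps that register's count positive, and copies and destruction adjust the count automatically. A standalone sketch of that pattern with stand-in types (not V8 API):

    #include <cassert>

    struct RegisterFile {
      int ref_counts[8] = {0};
      void Use(int r)   { ++ref_counts[r]; }
      void Unuse(int r) { assert(ref_counts[r] > 0); --ref_counts[r]; }
      bool is_used(int r) const { return ref_counts[r] > 0; }
    };

    class Result {
     public:
      Result(RegisterFile* file, int reg) : file_(file), reg_(reg) {
        file_->Use(reg_);            // taking a reference
      }
      Result(const Result& other) : file_(other.file_), reg_(other.reg_) {
        file_->Use(reg_);            // copying creates a new reference
      }
      Result& operator=(const Result& other) {
        if (this != &other) {
          file_->Unuse(reg_);
          file_ = other.file_;
          reg_ = other.reg_;
          file_->Use(reg_);
        }
        return *this;
      }
      ~Result() { file_->Unuse(reg_); }  // dropping the reference
     private:
      RegisterFile* file_;
      int reg_;
    };
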
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 780314d..efe8044 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,649 +36,6 @@
 namespace v8 {
 namespace internal {
 
-class AstOptimizer: public AstVisitor {
- public:
-  explicit AstOptimizer() : has_function_literal_(false) {}
-
-  void Optimize(ZoneList<Statement*>* statements);
-
- private:
-  // Used for loop condition analysis.  Cleared before visiting a loop
-  // condition, set when a function literal is visited.
-  bool has_function_literal_;
-
-  // Helpers
-  void OptimizeArguments(ZoneList<Expression*>* arguments);
-
-  // Node visitors.
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
-};
-
-
-void AstOptimizer::Optimize(ZoneList<Statement*>* statements) {
-  int len = statements->length();
-  for (int i = 0; i < len; i++) {
-    Visit(statements->at(i));
-  }
-}
-
-
-void AstOptimizer::OptimizeArguments(ZoneList<Expression*>* arguments) {
-  for (int i = 0; i < arguments->length(); i++) {
-    Visit(arguments->at(i));
-  }
-}
-
-
-void AstOptimizer::VisitBlock(Block* node) {
-  Optimize(node->statements());
-}
-
-
-void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
-  node->expression()->set_no_negative_zero(true);
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitIfStatement(IfStatement* node) {
-  node->condition()->set_no_negative_zero(true);
-  Visit(node->condition());
-  Visit(node->then_statement());
-  if (node->HasElseStatement()) {
-    Visit(node->else_statement());
-  }
-}
-
-
-void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
-  node->cond()->set_no_negative_zero(true);
-  Visit(node->cond());
-  Visit(node->body());
-}
-
-
-void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
-  has_function_literal_ = false;
-  node->cond()->set_no_negative_zero(true);
-  Visit(node->cond());
-  node->set_may_have_function_literal(has_function_literal_);
-  Visit(node->body());
-}
-
-
-void AstOptimizer::VisitForStatement(ForStatement* node) {
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-  if (node->cond() != NULL) {
-    has_function_literal_ = false;
-    node->cond()->set_no_negative_zero(true);
-    Visit(node->cond());
-    node->set_may_have_function_literal(has_function_literal_);
-  }
-  Visit(node->body());
-  if (node->next() != NULL) {
-    Visit(node->next());
-  }
-}
-
-
-void AstOptimizer::VisitForInStatement(ForInStatement* node) {
-  Visit(node->each());
-  Visit(node->enumerable());
-  Visit(node->body());
-}
-
-
-void AstOptimizer::VisitTryCatchStatement(TryCatchStatement* node) {
-  Visit(node->try_block());
-  Visit(node->catch_var());
-  Visit(node->catch_block());
-}
-
-
-void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  Visit(node->try_block());
-  Visit(node->finally_block());
-}
-
-
-void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
-  node->tag()->set_no_negative_zero(true);
-  Visit(node->tag());
-  for (int i = 0; i < node->cases()->length(); i++) {
-    CaseClause* clause = node->cases()->at(i);
-    if (!clause->is_default()) {
-      Visit(clause->label());
-    }
-    Optimize(clause->statements());
-  }
-}
-
-
-void AstOptimizer::VisitContinueStatement(ContinueStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitBreakStatement(BreakStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitDeclaration(Declaration* node) {
-  // Will not be reached by the current optimizations.
-  USE(node);
-}
-
-
-void AstOptimizer::VisitEmptyStatement(EmptyStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitReturnStatement(ReturnStatement* node) {
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithEnterStatement(WithEnterStatement* node) {
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithExitStatement(WithExitStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
-  has_function_literal_ = true;
-}
-
-
-void AstOptimizer::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitConditional(Conditional* node) {
-  node->condition()->set_no_negative_zero(true);
-  Visit(node->condition());
-  Visit(node->then_expression());
-  Visit(node->else_expression());
-}
-
-
-void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
-  Variable* var = node->AsVariable();
-  if (var != NULL) {
-    if (var->type()->IsKnown()) {
-      node->type()->CopyFrom(var->type());
-    } else if (node->type()->IsLikelySmi()) {
-      var->type()->SetAsLikelySmi();
-    }
-
-    if (FLAG_safe_int32_compiler) {
-      if (var->IsStackAllocated() &&
-          !var->is_arguments() &&
-          var->mode() != Variable::CONST) {
-        node->set_side_effect_free(true);
-      }
-    }
-  }
-}
-
-
-void AstOptimizer::VisitLiteral(Literal* node) {
-  Handle<Object> literal = node->handle();
-  if (literal->IsSmi()) {
-    node->type()->SetAsLikelySmi();
-    node->set_side_effect_free(true);
-  } else if (literal->IsHeapNumber()) {
-    if (node->to_int32()) {
-      // Any HeapNumber has an int32 value if it is the input to a bit op.
-      node->set_side_effect_free(true);
-    } else {
-      double double_value = HeapNumber::cast(*literal)->value();
-      int32_t int32_value = DoubleToInt32(double_value);
-      node->set_side_effect_free(double_value == int32_value);
-    }
-  }
-}
-
-
-void AstOptimizer::VisitRegExpLiteral(RegExpLiteral* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
-  for (int i = 0; i < node->values()->length(); i++) {
-    Visit(node->values()->at(i));
-  }
-}
-
-void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
-  for (int i = 0; i < node->properties()->length(); i++) {
-    Visit(node->properties()->at(i)->key());
-    Visit(node->properties()->at(i)->value());
-  }
-}
-
-
-void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  Visit(node->key());
-  Visit(node->value());
-}
-
-
-void AstOptimizer::VisitAssignment(Assignment* node) {
-  switch (node->op()) {
-    case Token::INIT_VAR:
-    case Token::INIT_CONST:
-    case Token::ASSIGN:
-      // No type can be inferred from the general assignment.
-      break;
-    case Token::ASSIGN_BIT_OR:
-    case Token::ASSIGN_BIT_XOR:
-    case Token::ASSIGN_BIT_AND:
-    case Token::ASSIGN_SHL:
-    case Token::ASSIGN_SAR:
-    case Token::ASSIGN_SHR:
-      node->type()->SetAsLikelySmiIfUnknown();
-      node->target()->type()->SetAsLikelySmiIfUnknown();
-      node->value()->type()->SetAsLikelySmiIfUnknown();
-      node->value()->set_to_int32(true);
-      node->value()->set_no_negative_zero(true);
-      break;
-    case Token::ASSIGN_ADD:
-    case Token::ASSIGN_SUB:
-    case Token::ASSIGN_MUL:
-    case Token::ASSIGN_DIV:
-    case Token::ASSIGN_MOD:
-      if (node->type()->IsLikelySmi()) {
-        node->target()->type()->SetAsLikelySmiIfUnknown();
-        node->value()->type()->SetAsLikelySmiIfUnknown();
-      }
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  Visit(node->target());
-  Visit(node->value());
-
-  switch (node->op()) {
-    case Token::INIT_VAR:
-    case Token::INIT_CONST:
-    case Token::ASSIGN:
-      // Pure assignment copies the type from the value.
-      node->type()->CopyFrom(node->value()->type());
-      break;
-    case Token::ASSIGN_BIT_OR:
-    case Token::ASSIGN_BIT_XOR:
-    case Token::ASSIGN_BIT_AND:
-    case Token::ASSIGN_SHL:
-    case Token::ASSIGN_SAR:
-    case Token::ASSIGN_SHR:
-      // Should have been set up above already.
-      break;
-    case Token::ASSIGN_ADD:
-    case Token::ASSIGN_SUB:
-    case Token::ASSIGN_MUL:
-    case Token::ASSIGN_DIV:
-    case Token::ASSIGN_MOD:
-      if (node->type()->IsUnknown()) {
-        if (node->target()->type()->IsLikelySmi() ||
-            node->value()->type()->IsLikelySmi()) {
-          node->type()->SetAsLikelySmi();
-        }
-      }
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // Since this is an assignment, we have to propagate this node's type to the
-  // variable.
-  VariableProxy* proxy = node->target()->AsVariableProxy();
-  if (proxy != NULL) {
-    Variable* var = proxy->AsVariable();
-    if (var != NULL) {
-      StaticType* var_type = var->type();
-      if (var_type->IsUnknown()) {
-        var_type->CopyFrom(node->type());
-      } else if (var_type->IsLikelySmi()) {
-        // We do not reset likely types to Unknown.
-      }
-    }
-  }
-}
-
-
-void AstOptimizer::VisitThrow(Throw* node) {
-  Visit(node->exception());
-}
-
-
-void AstOptimizer::VisitProperty(Property* node) {
-  node->key()->set_no_negative_zero(true);
-  Visit(node->obj());
-  Visit(node->key());
-}
-
-
-void AstOptimizer::VisitCall(Call* node) {
-  Visit(node->expression());
-  OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallNew(CallNew* node) {
-  Visit(node->expression());
-  OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
-  OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
-  if (node->op() == Token::ADD || node->op() == Token::SUB) {
-    node->expression()->set_no_negative_zero(node->no_negative_zero());
-  } else {
-    node->expression()->set_no_negative_zero(true);
-  }
-  Visit(node->expression());
-  if (FLAG_safe_int32_compiler) {
-    switch (node->op()) {
-      case Token::BIT_NOT:
-        node->expression()->set_no_negative_zero(true);
-        node->expression()->set_to_int32(true);
-        // Fall through.
-      case Token::ADD:
-      case Token::SUB:
-        node->set_side_effect_free(node->expression()->side_effect_free());
-        break;
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-      case Token::VOID:
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else if (node->op() == Token::BIT_NOT) {
-    node->expression()->set_to_int32(true);
-  }
-}
-
-
-void AstOptimizer::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
-void AstOptimizer::VisitCountOperation(CountOperation* node) {
-  // Count operations assume that they work on Smis.
-  node->expression()->set_no_negative_zero(node->is_prefix() ?
-                                           true :
-                                           node->no_negative_zero());
-  node->type()->SetAsLikelySmiIfUnknown();
-  node->expression()->type()->SetAsLikelySmiIfUnknown();
-  Visit(node->expression());
-}
-
-
-static bool CouldBeNegativeZero(AstNode* node) {
-  Literal* literal = node->AsLiteral();
-  if (literal != NULL) {
-    Handle<Object> handle = literal->handle();
-    if (handle->IsString() || handle->IsSmi()) {
-      return false;
-    } else if (handle->IsHeapNumber()) {
-      double double_value = HeapNumber::cast(*handle)->value();
-      if (double_value != 0) {
-        return false;
-      }
-    }
-  }
-  BinaryOperation* binary = node->AsBinaryOperation();
-  if (binary != NULL && Token::IsBitOp(binary->op())) {
-    return false;
-  }
-  return true;
-}
-
-
-static bool CouldBePositiveZero(AstNode* node) {
-  Literal* literal = node->AsLiteral();
-  if (literal != NULL) {
-    Handle<Object> handle = literal->handle();
-    if (handle->IsSmi()) {
-      if (Smi::cast(*handle) != Smi::FromInt(0)) {
-        return false;
-      }
-    } else if (handle->IsHeapNumber()) {
-      // Heap number literal can't be +0, because that's a Smi.
-      return false;
-    }
-  }
-  return true;
-}
-
-
-void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
-  // Depending on the operation we can propagate this node's type down the
-  // AST nodes.
-  Token::Value op = node->op();
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-      node->left()->set_no_negative_zero(true);
-      node->right()->set_no_negative_zero(node->no_negative_zero());
-      break;
-    case Token::AND:
-      node->left()->set_no_negative_zero(node->no_negative_zero());
-      node->right()->set_no_negative_zero(node->no_negative_zero());
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      node->type()->SetAsLikelySmiIfUnknown();
-      node->left()->type()->SetAsLikelySmiIfUnknown();
-      node->right()->type()->SetAsLikelySmiIfUnknown();
-      node->left()->set_to_int32(true);
-      node->right()->set_to_int32(true);
-      node->left()->set_no_negative_zero(true);
-      node->right()->set_no_negative_zero(true);
-      break;
-    case Token::MUL: {
-      VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
-      VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
-      if (lvar_proxy != NULL && rvar_proxy != NULL) {
-        Variable* lvar = lvar_proxy->AsVariable();
-        Variable* rvar = rvar_proxy->AsVariable();
-        if (lvar != NULL && rvar != NULL) {
-          if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
-            Slot* lslot = lvar->AsSlot();
-            Slot* rslot = rvar->AsSlot();
-            if (lslot->type() == rslot->type() &&
-                (lslot->type() == Slot::PARAMETER ||
-                 lslot->type() == Slot::LOCAL) &&
-                lslot->index() == rslot->index()) {
-              // A number squared doesn't give negative zero.
-              node->set_no_negative_zero(true);
-            }
-          }
-        }
-      }
-    }
-    case Token::ADD:
-    case Token::SUB:
-    case Token::DIV:
-    case Token::MOD: {
-      if (node->type()->IsLikelySmi()) {
-        node->left()->type()->SetAsLikelySmiIfUnknown();
-        node->right()->type()->SetAsLikelySmiIfUnknown();
-      }
-      if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
-                               !CouldBeNegativeZero(node->right()))) {
-        node->left()->set_no_negative_zero(true);
-        node->right()->set_no_negative_zero(true);
-      } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
-                                      !CouldBePositiveZero(node->right()))) {
-        node->left()->set_no_negative_zero(true);
-        node->right()->set_no_negative_zero(true);
-      } else {
-        node->left()->set_no_negative_zero(node->no_negative_zero());
-        node->right()->set_no_negative_zero(node->no_negative_zero());
-      }
-      if (node->op() == Token::DIV) {
-        node->right()->set_no_negative_zero(false);
-      } else if (node->op() == Token::MOD) {
-        node->right()->set_no_negative_zero(true);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  Visit(node->left());
-  Visit(node->right());
-
-  // After visiting the operand nodes we have to check if this node's type
-  // can be updated. If it can, then we can push that information down
-  // towards the leaves again if the new information is an upgrade over the
-  // previous type of the operand nodes.
-  if (node->type()->IsUnknown()) {
-    if (node->left()->type()->IsLikelySmi() ||
-        node->right()->type()->IsLikelySmi()) {
-      node->type()->SetAsLikelySmi();
-    }
-    if (node->type()->IsLikelySmi()) {
-      // The type of this node changed to LIKELY_SMI. Propagate this knowledge
-      // down through the nodes.
-      if (node->left()->type()->IsUnknown()) {
-        node->left()->type()->SetAsLikelySmi();
-        Visit(node->left());
-      }
-      if (node->right()->type()->IsUnknown()) {
-        node->right()->type()->SetAsLikelySmi();
-        Visit(node->right());
-      }
-    }
-  }
-
-  if (FLAG_safe_int32_compiler) {
-    switch (node->op()) {
-      case Token::COMMA:
-      case Token::OR:
-      case Token::AND:
-        break;
-      case Token::BIT_OR:
-      case Token::BIT_XOR:
-      case Token::BIT_AND:
-      case Token::SHL:
-      case Token::SAR:
-      case Token::SHR:
-        // Add one to the number of bit operations in this expression.
-        node->set_num_bit_ops(1);
-        // Fall through.
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV:
-      case Token::MOD:
-        node->set_side_effect_free(node->left()->side_effect_free() &&
-                                   node->right()->side_effect_free());
-        node->set_num_bit_ops(node->num_bit_ops() +
-                                  node->left()->num_bit_ops() +
-                                  node->right()->num_bit_ops());
-        if (!node->no_negative_zero() && node->op() == Token::MUL) {
-          node->set_side_effect_free(false);
-        }
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
-  if (node->type()->IsKnown()) {
-    // Propagate useful information down towards the leaves.
-    node->left()->type()->SetAsLikelySmiIfUnknown();
-    node->right()->type()->SetAsLikelySmiIfUnknown();
-  }
-
-  node->left()->set_no_negative_zero(true);
-  // Only [[HasInstance]] has the right argument passed unchanged to it.
-  node->right()->set_no_negative_zero(true);
-
-  Visit(node->left());
-  Visit(node->right());
-
-  // After visiting the operand nodes we have to check if this node's type
-  // can be updated. If it can, then we can push that information down
-  // towards the leaves again if the new information is an upgrade over the
-  // previous type of the operand nodes.
-  if (node->type()->IsUnknown()) {
-    if (node->left()->type()->IsLikelySmi() ||
-        node->right()->type()->IsLikelySmi()) {
-      node->type()->SetAsLikelySmi();
-    }
-    if (node->type()->IsLikelySmi()) {
-      // The type of this node changed to LIKELY_SMI. Propagate this knowledge
-      // down through the nodes.
-      if (node->left()->type()->IsUnknown()) {
-        node->left()->type()->SetAsLikelySmi();
-        Visit(node->left());
-      }
-      if (node->right()->type()->IsUnknown()) {
-        node->right()->type()->SetAsLikelySmi();
-        Visit(node->right());
-      }
-    }
-  }
-}
-
-
-void AstOptimizer::VisitCompareToNull(CompareToNull* node) {
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitThisFunction(ThisFunction* node) {
-  USE(node);
-}
-
-
 class Processor: public AstVisitor {
  public:
   explicit Processor(Variable* result)
@@ -943,11 +300,6 @@
 }
 
 
-void Processor::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
 void Processor::VisitCountOperation(CountOperation* node) {
   USE(node);
   UNREACHABLE();
@@ -1005,20 +357,4 @@
 }
 
 
-// Assumes code has been parsed and scopes have been analyzed.  Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
-bool Rewriter::Analyze(CompilationInfo* info) {
-  FunctionLiteral* function = info->function();
-  ASSERT(function != NULL && function->scope() != NULL);
-
-  ZoneList<Statement*>* body = function->body();
-  if (FLAG_optimize_ast && !body->is_empty()) {
-    AstOptimizer optimizer;
-    optimizer.Optimize(body);
-    if (optimizer.HasStackOverflow()) return false;
-  }
-  return true;
-}
-
-
 } }  // namespace v8::internal
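
The deleted AstOptimizer was the analysis half of the old --optimize-ast pipeline: it propagated likely-smi hints, flagged side-effect-free int32 subexpressions for the safe-int32 compiler, and marked contexts where -0 cannot be observed. The two negative-zero facts its comments leaned on are easy to verify directly; a small self-contained check:

    #include <cmath>
    #include <cstdio>

    int main() {
      double nz = -0.0;
      std::printf("%d\n", std::signbit(nz));       // 1: -0 is a real, distinct value
      std::printf("%d\n", std::signbit(nz * nz));  // 0: a number squared is never -0
      std::printf("%d\n", nz == 0.0);              // 1: yet it compares equal to +0,
      return 0;                                    //    so only sign-sensitive uses care
    }
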
diff --git a/src/rewriter.h b/src/rewriter.h
index 62e1b7f..59914d9 100644
--- a/src/rewriter.h
+++ b/src/rewriter.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,15 +42,6 @@
   // Assumes code has been parsed and scopes have been analyzed.  Mutates the
   // AST, so the AST should not continue to be used in the case of failure.
   static bool Rewrite(CompilationInfo* info);
-
-  // Perform a suite of simple non-iterative analyses of the AST.  Mark
-  // expressions that are likely smis, expressions without side effects,
-  // expressions whose value will be converted to Int32, and expressions in a
-  // context where +0 and -0 are treated the same.
-  //
-  // Assumes code has been parsed and scopes have been analyzed.  Mutates the
-  // AST, so the AST should not continue to be used in the case of failure.
-  static bool Analyze(CompilationInfo* info);
 };
 
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 28755e3..6e25169 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -130,9 +130,11 @@
       js_ratio_(0),
       sampler_window_position_(0),
       optimize_soon_list_(NULL),
-      state_window_position_(0) {
-  state_counts_[0] = kStateWindowSize;
-  state_counts_[1] = 0;
+      state_window_position_(0),
+      state_window_ticks_(0) {
+  state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
+  state_counts_[IN_JS_STATE] = 0;
+  STATIC_ASSERT(IN_NON_JS_STATE == 0);
   memset(state_window_, 0, sizeof(state_window_));
   ClearSampleBuffer();
 }
@@ -344,8 +346,12 @@
   ASSERT(IsPowerOf2(kStateWindowSize));
   state_window_position_ = (state_window_position_ + 1) &
       (kStateWindowSize - 1);
+  // Note: to calculate the correct ratio we have to track how many valid
+  // ticks are actually in the state window, because on profiler
+  // startup this number can be less than the window size.
+  state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
   NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
-                  kStateWindowSize);
+                  state_window_ticks_);
 }
 #endif
 
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 8074035..3656893 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -40,13 +40,6 @@
 class PendingListNode;
 class Semaphore;
 
-
-enum SamplerState {
-  IN_NON_JS_STATE = 0,
-  IN_JS_STATE = 1
-};
-
-
 class RuntimeProfiler {
  public:
   explicit RuntimeProfiler(Isolate* isolate);
@@ -101,6 +94,11 @@
   static const int kSamplerWindowSize = 16;
   static const int kStateWindowSize = 128;
 
+  enum SamplerState {
+    IN_NON_JS_STATE = 0,
+    IN_JS_STATE = 1
+  };
+
   static void HandleWakeUp(Isolate* isolate);
 
   void Optimize(JSFunction* function, bool eager, int delay);
@@ -137,6 +135,7 @@
 
   SamplerState state_window_[kStateWindowSize];
   int state_window_position_;
+  int state_window_ticks_;
   int state_counts_[2];
 
   // Possible state values:
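
The profiler fix is easiest to see in isolation: right after startup the state window contains fewer valid ticks than its capacity, and dividing by the full window size underestimated the JS ratio. A free-standing sketch of the corrected computation (names assumed):

    #include <algorithm>

    const int kStateWindowSize = 128;

    // js_ticks: ticks observed in JS state; ticks_seen: valid ticks recorded
    // so far, saturating at the window size.
    int JsRatio(int js_ticks, int ticks_seen) {
      int denom = std::min(kStateWindowSize, ticks_seen);
      return denom == 0 ? 0 : js_ticks * 100 / denom;
    }
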
diff --git a/src/runtime.cc b/src/runtime.cc
index ff9f914..c7ff3a3 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7323,14 +7323,13 @@
   ASSERT(isolate->heap()->IsAllocationAllowed());
   int frames = deoptimizer->output_count();
 
+  deoptimizer->MaterializeHeapNumbers();
+  delete deoptimizer;
+
   JavaScriptFrameIterator it(isolate);
   JavaScriptFrame* frame = NULL;
-  for (int i = 0; i < frames; i++) {
-    if (i != 0) it.Advance();
-    frame = it.frame();
-    deoptimizer->InsertHeapNumberValues(frames - i - 1, frame);
-  }
-  delete deoptimizer;
+  for (int i = 0; i < frames - 1; i++) it.Advance();
+  frame = it.frame();
 
   RUNTIME_ASSERT(frame->function()->IsJSFunction());
   Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
@@ -7821,8 +7820,17 @@
     // The property exists in the extension context.
     context_ext = Handle<JSObject>::cast(holder);
   } else {
-    // The property was not found. It needs to be stored in the global context.
+    // The property was not found.
     ASSERT(attributes == ABSENT);
+
+    if (strict_mode == kStrictMode) {
+      // Throw in strict mode (assignment to undefined variable).
+      Handle<Object> error =
+        isolate->factory()->NewReferenceError(
+            "not_defined", HandleVector(&name, 1));
+      return isolate->Throw(*error);
+    }
+    // In non-strict mode, the property is stored in the global context.
     attributes = NONE;
     context_ext = Handle<JSObject>(isolate->context()->global());
   }
@@ -9821,6 +9829,10 @@
       at_local_ = index < 0;
     } else if (context_->is_function_context()) {
       at_local_ = true;
+    } else if (context_->closure() != *function_) {
+      // The context_ is a with block from the outer function.
+      ASSERT(context_->has_extension());
+      at_local_ = true;
     }
   }
 
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index cc9f816..2552af2 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -220,7 +220,7 @@
       ASSERT(index == this->index());
     }
 
-    inline Value(uint32_t value) : value_(value) {}
+    explicit inline Value(uint32_t value) : value_(value) {}
 
     uint32_t raw() { return value_; }
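
Making the single-argument constructor explicit is a pure hardening change: raw uint32 values can no longer convert into Value objects by accident. A generic illustration:

    #include <cstdint>

    class Value {
     public:
      explicit Value(uint32_t value) : value_(value) {}
      uint32_t raw() const { return value_; }
     private:
      uint32_t value_;
    };

    void Consume(Value v) { (void)v; }

    // Consume(42u);         // no longer compiles: implicit uint32 -> Value is gone
    // Consume(Value(42u));  // the conversion must now be spelled out
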
 
diff --git a/src/spaces.cc b/src/spaces.cc
index eb4fa7d..674078c 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1570,7 +1570,6 @@
       CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
-      CASE(BINARY_OP_IC);
       CASE(TYPE_RECORDING_BINARY_OP_IC);
       CASE(COMPARE_IC);
     }
diff --git a/src/top.cc b/src/top.cc
index de0d92d..8611a31 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -72,6 +72,7 @@
   int id = Isolate::Current()->thread_manager()->CurrentId();
   thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
   external_caught_exception_ = false;
+  in_exception_reporting_ = false;
   failed_access_check_callback_ = NULL;
   save_context_ = NULL;
   catcher_ = NULL;
@@ -533,19 +534,19 @@
   // the message for stack overflow exceptions which is very likely to
   // double fault with another stack overflow exception, we use a
   // precomputed message.
-  DoThrow(*exception, NULL, kStackOverflowMessage);
+  DoThrow(*exception, NULL);
   return Failure::Exception();
 }
 
 
 Failure* Isolate::TerminateExecution() {
-  DoThrow(heap_.termination_exception(), NULL, NULL);
+  DoThrow(heap_.termination_exception(), NULL);
   return Failure::Exception();
 }
 
 
 Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
-  DoThrow(exception, location, NULL);
+  DoThrow(exception, location);
   return Failure::Exception();
 }
 
@@ -663,9 +664,7 @@
 }
 
 
-void Isolate::DoThrow(MaybeObject* exception,
-                      MessageLocation* location,
-                      const char* message) {
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
   ASSERT(!has_pending_exception());
 
   HandleScope scope;
@@ -722,7 +721,6 @@
 
   // Save the message for reporting if the exception remains uncaught.
   thread_local_top()->has_pending_message_ = report_exception;
-  thread_local_top()->pending_message_ = message;
   if (!message_obj.is_null()) {
     thread_local_top()->pending_message_obj_ = *message_obj;
     if (location != NULL) {
@@ -794,55 +792,36 @@
 
 void Isolate::ReportPendingMessages() {
   ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
   // If the pending exception is OutOfMemoryException, set out_of_memory in
   // the global context.  Note: We have to mark the global context here
   // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
   // set it.
-  bool external_caught = IsExternallyCaught();
-  thread_local_top()->external_caught_exception_ = external_caught;
-  HandleScope scope(this);
-  if (thread_local_top()->pending_exception_ ==
-      Failure::OutOfMemoryException()) {
+  HandleScope scope;
+  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
     context()->mark_out_of_memory();
-  } else if (thread_local_top()->pending_exception_ ==
-             heap_.termination_exception()) {
-    if (external_caught) {
-      try_catch_handler()->can_continue_ = false;
-      try_catch_handler()->exception_ = heap_.null_value();
-    }
+  } else if (thread_local_top_.pending_exception_ ==
+             heap()->termination_exception()) {
+    // Do nothing: if needed, the exception has already been propagated to
+    // v8::TryCatch.
   } else {
-    // At this point all non-object (failure) exceptions have
-    // been dealt with so this shouldn't fail.
-    Object* pending_exception_object = pending_exception()->ToObjectUnchecked();
-    Handle<Object> exception(pending_exception_object);
-    thread_local_top()->external_caught_exception_ = false;
-    if (external_caught) {
-      try_catch_handler()->can_continue_ = true;
-      try_catch_handler()->exception_ = thread_local_top()->pending_exception_;
-      if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
-        try_catch_handler()->message_ =
-            thread_local_top()->pending_message_obj_;
-      }
-    }
-    if (thread_local_top()->has_pending_message_) {
-      thread_local_top()->has_pending_message_ = false;
-      if (thread_local_top()->pending_message_ != NULL) {
-        MessageHandler::ReportMessage(thread_local_top()->pending_message_);
-      } else if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
-        Handle<Object> message_obj(thread_local_top()->pending_message_obj_);
-        if (thread_local_top()->pending_message_script_ != NULL) {
-          Handle<Script> script(thread_local_top()->pending_message_script_);
-          int start_pos = thread_local_top()->pending_message_start_pos_;
-          int end_pos = thread_local_top()->pending_message_end_pos_;
+    if (thread_local_top_.has_pending_message_) {
+      thread_local_top_.has_pending_message_ = false;
+      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+        HandleScope scope;
+        Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+        if (thread_local_top_.pending_message_script_ != NULL) {
+          Handle<Script> script(thread_local_top_.pending_message_script_);
+          int start_pos = thread_local_top_.pending_message_start_pos_;
+          int end_pos = thread_local_top_.pending_message_end_pos_;
           MessageLocation location(script, start_pos, end_pos);
-          MessageHandler::ReportMessage(&location, message_obj);
+          MessageHandler::ReportMessage(this, &location, message_obj);
         } else {
-          MessageHandler::ReportMessage(NULL, message_obj);
+          MessageHandler::ReportMessage(this, NULL, message_obj);
         }
       }
     }
-    thread_local_top()->external_caught_exception_ = external_caught;
-    set_pending_exception(*exception);
   }
   clear_pending_message();
 }
@@ -854,6 +833,9 @@
 
 
 bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+  ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
   // Always reschedule out of memory exceptions.
   if (!is_out_of_memory()) {
     bool is_termination_exception =
@@ -966,7 +948,7 @@
   memcpy(reinterpret_cast<char*>(thread_local_top()), from,
          sizeof(ThreadLocalTop));
   // This might be just paranoia, but it seems to be needed in case a
-  // thread_local_ is restored on a separate OS thread.
+  // thread_local_top_ is restored on a separate OS thread.
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
   thread_local_top()->simulator_ = Simulator::current(this);
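
The top.cc rewrite routes every update of an external v8::TryCatch through the new PropagatePendingExceptionToExternalTryCatch helper, called at the top of both ReportPendingMessages and OptionalRescheduleException, rather than the inline copying the old ReportPendingMessages spread across its branches. The call structure, as a stand-in sketch (only the shape mirrors the diff):

    struct TryCatchState { bool has_exception = false; };

    struct IsolateLike {
      bool has_pending_exception = false;
      TryCatchState external_try_catch;

      // Single choke point for copying pending-exception state outward.
      void PropagatePendingExceptionToExternalTryCatch() {
        external_try_catch.has_exception = has_pending_exception;
      }

      void ReportPendingMessages() {
        PropagatePendingExceptionToExternalTryCatch();
        // ... then report and clear any pending message ...
      }

      bool OptionalRescheduleException() {
        PropagatePendingExceptionToExternalTryCatch();
        // ... then decide whether the exception stays scheduled ...
        return has_pending_exception;
      }
    };
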
diff --git a/src/type-info.cc b/src/type-info.cc
index 256f48a..4069c83 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -89,7 +89,7 @@
 }
 
 
-bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
+bool TypeFeedbackOracle::StoreIsMonomorphic(Expression* expr) {
   Handle<Object> map_or_code(GetInfo(expr->position()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
@@ -119,7 +119,7 @@
 }
 
 
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphic(expr));
   Handle<HeapObject> map_or_code(
       Handle<HeapObject>::cast(GetInfo(expr->position())));
@@ -178,7 +178,7 @@
 }
 
 ExternalArrayType TypeFeedbackOracle::GetKeyedStoreExternalArrayType(
-    Assignment* expr) {
+    Expression* expr) {
   Handle<Object> stub = GetInfo(expr->position());
   ASSERT(stub->IsCode());
   return Code::cast(*stub)->external_array_type();
@@ -244,22 +244,7 @@
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
-  if (code->is_binary_op_stub()) {
-    BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
-        code->binary_op_type());
-    switch (type) {
-      case BinaryOpIC::UNINIT_OR_SMI:
-        return TypeInfo::Smi();
-      case BinaryOpIC::DEFAULT:
-        return (expr->op() == Token::DIV || expr->op() == Token::MUL)
-            ? TypeInfo::Double()
-            : TypeInfo::Integer32();
-      case BinaryOpIC::HEAP_NUMBERS:
-        return TypeInfo::Double();
-      default:
-        return unknown;
-    }
-  } else if (code->is_type_recording_binary_op_stub()) {
+  if (code->is_type_recording_binary_op_stub()) {
     TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
         code->type_recording_binary_op_type());
     TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
@@ -391,8 +376,7 @@
     InlineCacheState state = target->ic_state();
     Code::Kind kind = target->kind();
 
-    if (kind == Code::BINARY_OP_IC ||
-        kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
+    if (kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
         kind == Code::COMPARE_IC) {
       // TODO(kasperl): Avoid having multiple ICs with the same
       // position by making sure that we have position information
@@ -446,9 +430,7 @@
       if (target->is_inline_cache_stub()) {
         InlineCacheState state = target->ic_state();
         Code::Kind kind = target->kind();
-        if (kind == Code::BINARY_OP_IC) {
-          if (target->binary_op_type() == BinaryOpIC::GENERIC) continue;
-        } else if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
+        if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
           if (target->type_recording_binary_op_type() ==
               TRBinaryOpIC::GENERIC) {
             continue;
diff --git a/src/type-info.h b/src/type-info.h
index 9b69526..f6e6729 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -239,18 +239,18 @@
   TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
 
   bool LoadIsMonomorphic(Property* expr);
-  bool StoreIsMonomorphic(Assignment* expr);
+  bool StoreIsMonomorphic(Expression* expr);
   bool CallIsMonomorphic(Call* expr);
 
   Handle<Map> LoadMonomorphicReceiverType(Property* expr);
-  Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
+  Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
 
   ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
   ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
   ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
 
   ExternalArrayType GetKeyedLoadExternalArrayType(Property* expr);
-  ExternalArrayType GetKeyedStoreExternalArrayType(Assignment* expr);
+  ExternalArrayType GetKeyedStoreExternalArrayType(Expression* expr);
 
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
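
Widening the store-feedback queries from Assignment* to Expression* is possible because they only ever key off expr->position(); presumably it also lets other store sites, such as count operations, reuse the same oracle entry points. A generic illustration of the widening, with stand-in types:

    struct Expression {
      virtual ~Expression() {}
      virtual int position() const = 0;
    };
    struct Assignment : public Expression {
      virtual int position() const { return 10; }
    };
    struct CountOperation : public Expression {
      virtual int position() const { return 20; }
    };

    // Feedback is keyed by source position, which every Expression carries,
    // so the concrete Assignment type was never actually needed:
    bool StoreIsMonomorphic(Expression* expr) { return expr->position() >= 0; }
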
diff --git a/src/v8.cc b/src/v8.cc
index f89ed83..19ed184 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -193,11 +193,7 @@
   // Setup the platform OS support.
   OS::Setup();
 
-#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
-  use_crankshaft_ = false;
-#else
   use_crankshaft_ = FLAG_crankshaft;
-#endif
 
   if (Serializer::enabled()) {
     use_crankshaft_ = false;
diff --git a/src/v8utils.h b/src/v8utils.h
index 87c5e7f..93fc1fd 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -120,7 +120,9 @@
 // Memory
 
 // Copies data from |src| to |dst|.  The data spans MUST not overlap.
-inline void CopyWords(Object** dst, Object** src, int num_words) {
+template <typename T>
+inline void CopyWords(T* dst, T* src, int num_words) {
+  STATIC_ASSERT(sizeof(T) == kPointerSize);
   ASSERT(Min(dst, src) + num_words <= Max(dst, src));
   ASSERT(num_words > 0);
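
CopyWords now accepts any word-sized element type instead of only Object**, and the STATIC_ASSERT rejects misuse at compile time. A standalone sketch of the shape, using standard C++ in place of V8's macros:

    template <typename T>
    inline void CopyWords(T* dst, T* src, int num_words) {
      // Compile-time guard: T must occupy exactly one machine word.
      static_assert(sizeof(T) == sizeof(void*), "T must be word-sized");
      // The spans must not overlap (the real code also ASSERTs this).
      for (int i = 0; i < num_words; i++) dst[i] = src[i];
    }
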
 
diff --git a/src/variables.cc b/src/variables.cc
index fa7ce1b..0502722 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,26 +35,8 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Implementation StaticType.
-
-
-const char* StaticType::Type2String(StaticType* type) {
-  switch (type->kind_) {
-    case UNKNOWN:
-      return "UNKNOWN";
-    case LIKELY_SMI:
-      return "LIKELY_SMI";
-    default:
-      UNREACHABLE();
-  }
-  return "UNREACHABLE";
-}
-
-
-// ----------------------------------------------------------------------------
 // Implementation Variable.
 
-
 const char* Variable::Mode2String(Mode mode) {
   switch (mode) {
     case VAR: return "VAR";
diff --git a/src/variables.h b/src/variables.h
index 67e1a18..b1ff0db 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,46 +33,6 @@
 namespace v8 {
 namespace internal {
 
-// Variables and AST expression nodes can track their "type" to enable
-// optimizations and removal of redundant checks when generating code.
-
-class StaticType {
- public:
-  enum Kind {
-    UNKNOWN,
-    LIKELY_SMI
-  };
-
-  StaticType() : kind_(UNKNOWN) {}
-
-  bool Is(Kind kind) const { return kind_ == kind; }
-
-  bool IsKnown() const { return !Is(UNKNOWN); }
-  bool IsUnknown() const { return Is(UNKNOWN); }
-  bool IsLikelySmi() const { return Is(LIKELY_SMI); }
-
-  void CopyFrom(StaticType* other) {
-    kind_ = other->kind_;
-  }
-
-  static const char* Type2String(StaticType* type);
-
-  // LIKELY_SMI accessors
-  void SetAsLikelySmi() {
-    kind_ = LIKELY_SMI;
-  }
-
-  void SetAsLikelySmiIfUnknown() {
-    if (IsUnknown()) {
-      SetAsLikelySmi();
-    }
-  }
-
- private:
-  Kind kind_;
-};
-
-
 // The AST refers to variables via VariableProxies - placeholders for the actual
 // variables. Variables themselves are never directly referred to from the AST,
 // they are maintained by scopes, and referred to from VariableProxies and Slots
@@ -181,8 +141,6 @@
   Expression* rewrite() const { return rewrite_; }
   void set_rewrite(Expression* expr) { rewrite_ = expr; }
 
-  StaticType* type() { return &type_; }
-
  private:
   Scope* scope_;
   Handle<String> name_;
@@ -191,9 +149,6 @@
 
   Variable* local_if_not_shadowed_;
 
-  // Static type information
-  StaticType type_;
-
   // Code generation.
   // rewrite_ is usually a Slot or a Property, but may be any expression.
   Expression* rewrite_;
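
With StaticType removed, the parse-time LIKELY_SMI hints that used to ride
on every Variable are gone; type expectations now come from runtime
feedback instead (compare the TypeFeedbackOracle changes earlier in this
patch). A hedged sketch of the shift; the helper below is illustrative and
appears nowhere in this diff.

// Illustrative only: how a smi fast-path decision migrates from static
// hints to collected feedback.
bool ShouldEmitSmiFastPath(TypeFeedbackOracle* oracle, Expression* expr) {
  // Previously: variable->type()->IsLikelySmi()  (StaticType, now removed).
  // Now: consult feedback recorded by the full code generator; the exact
  // query below is a stand-in, not the real heuristic.
  return oracle->StoreIsMonomorphic(expr);
}
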
diff --git a/src/version.cc b/src/version.cc
index fa05286..9ce5e37 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      8
-#define PATCH_LEVEL       3
+#define BUILD_NUMBER      9
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/virtual-frame-heavy-inl.h b/src/virtual-frame-heavy-inl.h
deleted file mode 100644
index cf12eca..0000000
--- a/src/virtual-frame-heavy-inl.h
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_HEAVY_INL_H_
-#define V8_VIRTUAL_FRAME_HEAVY_INL_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "register-allocator-inl.h"
-#include "codegen-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame()
-    : elements_(parameter_count() + local_count() + kPreallocatedElements),
-      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
-  for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
-  }
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    register_locations_[i] = kIllegalIndex;
-  }
-}
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
-    : elements_(original->element_count()),
-      stack_pointer_(original->stack_pointer_) {
-  elements_.AddAll(original->elements_);
-  // Copy register locations from original.
-  memcpy(&register_locations_,
-         original->register_locations_,
-         sizeof(register_locations_));
-}
-
-
-void VirtualFrame::PushFrameSlotAt(int index) {
-  elements_.Add(CopyElementAt(index));
-}
-
-
-void VirtualFrame::Push(Register reg, TypeInfo info) {
-  if (is_used(reg)) {
-    int index = register_location(reg);
-    FrameElement element = CopyElementAt(index, info);
-    elements_.Add(element);
-  } else {
-    Use(reg, element_count());
-    FrameElement element =
-        FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
-    elements_.Add(element);
-  }
-}
-
-
-bool VirtualFrame::ConstantPoolOverflowed() {
-  return FrameElement::ConstantPoolOverflowed();
-}
-
-
-bool VirtualFrame::Equals(VirtualFrame* other) {
-#ifdef DEBUG
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (register_location(i) != other->register_location(i)) {
-      return false;
-    }
-  }
-  if (element_count() != other->element_count()) return false;
-#endif
-  if (stack_pointer_ != other->stack_pointer_) return false;
-  for (int i = 0; i < element_count(); i++) {
-    if (!elements_[i].Equals(other->elements_[i])) return false;
-  }
-
-  return true;
-}
-
-
-void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) {
-  elements_[local0_index() + index].set_type_info(info);
-}
-
-
-// Make the type of all elements be MEMORY.
-void VirtualFrame::SpillAll() {
-  for (int i = 0; i < element_count(); i++) {
-    SpillElementAt(i);
-  }
-}
-
-
-void VirtualFrame::PrepareForReturn() {
-  // Spill all locals. This is necessary to make sure all locals have
-  // the right value when breaking at the return site in the debugger.
-  for (int i = 0; i < expression_base_index(); i++) {
-    SpillElementAt(i);
-  }
-}
-
-
-void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) {
-  elements_[param0_index() + index].set_type_info(info);
-}
-
-
-void VirtualFrame::Nip(int num_dropped) {
-  ASSERT(num_dropped >= 0);
-  if (num_dropped == 0) return;
-  Result tos = Pop();
-  if (num_dropped > 1) {
-    Drop(num_dropped - 1);
-  }
-  SetElementAt(0, &tos);
-}
-
-
-void VirtualFrame::Push(Smi* value) {
-  Push(Handle<Object> (value));
-}
-
-
-int VirtualFrame::register_location(Register reg) {
-  return register_locations_[RegisterAllocator::ToNumber(reg)];
-}
-
-
-void VirtualFrame::set_register_location(Register reg, int index) {
-  register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-}
-
-
-bool VirtualFrame::is_used(Register reg) {
-  return register_locations_[RegisterAllocator::ToNumber(reg)]
-      != kIllegalIndex;
-}
-
-
-void VirtualFrame::SetElementAt(int index, Handle<Object> value) {
-  Result temp(value);
-  SetElementAt(index, &temp);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  return RawCallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() {
-  return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() {
-  return cgen()->scope()->num_stack_slots();
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_HEAVY_INL_H_
diff --git a/src/virtual-frame-heavy.cc b/src/virtual-frame-heavy.cc
deleted file mode 100644
index 7270280..0000000
--- a/src/virtual-frame-heavy.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::SetElementAt(int index, Result* value) {
-  int frame_index = element_count() - index - 1;
-  ASSERT(frame_index >= 0);
-  ASSERT(frame_index < element_count());
-  ASSERT(value->is_valid());
-  FrameElement original = elements_[frame_index];
-
-  // Early exit if the element is the same as the one being set.
-  bool same_register = original.is_register()
-      && value->is_register()
-      && original.reg().is(value->reg());
-  bool same_constant = original.is_constant()
-      && value->is_constant()
-      && original.handle().is_identical_to(value->handle());
-  if (same_register || same_constant) {
-    value->Unuse();
-    return;
-  }
-
-  InvalidateFrameSlotAt(frame_index);
-
-  if (value->is_register()) {
-    if (is_used(value->reg())) {
-      // The register already appears on the frame.  Either the existing
-      // register element, or the new element at frame_index, must be made
-      // a copy.
-      int i = register_location(value->reg());
-
-      if (i < frame_index) {
-        // The register FrameElement is lower in the frame than the new copy.
-        elements_[frame_index] = CopyElementAt(i);
-      } else {
-        // There was an early bailout for the case of setting a
-        // register element to itself.
-        ASSERT(i != frame_index);
-        elements_[frame_index] = elements_[i];
-        elements_[i] = CopyElementAt(frame_index);
-        if (elements_[frame_index].is_synced()) {
-          elements_[i].set_sync();
-        }
-        elements_[frame_index].clear_sync();
-        set_register_location(value->reg(), frame_index);
-        for (int j = i + 1; j < element_count(); j++) {
-          if (elements_[j].is_copy() && elements_[j].index() == i) {
-            elements_[j].set_index(frame_index);
-          }
-        }
-      }
-    } else {
-      // The register value->reg() was not already used on the frame.
-      Use(value->reg(), frame_index);
-      elements_[frame_index] =
-          FrameElement::RegisterElement(value->reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        value->type_info());
-    }
-  } else {
-    ASSERT(value->is_constant());
-    elements_[frame_index] =
-        FrameElement::ConstantElement(value->handle(),
-                                      FrameElement::NOT_SYNCED);
-  }
-  value->Unuse();
-}
-
-
-// Create a duplicate of an existing valid frame element.
-// We can pass optional number type information that will override the
-// existing information about the backing element. The new information must
-// not conflict with the existing type information and must be equally or
-// more precise. The default parameter value kUninitialized means that there
-// is no additional information.
-FrameElement VirtualFrame::CopyElementAt(int index, TypeInfo info) {
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  FrameElement target = elements_[index];
-  FrameElement result;
-
-  switch (target.type()) {
-    case FrameElement::CONSTANT:
-      // We do not copy constants and instead return a fresh unsynced
-      // constant.
-      result = FrameElement::ConstantElement(target.handle(),
-                                             FrameElement::NOT_SYNCED);
-      break;
-
-    case FrameElement::COPY:
-      // We do not allow copies of copies, so we follow one link to
-      // the actual backing store of a copy before making a copy.
-      index = target.index();
-      ASSERT(elements_[index].is_memory() || elements_[index].is_register());
-      // Fall through.
-
-    case FrameElement::MEMORY:  // Fall through.
-    case FrameElement::REGISTER: {
-      // All copies are backed by memory or register locations.
-      result.set_type(FrameElement::COPY);
-      result.clear_copied();
-      result.clear_sync();
-      result.set_index(index);
-      elements_[index].set_copied();
-      // Update backing element's number information.
-      TypeInfo existing = elements_[index].type_info();
-      ASSERT(!existing.IsUninitialized());
-      // Assert that the new type information (a) does not conflict with the
-      // existing one and (b) is equally or more precise.
-      ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt());
-      ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt());
-
-      elements_[index].set_type_info(!info.IsUninitialized()
-                                       ? info
-                                       : existing);
-      break;
-    }
-    case FrameElement::INVALID:
-      // We should not try to copy invalid elements.
-      UNREACHABLE();
-      break;
-  }
-  return result;
-}
-
-
-// Modify the state of the virtual frame to match the actual frame by adding
-// extra in-memory elements to the top of the virtual frame.  The extra
-// elements will be externally materialized on the actual frame (e.g., by
-// pushing an exception handler).  No code is emitted.
-void VirtualFrame::Adjust(int count) {
-  ASSERT(count >= 0);
-  ASSERT(stack_pointer_ == element_count() - 1);
-
-  for (int i = 0; i < count; i++) {
-    elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
-  }
-  stack_pointer_ += count;
-}
-
-
-void VirtualFrame::ForgetElements(int count) {
-  ASSERT(count >= 0);
-  ASSERT(element_count() >= count);
-
-  for (int i = 0; i < count; i++) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      // A hack to properly count register references for the code
-      // generator's current frame and also for other frames.  The
-      // same code appears in PrepareMergeTo.
-      if (cgen()->frame() == this) {
-        Unuse(last.reg());
-      } else {
-        set_register_location(last.reg(), kIllegalIndex);
-      }
-    }
-  }
-}
-
-
-// Make the type of the element at a given index be MEMORY.
-void VirtualFrame::SpillElementAt(int index) {
-  if (!elements_[index].is_valid()) return;
-
-  SyncElementAt(index);
-  // Number type information is preserved.
-  // Copies get their number information from their backing element.
-  TypeInfo info;
-  if (!elements_[index].is_copy()) {
-    info = elements_[index].type_info();
-  } else {
-    info = elements_[elements_[index].index()].type_info();
-  }
-  // The element is now in memory.  Its copied flag is preserved.
-  FrameElement new_element = FrameElement::MemoryElement(info);
-  if (elements_[index].is_copied()) {
-    new_element.set_copied();
-  }
-  if (elements_[index].is_untagged_int32()) {
-    new_element.set_untagged_int32(true);
-  }
-  if (elements_[index].is_register()) {
-    Unuse(elements_[index].reg());
-  }
-  elements_[index] = new_element;
-}
-
-
-// Clear the dirty bit for the element at a given index.
-void VirtualFrame::SyncElementAt(int index) {
-  if (index <= stack_pointer_) {
-    if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
-  } else if (index == stack_pointer_ + 1) {
-    SyncElementByPushing(index);
-  } else {
-    SyncRange(stack_pointer_ + 1, index);
-  }
-}
-
-
-void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
-  // Perform state changes on this frame that will make merging to the
-  // expected frame simpler, or else increase the likelihood that this
-  // frame will match another.
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement source = elements_[i];
-    FrameElement target = expected->elements_[i];
-
-    if (!target.is_valid() ||
-        (target.is_memory() && !source.is_memory() && source.is_synced())) {
-      // No code needs to be generated to invalidate valid elements.
-      // No code needs to be generated to move values to memory if
-      // they are already synced.  We perform those moves here, before
-      // merging.
-      if (source.is_register()) {
-        // If the frame is the code generator's current frame, we have
-        // to decrement both the frame-internal and global register
-        // counts.
-        if (cgen()->frame() == this) {
-          Unuse(source.reg());
-        } else {
-          set_register_location(source.reg(), kIllegalIndex);
-        }
-      }
-      elements_[i] = target;
-    } else if (target.is_register() && !target.is_synced() &&
-               !source.is_memory()) {
-      // If an element's target is a register that doesn't need to be
-      // synced, and the element is not in memory, then the sync state
-      // of the element is irrelevant.  We clear the sync bit.
-      ASSERT(source.is_valid());
-      elements_[i].clear_sync();
-    }
-  }
-}
-
-
-void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
-  ASSERT(height() >= dropped_args);
-  ASSERT(height() >= spilled_args);
-  ASSERT(dropped_args <= spilled_args);
-
-  SyncRange(0, element_count() - 1);
-  // Spill registers.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (is_used(i)) {
-      SpillElementAt(register_location(i));
-    }
-  }
-
-  // Spill the arguments.
-  for (int i = element_count() - spilled_args; i < element_count(); i++) {
-    if (!elements_[i].is_memory()) {
-      SpillElementAt(i);
-    }
-  }
-
-  // Forget the frame elements that will be popped by the call.
-  Forget(dropped_args);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
-  // Find the leftmost (ordered by register number) register whose only
-  // reference is in the frame.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (is_used(i) && cgen()->allocator()->count(i) == 1) {
-      SpillElementAt(register_location(i));
-      ASSERT(!cgen()->allocator()->is_used(i));
-      return RegisterAllocator::ToRegister(i);
-    }
-  }
-  return no_reg;
-}
-
-} }  // namespace v8::internal
diff --git a/src/virtual-frame-inl.h b/src/virtual-frame-inl.h
deleted file mode 100644
index c9f4aac..0000000
--- a/src/virtual-frame-inl.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_INL_H_
-#define V8_VIRTUAL_FRAME_INL_H_
-
-#include "virtual-frame.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "virtual-frame-heavy-inl.h"
-#else
-#include "virtual-frame-light-inl.h"
-#endif
-
-#endif  // V8_VIRTUAL_FRAME_INL_H_
diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h
deleted file mode 100644
index 681f93f..0000000
--- a/src/virtual-frame-light-inl.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
-#define V8_VIRTUAL_FRAME_LIGHT_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-#include "codegen-inl.h"
-#include "jump-target-light-inl.h"
-
-namespace v8 {
-namespace internal {
-
-VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
-    : element_count_(0),
-      top_of_stack_state_(NO_TOS_REGISTERS),
-      register_allocation_map_(0),
-      tos_known_smi_map_(0) { }
-
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame()
-    : element_count_(parameter_count() + 2),
-      top_of_stack_state_(NO_TOS_REGISTERS),
-      register_allocation_map_(0),
-      tos_known_smi_map_(0) { }
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
-    : element_count_(original->element_count()),
-      top_of_stack_state_(original->top_of_stack_state_),
-      register_allocation_map_(original->register_allocation_map_),
-      tos_known_smi_map_(0) { }
-
-
-bool VirtualFrame::Equals(const VirtualFrame* other) {
-  ASSERT(element_count() == other->element_count());
-  if (top_of_stack_state_ != other->top_of_stack_state_) return false;
-  if (register_allocation_map_ != other->register_allocation_map_) return false;
-  if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
-
-  return true;
-}
-
-
-void VirtualFrame::PrepareForReturn() {
-  // Don't bother flushing tos registers as returning does not require more
-  // access to the expression stack.
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
-    CodeGenerator* cgen)
-  : cgen_(cgen),
-    old_is_spilled_(
-        Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-  Isolate::Current()->set_is_virtual_frame_in_spilled_scope(false);
-  if (old_is_spilled_) {
-    VirtualFrame* frame = cgen->frame();
-    if (frame != NULL) {
-      frame->AssertIsSpilled();
-    }
-  }
-}
-
-
-VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
-  Isolate::Current()->set_is_virtual_frame_in_spilled_scope(old_is_spilled_);
-  if (old_is_spilled_) {
-    VirtualFrame* frame = cgen_->frame();
-    if (frame != NULL) {
-      frame->SpillAll();
-    }
-  }
-}
-
-
-CodeGenerator* VirtualFrame::cgen() const {
-  return CodeGeneratorScope::Current(Isolate::Current());
-}
-
-
-MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
-
-
-void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
-  if (arg_count != 0) Forget(arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  masm()->CallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() const {
-  return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() const {
-  return cgen()->scope()->num_stack_slots();
-}
-
-
-int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
-
-
-int VirtualFrame::context_index() { return frame_pointer() - 1; }
-
-
-int VirtualFrame::function_index() { return frame_pointer() - 2; }
-
-
-int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
-
-
-int VirtualFrame::fp_relative(int index) {
-  ASSERT(index < element_count());
-  ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-  return (frame_pointer() - index) * kPointerSize;
-}
-
-
-int VirtualFrame::expression_base_index() const {
-  return local0_index() + local_count();
-}
-
-
-int VirtualFrame::height() const {
-  return element_count() - expression_base_index();
-}
-
-
-MemOperand VirtualFrame::LocalAt(int index) {
-  ASSERT(0 <= index);
-  ASSERT(index < local_count());
-  return MemOperand(fp, kLocal0Offset - index * kPointerSize);
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_LIGHT_INL_H_
diff --git a/src/virtual-frame-light.cc b/src/virtual-frame-light.cc
deleted file mode 100644
index bbaaaf5..0000000
--- a/src/virtual-frame-light.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::Adjust(int count) {
-  ASSERT(count >= 0);
-  RaiseHeight(count, 0);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
-  UNIMPLEMENTED();
-  return no_reg;
-}
-
-
-InvalidVirtualFrameInitializer* kInvalidVirtualFrameInitializer = NULL;
-
-} }  // namespace v8::internal
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
deleted file mode 100644
index 310ff59..0000000
--- a/src/virtual-frame.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// Specialization of List::ResizeAdd to non-inlined version for FrameElements.
-// The function ResizeAdd becomes a real function, whose implementation is the
-// inlined ResizeAddInternal.
-template <>
-void List<FrameElement,
-          FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element) {
-  ResizeAddInternal(element);
-}
-
-} }  // namespace v8::internal
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
deleted file mode 100644
index 65d1009..0000000
--- a/src/virtual-frame.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_H_
-#define V8_VIRTUAL_FRAME_H_
-
-#include "frame-element.h"
-#include "macro-assembler.h"
-
-#include "list-inl.h"
-#include "utils.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/virtual-frame-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/virtual-frame-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/virtual-frame-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/virtual-frame-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Add() on List is inlined; ResizeAdd(), called by Add(), is inlined except
-// for Lists of FrameElements; ResizeAddInternal() is inlined in ResizeAdd().
-template <>
-void List<FrameElement,
-          FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element);
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_H_
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 21d3e54..fc3257d 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 86d5704..f2f0fc6 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -281,166 +281,6 @@
 }
 
 
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
-               op_name,
-               overwrite_name,
-               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
-               args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "",
-               static_operands_type_.ToString(),
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (!(left.is(left_arg) && right.is(right_arg))) {
-      if (left.is(right_arg) && right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          SetArgsReversed();
-        } else {
-          __ xchg(left, right);
-        }
-      } else if (left.is(left_arg)) {
-        __ movq(right_arg, right);
-      } else if (right.is(right_arg)) {
-        __ movq(left_arg, left);
-      } else if (left.is(right_arg)) {
-        if (IsOperationCommutative()) {
-          __ movq(left_arg, right);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying left argument.
-          __ movq(left_arg, left);
-          __ movq(right_arg, right);
-        }
-      } else if (right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          __ movq(right_arg, left);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying right argument.
-          __ movq(right_arg, right);
-          __ movq(left_arg, left);
-        }
-      } else {
-        // Order of moves is not important.
-        __ movq(left_arg, left);
-        __ movq(right_arg, right);
-      }
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Smi* right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ Push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (left.is(left_arg)) {
-      __ Move(right_arg, right);
-    } else if (left.is(right_arg) && IsOperationCommutative()) {
-      __ Move(left_arg, right);
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, left and right_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite left before moving
-      // it to left_arg.
-      __ movq(left_arg, left);
-      __ Move(right_arg, right);
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Smi* left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ Push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (right.is(right_arg)) {
-      __ Move(left_arg, left);
-    } else if (right.is(left_arg) && IsOperationCommutative()) {
-      __ Move(right_arg, left);
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, right and left_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite right before moving
-      // it to right_arg.
-      __ movq(right_arg, right);
-      __ Move(left_arg, left);
-    }
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
 class FloatingPointHelper : public AllStatic {
  public:
   // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
@@ -460,561 +300,28 @@
   // As above, but we know the operands to be numbers. In that case,
   // conversion can't fail.
   static void LoadNumbersAsIntegers(MacroAssembler* masm);
+
+  // Tries to convert two values to smis losslessly.
+  // This fails if either argument is neither a Smi nor a HeapNumber,
+  // or if it's a HeapNumber with a value that can't be converted
+  // losslessly to a Smi. In that case, control transitions to the
+  // on_not_smis label.
+  // On success, either control goes to the on_success label (if one is
+  // provided), or it falls through at the end of the code (if on_success
+  // is NULL).
+  // On success, both first and second hold Smi-tagged values.
+  // One of first or second must be non-Smi when entering.
+  static void NumbersToSmis(MacroAssembler* masm,
+                            Register first,
+                            Register second,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            Label* on_success,
+                            Label* on_not_smis);
 };
 
 
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
-  // dividend in rax and rdx free for the division.  Use rax, rbx for those.
-  Comment load_comment(masm, "-- Load arguments");
-  Register left = rdx;
-  Register right = rax;
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    left = rax;
-    right = rbx;
-    if (HasArgsInRegisters()) {
-      __ movq(rbx, rax);
-      __ movq(rax, rdx);
-    }
-  }
-  if (!HasArgsInRegisters()) {
-    __ movq(right, Operand(rsp, 1 * kPointerSize));
-    __ movq(left, Operand(rsp, 2 * kPointerSize));
-  }
-
-  Label not_smis;
-  // 2. Smi check both operands.
-  if (static_operands_type_.IsSmi()) {
-    // Skip smi check if we know that both arguments are smis.
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    if (op_ == Token::BIT_OR) {
-      // Handle OR here, since we do extra smi-checking in the or code below.
-      __ SmiOr(right, right, left);
-      GenerateReturn(masm);
-      return;
-    }
-  } else {
-    if (op_ != Token::BIT_OR) {
-      // Skip the check for OR as it is better combined with the
-      // actual operation.
-      Comment smi_check_comment(masm, "-- Smi check arguments");
-      __ JumpIfNotBothSmi(left, right, &not_smis);
-    }
-  }
-
-  // 3. Operands are both smis (except for OR), perform the operation leaving
-  // the result in rax and check the result if necessary.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  Label use_fp_on_smis;
-  switch (op_) {
-    case Token::ADD: {
-      ASSERT(right.is(rax));
-      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
-      break;
-    }
-
-    case Token::SUB: {
-      __ SmiSub(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-    }
-
-    case Token::MUL:
-      ASSERT(right.is(rax));
-      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
-      break;
-
-    case Token::DIV:
-      ASSERT(left.is(rax));
-      __ SmiDiv(left, left, right, &use_fp_on_smis);
-      break;
-
-    case Token::MOD:
-      ASSERT(left.is(rax));
-      __ SmiMod(left, left, right, slow);
-      break;
-
-    case Token::BIT_OR:
-      ASSERT(right.is(rax));
-      __ movq(rcx, right);  // Save the right operand.
-      __ SmiOr(right, right, left);  // BIT_OR is commutative.
-      __ testb(right, Immediate(kSmiTagMask));
-      __ j(not_zero, &not_smis);
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(rax));
-      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
-      break;
-
-    case Token::BIT_XOR:
-      ASSERT(right.is(rax));
-      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
-      break;
-
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      switch (op_) {
-        case Token::SAR:
-          __ SmiShiftArithmeticRight(left, left, right);
-          break;
-        case Token::SHR:
-          __ SmiShiftLogicalRight(left, left, right, slow);
-          break;
-        case Token::SHL:
-          __ SmiShiftLeft(left, left, right);
-          break;
-        default:
-          UNREACHABLE();
-      }
-      __ movq(rax, left);
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // 4. Emit return of result in rax.
-  GenerateReturn(masm);
-
-  // 5. For some operations emit inline code to perform floating point
-  // operations on known smis (e.g., if the result of the operation
-  // overflowed the smi range).
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      ASSERT(use_fp_on_smis.is_linked());
-      __ bind(&use_fp_on_smis);
-      if (op_ == Token::DIV) {
-        __ movq(rdx, rax);
-        __ movq(rax, rbx);
-      }
-      // left is rdx, right is rax.
-      __ AllocateHeapNumber(rbx, rcx, slow);
-      FloatingPointHelper::LoadSSE2SmiOperands(masm);
-      switch (op_) {
-        case Token::ADD: __ addsd(xmm0, xmm1); break;
-        case Token::SUB: __ subsd(xmm0, xmm1); break;
-        case Token::MUL: __ mulsd(xmm0, xmm1); break;
-        case Token::DIV: __ divsd(xmm0, xmm1); break;
-        default: UNREACHABLE();
-      }
-      __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-      __ movq(rax, rbx);
-      GenerateReturn(masm);
-    }
-    default:
-      break;
-  }
-
-  // 6. Non-smi operands, fall out to the non-smi code with the operands in
-  // rdx and rax.
-  Comment done_comment(masm, "-- Enter non-smi code");
-  __ bind(&not_smis);
-
-  switch (op_) {
-    case Token::DIV:
-    case Token::MOD:
-      // Operands are in rax, rbx at this point.
-      __ movq(rdx, rax);
-      __ movq(rax, rbx);
-      break;
-
-    case Token::BIT_OR:
-      // Right operand is saved in rcx and rax was destroyed by the smi
-      // operation.
-      __ movq(rax, rcx);
-      break;
-
-    default:
-      break;
-  }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (ShouldGenerateSmiCode()) {
-    GenerateSmiCode(masm, &call_runtime);
-  } else if (op_ != Token::MOD) {
-    if (!HasArgsInRegisters()) {
-      GenerateLoadArguments(masm);
-    }
-  }
-  // Floating point case.
-  if (ShouldGenerateFPCode()) {
-    switch (op_) {
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV: {
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-smi argument occurs
-          // (and only if smi code is generated). This is the right moment to
-          // patch to HEAP_NUMBERS state. The transition is attempted only for
-          // the four basic operations. The stub stays in the DEFAULT state
-          // forever for all other operations (also if smi code is skipped).
-          GenerateTypeTransition(masm);
-          break;
-        }
-
-        Label not_floats;
-        // rax: y
-        // rdx: x
-        if (static_operands_type_.IsNumber()) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(rdx);
-            __ AbortIfNotNumber(rax);
-          }
-          FloatingPointHelper::LoadSSE2NumberOperands(masm);
-        } else {
-          FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
-        }
-
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        // Allocate a heap number, if needed.
-        Label skip_allocation;
-        OverwriteMode mode = mode_;
-        if (HasArgsReversed()) {
-          if (mode == OVERWRITE_RIGHT) {
-            mode = OVERWRITE_LEFT;
-          } else if (mode == OVERWRITE_LEFT) {
-            mode = OVERWRITE_RIGHT;
-          }
-        }
-        switch (mode) {
-          case OVERWRITE_LEFT:
-            __ JumpIfNotSmi(rdx, &skip_allocation);
-            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-            __ movq(rdx, rbx);
-            __ bind(&skip_allocation);
-            __ movq(rax, rdx);
-            break;
-          case OVERWRITE_RIGHT:
-            // If the argument in rax is already an object, we skip the
-            // allocation of a heap number.
-            __ JumpIfNotSmi(rax, &skip_allocation);
-            // Fall through!
-          case NO_OVERWRITE:
-            // Allocate a heap number for the result. Keep rax and rdx intact
-            // for the possible runtime call.
-            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-            __ movq(rax, rbx);
-            __ bind(&skip_allocation);
-            break;
-          default: UNREACHABLE();
-        }
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-        GenerateReturn(masm);
-        __ bind(&not_floats);
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            !HasSmiCodeInStub()) {
-            // Execution reaches this point when the first non-number argument
-            // occurs (and only if smi code is skipped from the stub, otherwise
-            // the patching has already been done earlier in this case branch).
-            // A perfect moment to try patching to STRINGS for the ADD
-            // operation.
-            if (op_ == Token::ADD) {
-              GenerateTypeTransition(masm);
-            }
-        }
-        break;
-      }
-      case Token::MOD: {
-        // For MOD we go directly to runtime in the non-smi case.
-        break;
-      }
-      case Token::BIT_OR:
-      case Token::BIT_AND:
-      case Token::BIT_XOR:
-      case Token::SAR:
-      case Token::SHL:
-      case Token::SHR: {
-        Label skip_allocation, non_smi_shr_result;
-        Register heap_number_map = r9;
-        __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-        if (static_operands_type_.IsNumber()) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(rdx);
-            __ AbortIfNotNumber(rax);
-          }
-          FloatingPointHelper::LoadNumbersAsIntegers(masm);
-        } else {
-          FloatingPointHelper::LoadAsIntegers(masm,
-                                              &call_runtime,
-                                              heap_number_map);
-        }
-        switch (op_) {
-          case Token::BIT_OR:  __ orl(rax, rcx); break;
-          case Token::BIT_AND: __ andl(rax, rcx); break;
-          case Token::BIT_XOR: __ xorl(rax, rcx); break;
-          case Token::SAR: __ sarl_cl(rax); break;
-          case Token::SHL: __ shll_cl(rax); break;
-          case Token::SHR: {
-            __ shrl_cl(rax);
-            // Check if result is negative. This can only happen for a shift
-            // by zero.
-            __ testl(rax, rax);
-            __ j(negative, &non_smi_shr_result);
-            break;
-          }
-          default: UNREACHABLE();
-        }
-
-        STATIC_ASSERT(kSmiValueSize == 32);
-        // Tag smi result and return.
-        __ Integer32ToSmi(rax, rax);
-        GenerateReturn(masm);
-
-        // All bit-ops except SHR return a signed int32 that can be
-        // returned immediately as a smi.
-        // We might need to allocate a HeapNumber if we shift a negative
-        // number right by zero (i.e., convert to UInt32).
-        if (op_ == Token::SHR) {
-          ASSERT(non_smi_shr_result.is_linked());
-          __ bind(&non_smi_shr_result);
-          // Allocate a heap number if needed.
-          __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
-          switch (mode_) {
-            case OVERWRITE_LEFT:
-            case OVERWRITE_RIGHT:
-              // If the operand was an object, we skip the
-              // allocation of a heap number.
-              __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
-                                   1 * kPointerSize : 2 * kPointerSize));
-              __ JumpIfNotSmi(rax, &skip_allocation);
-              // Fall through!
-            case NO_OVERWRITE:
-              // Allocate heap number in new space.
-              // Not using AllocateHeapNumber macro in order to reuse
-              // already loaded heap_number_map.
-              __ AllocateInNewSpace(HeapNumber::kSize,
-                                    rax,
-                                    rcx,
-                                    no_reg,
-                                    &call_runtime,
-                                    TAG_OBJECT);
-              // Set the map.
-              if (FLAG_debug_code) {
-                __ AbortIfNotRootValue(heap_number_map,
-                                       Heap::kHeapNumberMapRootIndex,
-                                       "HeapNumberMap register clobbered.");
-              }
-              __ movq(FieldOperand(rax, HeapObject::kMapOffset),
-                      heap_number_map);
-              __ bind(&skip_allocation);
-              break;
-            default: UNREACHABLE();
-          }
-          // Store the result in the HeapNumber and return.
-          __ cvtqsi2sd(xmm0, rbx);
-          __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-          GenerateReturn(masm);
-        }
-
-        break;
-      }
-      default: UNREACHABLE(); break;
-    }
-  }
-
-  // If all else fails, use the runtime system to get the correct
-  // result. If arguments were passed in registers, now place them on the
-  // stack in the correct order below the return address.
-  __ bind(&call_runtime);
-
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-
-      if (HasArgsReversed()) {
-        lhs = rax;
-        rhs = rdx;
-      } else {
-        lhs = rdx;
-        rhs = rax;
-      }
-
-      // Test for string arguments before calling runtime.
-      Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in rdx and rax.
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
-      Condition is_smi;
-      is_smi = masm->CheckSmi(lhs);
-      __ j(is_smi, &not_string1);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
-      __ j(above_equal, &not_string1);
-
-      // First argument is a string, test second.
-      is_smi = masm->CheckSmi(rhs);
-      __ j(is_smi, &string1_smi2);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
-      __ j(above_equal, &string1);
-
-      // First and second argument are strings.
-      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&string_add_stub);
-
-      __ bind(&string1_smi2);
-      // First argument is a string, second is a smi. Try to lookup the number
-      // string for the smi in the number string cache.
-      NumberToStringStub::GenerateLookupNumberStringCache(
-          masm, rhs, rbx, rcx, r8, true, &string1);
-
-      // Replace second argument on stack and tailcall string add stub to make
-      // the result.
-      __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-      __ TailCallStub(&string_add_stub);
-
-      // Only first argument is a string.
-      __ bind(&string1);
-      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
-      // First argument was not a string, test second.
-      __ bind(&not_string1);
-      is_smi = masm->CheckSmi(rhs);
-      __ j(is_smi, &not_strings);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
-      __ j(above_equal, &not_strings);
-
-      // Only second argument is a string.
-      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
-      __ bind(&not_strings);
-      // Neither argument is a string.
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    }
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
-    case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
-    case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
-    case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
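
The string1_smi2 path above probes a number-to-string cache before building a new
string. A hedged stand-alone sketch of that direct-mapped cache idea (sizes,
probing, and names are illustrative, not the actual NumberToStringStub layout):

    #include <array>
    #include <cstddef>
    #include <cstdio>
    #include <optional>
    #include <string>
    #include <utility>

    struct NumberStringCache {
      static constexpr std::size_t kSize = 64;  // power of two, illustrative
      std::array<std::optional<std::pair<int, std::string>>, kSize> entries;

      // Probe the slot derived from the smi value; fill it on a miss.
      std::string Lookup(int smi) {
        auto& slot = entries[static_cast<std::size_t>(smi) & (kSize - 1)];
        if (slot && slot->first == smi) return slot->second;  // cache hit
        std::string made = std::to_string(smi);               // slow path
        slot = std::make_pair(smi, made);
        return made;
      }
    };

    int main() {
      NumberStringCache cache;
      std::printf("%s %s\n", cache.Lookup(42).c_str(), cache.Lookup(42).c_str());
    }
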
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  ASSERT(!HasArgsInRegisters());
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If arguments are not passed in registers, remove them from the stack
-  // before returning.
-  if (!HasArgsInRegisters()) {
-    __ ret(2 * kPointerSize);  // Remove both operands.
-  } else {
-    __ ret(0);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  ASSERT(HasArgsInRegisters());
-  __ pop(rcx);
-  if (HasArgsReversed()) {
-    __ push(rax);
-    __ push(rdx);
-  } else {
-    __ push(rdx);
-    __ push(rax);
-  }
-  __ push(rcx);
-}
-
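
GenerateRegisterArgsPush has to slide the two operands underneath the return
address; a minimal model of that shuffle in plain C++ (the vector's back()
stands in for the stack top):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<const char*> stack = {"ret_addr"};  // back() is the stack top
      const char* ret = stack.back();                 // __ pop(rcx)
      stack.pop_back();
      stack.push_back("left (rdx)");                  // __ push(rdx)
      stack.push_back("right (rax)");                 // __ push(rax)
      stack.push_back(ret);                           // __ push(rcx)
      for (auto it = stack.rbegin(); it != stack.rend(); ++it)
        std::printf("%s\n", *it);                     // ret_addr, right, left
    }
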
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  // Ensure the operands are on the stack.
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  // Left and right arguments are already on stack.
-  __ pop(rcx);  // Save the return address.
-
-  // Push this stub's key.
-  __ Push(Smi::FromInt(MinorKey()));
-
-  // Although the operation and the type info are encoded into the key,
-  // the encoding is opaque, so push them too.
-  __ Push(Smi::FromInt(op_));
-
-  __ Push(Smi::FromInt(runtime_operands_type_));
-
-  __ push(rcx);  // The return address.
-
-  // Perform patching to an appropriate fast case and return the result.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
@@ -1105,29 +412,30 @@
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
 
-  // We only generate heapnumber answers for overflowing calculations
-  // for the four basic arithmetic operations.
-  bool generate_inline_heapnumber_results =
-      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
-      (op_ == Token::ADD || op_ == Token::SUB ||
-       op_ == Token::MUL || op_ == Token::DIV);
-
   // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
   Register left = rdx;
   Register right = rax;
 
+  // We only generate heapnumber answers for overflowing calculations
+  // for the four basic arithmetic operations and logical right shift by 0.
+  bool generate_inline_heapnumber_results =
+      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+      (op_ == Token::ADD || op_ == Token::SUB ||
+       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
 
   // Smi check of both operands.  If op is BIT_OR, the check is delayed
   // until after the OR operation.
   Label not_smis;
   Label use_fp_on_smis;
-  Label restore_MOD_registers;  // Only used if op_ == Token::MOD.
+  Label fail;
 
   if (op_ != Token::BIT_OR) {
     Comment smi_check_comment(masm, "-- Smi check arguments");
     __ JumpIfNotBothSmi(left, right, &not_smis);
   }
 
+  Label smi_values;
+  __ bind(&smi_values);
   // Perform the operation.
   Comment perform_smi(masm, "-- Perform smi operation");
   switch (op_) {
@@ -1166,9 +474,7 @@
 
     case Token::BIT_OR: {
       ASSERT(right.is(rax));
-      __ movq(rcx, right);  // Save the right operand.
-      __ SmiOr(right, right, left);  // BIT_OR is commutative.
-      __ JumpIfNotSmi(right, &not_smis);  // Test delayed until after BIT_OR.
+      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
       break;
       }
     case Token::BIT_XOR:
@@ -1192,7 +498,7 @@
       break;
 
     case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &not_smis);
+      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
       __ movq(rax, left);
       break;
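
Redirecting SHR's failure label from not_smis to use_fp_on_smis matters because
'>>>' is the one smi-input operation whose result can exceed the signed 32-bit
smi payload. A small stand-alone illustration (plain C++, no V8 types):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t left = -1;                                    // a valid smi payload
      uint32_t shifted = static_cast<uint32_t>(left) >> 0;  // JS: -1 >>> 0
      bool fits_smi = shifted <= static_cast<uint32_t>(INT32_MAX);
      std::printf("-1 >>> 0 = %u, fits in a smi: %s\n",     // 4294967295, no
                  shifted, fits_smi ? "yes" : "no");
    }
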
 
@@ -1203,41 +509,52 @@
   // 5. Emit return of result in rax.  Some operations have registers pushed.
   __ ret(0);
 
-  // 6. For some operations emit inline code to perform floating point
-  //    operations on known smis (e.g., if the result of the operation
-  //    overflowed the smi range).
-  __ bind(&use_fp_on_smis);
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    // Restore left and right to rdx and rax.
-    __ movq(rdx, rcx);
-    __ movq(rax, rbx);
-  }
-
-
-  if (generate_inline_heapnumber_results) {
-    __ AllocateHeapNumber(rcx, rbx, slow);
-    Comment perform_float(masm, "-- Perform float operation on smis");
-    FloatingPointHelper::LoadSSE2SmiOperands(masm);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
+  if (use_fp_on_smis.is_linked()) {
+    // 6. For some operations emit inline code to perform floating point
+    //    operations on known smis (e.g., if the result of the operation
+    //    overflowed the smi range).
+    __ bind(&use_fp_on_smis);
+    if (op_ == Token::DIV || op_ == Token::MOD) {
+      // Restore left and right to rdx and rax.
+      __ movq(rdx, rcx);
+      __ movq(rax, rbx);
     }
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
+
+    if (generate_inline_heapnumber_results) {
+      __ AllocateHeapNumber(rcx, rbx, slow);
+      Comment perform_float(masm, "-- Perform float operation on smis");
+      if (op_ == Token::SHR) {
+        __ SmiToInteger32(left, left);
+        __ cvtqsi2sd(xmm0, left);
+      } else {
+        FloatingPointHelper::LoadSSE2SmiOperands(masm);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+      }
+      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+      __ movq(rax, rcx);
+      __ ret(0);
+    } else {
+      __ jmp(&fail);
+    }
   }
 
   // 7. Non-smi operands reach the end of the code generated by
   //    GenerateSmiCode, and fall through to subsequent code,
   //    with the operands in rdx and rax.
-  Comment done_comment(masm, "-- Enter non-smi code");
+  //    But first we check whether the non-smi values are HeapNumbers
+  //    holding values that could be represented as smis.
   __ bind(&not_smis);
-  if (op_ == Token::BIT_OR) {
-    __ movq(right, rcx);
-  }
+  Comment done_comment(masm, "-- Enter non-smi code");
+  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
+                                     &smi_values, &fail);
+  __ jmp(&smi_values);
+  __ bind(&fail);
 }
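
The new if (use_fp_on_smis.is_linked()) guard emits the slow-path block only
when some earlier instruction actually jumped to it. A toy model of that
pattern (illustrative names, nothing like the real assembler API):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Label { bool linked = false; };  // set once any jump targets it

    struct Assembler {
      std::vector<std::string> code;
      void jmp(Label* l) { l->linked = true; code.push_back("jmp <label>"); }
      void bind(Label*) { code.push_back("<label>:"); }
    };

    int main() {
      Assembler masm;
      Label use_fp_on_smis;
      masm.code.push_back("fast path");
      // Suppose no operation ever jumped to the label:
      if (use_fp_on_smis.linked) {          // whole fallback block is skipped
        masm.bind(&use_fp_on_smis);
        masm.code.push_back("<fp fallback>");
      }
      std::printf("emitted %zu instructions\n", masm.code.size());
    }
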
 
 
@@ -1422,12 +739,25 @@
 
 
 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label not_smi;
+  Label call_runtime;
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
 
-  GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
-
-  __ bind(&not_smi);
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
   GenerateTypeTransition(masm);
+
+  if (call_runtime.is_linked()) {
+    __ bind(&call_runtime);
+    GenerateCallRuntimeCode(masm);
+  }
 }
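
The restructuring above leans on the type-recording IC's one-way widening: a
stub compiled for smi results transitions (via GenerateTypeTransition) to a
wider stub when it sees a result it cannot return. A loose sketch of that
ratchet; the enum ordering is illustrative, not TRBinaryOpIC's exact lattice:

    #include <cstdio>

    enum ResultType { UNINITIALIZED, SMI, HEAP_NUMBER, GENERIC };

    // Recorded types only widen; a miss never narrows the state.
    static ResultType Widen(ResultType current, ResultType seen) {
      return seen > current ? seen : current;
    }

    int main() {
      ResultType state = UNINITIALIZED;
      state = Widen(state, SMI);          // first result was a smi
      state = Widen(state, HEAP_NUMBER);  // an overflow produced a double
      std::printf("recorded state: %d\n", state);  // 2
    }
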
 
 
@@ -2046,6 +1376,62 @@
 }
 
 
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
+                                        Register first,
+                                        Register second,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Label* on_success,
+                                        Label* on_not_smis) {
+  Register heap_number_map = scratch3;
+  Register smi_result = scratch1;
+  Label done;
+
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  NearLabel first_smi, check_second;
+  __ JumpIfSmi(first, &first_smi);
+  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
+  __ j(not_equal, on_not_smis);
+  // Convert HeapNumber to smi if possible.
+  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+  __ movq(scratch2, xmm0);
+  __ cvttsd2siq(smi_result, xmm0);
+  // Check if conversion was successful by converting back and
+  // comparing to the original double's bits.
+  __ cvtlsi2sd(xmm1, smi_result);
+  __ movq(kScratchRegister, xmm1);
+  __ cmpq(scratch2, kScratchRegister);
+  __ j(not_equal, on_not_smis);
+  __ Integer32ToSmi(first, smi_result);
+
+  __ bind(&check_second);
+  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
+  __ bind(&first_smi);
+  if (FLAG_debug_code) {
+    // Second should be non-smi if we get here.
+    __ AbortIfSmi(second);
+  }
+  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
+  __ j(not_equal, on_not_smis);
+  // Convert second to smi, if possible.
+  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+  __ movq(scratch2, xmm0);
+  __ cvttsd2siq(smi_result, xmm0);
+  __ cvtlsi2sd(xmm1, smi_result);
+  __ movq(kScratchRegister, xmm1);
+  __ cmpq(scratch2, kScratchRegister);
+  __ j(not_equal, on_not_smis);
+  __ Integer32ToSmi(second, smi_result);
+  if (on_success != NULL) {
+    __ jmp(on_success);
+  } else {
+    __ bind(&done);
+  }
+}
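
A portable model of the round-trip test NumbersToSmis performs: truncate
(cvttsd2siq), convert back (cvtlsi2sd), and compare raw bit patterns, which
rejects fractions, NaN, and -0.0 in one comparison. The explicit range
pre-check below is an assumption standing in for the "integer indefinite"
value the truncating instruction yields on overflow, which likewise fails the
stub's bit comparison:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static bool DoubleHoldsSmiValue(double value, int32_t* out) {
      if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;
      int32_t truncated = static_cast<int32_t>(value);     // cvttsd2siq
      double round_trip = static_cast<double>(truncated);  // cvtlsi2sd
      uint64_t a, b;
      std::memcpy(&a, &value, sizeof(a));
      std::memcpy(&b, &round_trip, sizeof(b));
      if (a != b) return false;  // catches fractions, NaN and -0.0
      *out = truncated;
      return true;
    }

    int main() {
      int32_t v;
      std::printf("%d %d %d\n",
                  DoubleHoldsSmiValue(42.0, &v),    // 1
                  DoubleHoldsSmiValue(42.5, &v),    // 0
                  DoubleHoldsSmiValue(-0.0, &v));   // 0
    }
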
+
+
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done;
 
@@ -3759,10 +3145,10 @@
   // is an instance of the function and anything else to
   // indicate that the value is not an instance.
 
-  static const int kOffsetToMapCheckValue = 5;
-  static const int kOffsetToResultValue = 21;
+  static const int kOffsetToMapCheckValue = 2;
+  static const int kOffsetToResultValue = 18;
   // The last 4 bytes of the instruction sequence
-  //   movq(rax, FieldOperand(rdi, HeapObject::kMapOffset)
+  //   movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
   //   Move(kScratchRegister, FACTORY->the_hole_value())
   // in front of the hole value address.
   static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
@@ -3828,7 +3214,7 @@
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
-      __ Assert(equal, "InstanceofStub unexpected call site cache.");
+      __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
   }
 
@@ -3865,7 +3251,7 @@
     if (FLAG_debug_code) {
       __ movl(rax, Immediate(kWordBeforeResultValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
-      __ Assert(equal, "InstanceofStub unexpected call site cache.");
+      __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
     }
     __ xorl(rax, rax);
   }
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 246650a..f97d099 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -71,145 +71,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
-  NO_GENERIC_BINARY_FLAGS = 0,
-  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      GenericBinaryFlags flags,
-                      TypeInfo operands_type = TypeInfo::Unknown())
-      : op_(op),
-        mode_(mode),
-        flags_(flags),
-        args_in_registers_(false),
-        args_reversed_(false),
-        static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
-        name_(NULL) {
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        flags_(FlagBits::decode(key)),
-        args_in_registers_(ArgsInRegistersBits::decode(key)),
-        args_reversed_(ArgsReversedBits::decode(key)),
-        static_operands_type_(TypeInfo::ExpandedRepresentation(
-            StaticTypeInfoBits::decode(key))),
-        runtime_operands_type_(runtime_operands_type),
-        name_(NULL) {
-  }
-
-  // Generate code to call the stub with the supplied arguments. This will add
-  // code at the call site to prepare arguments either in registers or on the
-  // stack together with the actual call.
-  void GenerateCall(MacroAssembler* masm, Register left, Register right);
-  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
-  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
-  bool ArgsInRegistersSupported() {
-    return (op_ == Token::ADD) || (op_ == Token::SUB)
-        || (op_ == Token::MUL) || (op_ == Token::DIV);
-  }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  GenericBinaryFlags flags_;
-  bool args_in_registers_;  // Arguments passed in registers not on the stack.
-  bool args_reversed_;  // Left and right argument are swapped.
-
-  // Number type information of operands, determined by code generator.
-  TypeInfo static_operands_type_;
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           static_cast<int>(flags_),
-           static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_),
-           static_operands_type_.ToString());
-  }
-#endif
-
-  // Minor key encoding in 18 bits TTTNNNFRAOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
-  class ArgsReversedBits: public BitField<bool, 10, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
-  class StaticTypeInfoBits: public BitField<int, 12, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    // Encode the parameters in a unique 18-bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | FlagBits::encode(flags_)
-           | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_)
-           | StaticTypeInfoBits::encode(
-               static_operands_type_.ThreeBitRepresentation())
-           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
-  }
-
-  void Generate(MacroAssembler* masm);
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  bool IsOperationCommutative() {
-    return (op_ == Token::ADD) || (op_ == Token::MUL);
-  }
-
-  void SetArgsInRegisters() { args_in_registers_ = true; }
-  void SetArgsReversed() { args_reversed_ = true; }
-  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
-  bool HasArgsInRegisters() { return args_in_registers_; }
-  bool HasArgsReversed() { return args_reversed_; }
-
-  bool ShouldGenerateSmiCode() {
-    return HasSmiCodeInStub() &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  friend class CodeGenerator;
-  friend class LCodeGen;
-};
-
-
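
For reference, a minimal sketch of the BitField pattern the deleted MinorKey()
relied on: each parameter owns a fixed (shift, size) slice of one integer key,
so a stub can be reconstructed from its key alone. This mirrors the real
BitField template in spirit only:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static uint32_t encode(T value) {
        assert((static_cast<uint32_t>(value) >> kSize) == 0);  // must fit
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> kShift);
      }
    };

    // Layout echoing the deleted class: mode in bits 0-1, op in bits 2-8.
    using ModeBits = BitField<int, 0, 2>;
    using OpBits = BitField<int, 2, 7>;

    int main() {
      uint32_t key = ModeBits::encode(1) | OpBits::encode(42);
      std::printf("mode=%d op=%d\n", ModeBits::decode(key), OpBits::decode(key));
    }
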
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
deleted file mode 100644
index 53caf91..0000000
--- a/src/x64/codegen-x64-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_X64_CODEGEN_X64_INL_H_
-#define V8_X64_CODEGEN_X64_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_CODEGEN_X64_INL_H_
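
The deleted header belonged to the old virtual-frame backend's deferred-code
machinery: fast-path code jumps to an out-of-line body that is emitted after
the main body (ProcessDeferred). A toy model of that scheme, with illustrative
names:

    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    struct CodeBuffer {
      std::vector<std::string> instrs;
      std::vector<std::function<void(CodeBuffer*)>> deferred;

      void Emit(const std::string& s) { instrs.push_back(s); }
      void Defer(std::function<void(CodeBuffer*)> body) {
        deferred.push_back(body);  // emitted later, off the hot path
      }
      void ProcessDeferred() {
        for (auto& body : deferred) body(this);
      }
    };

    int main() {
      CodeBuffer buf;
      buf.Emit("fast path");
      buf.Defer([](CodeBuffer* b) { b->Emit("slow path (out of line)"); });
      buf.Emit("more fast path");
      buf.ProcessDeferred();  // slow paths land after the hot code
      for (auto& i : buf.instrs) std::printf("%s\n", i.c_str());
    }
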
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 9cf85c4..f8f2d6e 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -29,81 +29,14 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ push(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
-      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
-    }
-  }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
-  // Restore registers in reverse order due to the stack.
-  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ pop(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore) {
-      action &= ~kSyncedFlag;
-      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
-    }
-  }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
-  frame_state_.Restore(masm_);
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  frame_state_->Restore(masm);
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -114,8636 +47,6 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      destination_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
-                           ControlDestination* destination)
-    : owner_(owner),
-      destination_(destination),
-      previous_(owner->state()) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      state_(NULL),
-      loop_nesting_(0),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false) {
-}
-
-
-// Calling conventions:
-// rbp: caller's frame pointer
-// rsp: stack pointer
-// rdi: called JS function
-// rsi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  set_in_spilled_code(false);
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  Isolate::Current()->set_jump_target_compiling_deferred_code(false);
-
-  {
-    CodeGenState state(this);
-    // Entry:
-    // Stack: receiver, arguments, return address.
-    // rbp: caller's frame pointer
-    // rsp: stack pointer
-    // rdi: called JS function
-    // rsi: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ int3();
-    }
-#endif
-
-    frame_->Enter();
-
-    // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots();
-
-    // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      Comment cmnt(masm_, "[ allocate local context");
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      frame_->PushFunction();
-      Result context;
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        context = frame_->CallStub(&stub, 1);
-      } else {
-        context = frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-      // Update context local.
-      frame_->SaveContextRegister();
-
-      // Verify that the runtime call result and rsi agree.
-      if (FLAG_debug_code) {
-        __ cmpq(context.reg(), rsi);
-        __ Assert(equal, "Runtime::NewContext should end up in rsi");
-      }
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, the value that ends up in the
-      // context must be that of the last (rightmost) such parameter. This is
-      // a rare case so we don't check for it; instead we rely on the copying
-      // order: such a parameter is copied repeatedly into the same
-      // context location and thus the last value is what is seen inside
-      // the function.
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          // The use of SlotOperand below is safe in unspilled code
-          // because the slot is guaranteed to be a context slot.
-          //
-          // There are no parameters in the global scope.
-          ASSERT(!scope()->is_global_scope());
-          frame_->PushParameterAt(i);
-          Result value = frame_->Pop();
-          value.ToRegister();
-
-          // SlotOperand loads context.reg() with the context object
-          // stored to, used below in RecordWrite.
-          Result context = allocator_->Allocate();
-          ASSERT(context.is_valid());
-          __ movq(SlotOperand(slot, context.reg()), value.reg());
-          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          Result scratch = allocator_->Allocate();
-          ASSERT(scratch.is_valid());
-          frame_->Spill(context.reg());
-          frame_->Spill(value.reg());
-          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->Push(FACTORY->the_hole_value());
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-    CheckStack();
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-
-      // Handle the return from the function.
-      if (has_valid_frame()) {
-        // If there is a valid frame, control flow can fall off the end of
-        // the body.  In that case there is an implicit return statement.
-        ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(info->function());
-        frame_->PrepareForReturn();
-        Result undefined(FACTORY->undefined_value());
-        if (function_return_.is_bound()) {
-          function_return_.Jump(&undefined);
-        } else {
-          function_return_.Bind(&undefined);
-          GenerateReturnSequence(&undefined);
-        }
-      } else if (function_return_.is_linked()) {
-        // If the return target has dangling jumps to it, then we have not
-        // yet generated the return sequence.  This can happen when (a)
-        // control does not flow off the end of the body so we did not
-        // compile an artificial return statement just above, and (b) there
-        // are return statements in the body but (c) they are all shadowed.
-        Result return_value;
-        function_return_.Bind(&return_value);
-        GenerateReturnSequence(&return_value);
-      }
-    }
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(state_ == NULL);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    info->isolate()->set_jump_target_compiling_deferred_code(true);
-    ProcessDeferred();
-    info->isolate()->set_jump_target_compiling_deferred_code(false);
-  }
-
-  // There is no need to delete the register allocator, it is a
-  // stack-allocated local.
-  allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(rsi));  // do not overwrite context register
-      Register context = rsi;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we are
-      // always at a function context. However, it is safe to dereference
-      // because the function context of a function context is itself. Before
-      // deleting this mov we should try to create a counter-example first,
-      // though...)
-      __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return Operand(rsp, 0);
-  }
-}
-
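
A simplified model of the CONTEXT case above: each hop loads the context's
closure and then that closure's own context, chain_length times. The struct
collapses both loads into one 'outer' pointer purely for illustration:

    #include <cstdio>

    struct Context {
      Context* outer;  // stands in for CLOSURE_INDEX -> kContextOffset
      int slots[4];
    };

    static int LoadContextSlot(Context* current, int chain_length, int index) {
      for (int i = 0; i < chain_length; i++) current = current->outer;
      return current->slots[index];
    }

    int main() {
      Context outer_fn{nullptr, {7, 0, 0, 0}};
      Context inner_fn{&outer_fn, {0, 0, 0, 0}};
      std::printf("%d\n", LoadContextSlot(&inner_fn, 1, 0));  // 7
    }
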
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                         Result tmp,
-                                                         JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  ASSERT(tmp.is_register());
-  Register context = rsi;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
-                Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-  }
-  // Check that last extension is NULL.
-  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  slow->Branch(not_equal, not_taken);
-  __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
-                                  ControlDestination* dest,
-                                  bool force_control) {
-  ASSERT(!in_spilled_code());
-  int original_height = frame_->height();
-
-  { CodeGenState new_state(this, dest);
-    Visit(expr);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (eg, a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        !dest->is_used() &&
-        frame_->height() == original_height) {
-      dest->Goto(true);
-    }
-  }
-
-  if (force_control && !dest->is_used()) {
-    // Convert the TOS value into flow to the control destination.
-    ToBoolean(dest);
-  }
-
-  ASSERT(!(force_control && !dest->is_used()));
-  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Load(expression);
-  frame_->SpillAll();
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  JumpTarget true_target;
-  JumpTarget false_target;
-  ControlDestination dest(&true_target, &false_target, true);
-  LoadCondition(expr, &dest, false);
-
-  if (dest.false_was_fall_through()) {
-    // The false target was just bound.
-    JumpTarget loaded;
-    frame_->Push(FACTORY->false_value());
-    // There may be dangling jumps to the true target.
-    if (true_target.is_linked()) {
-      loaded.Jump();
-      true_target.Bind();
-      frame_->Push(FACTORY->true_value());
-      loaded.Bind();
-    }
-
-  } else if (dest.is_used()) {
-    // There is true, and possibly false, control flow (with true as
-    // the fall through).
-    JumpTarget loaded;
-    frame_->Push(FACTORY->true_value());
-    if (false_target.is_linked()) {
-      loaded.Jump();
-      false_target.Bind();
-      frame_->Push(FACTORY->false_value());
-      loaded.Bind();
-    }
-
-  } else {
-    // We have a valid value on top of the frame, but we still may
-    // have dangling jumps to the true and false targets from nested
-    // subexpressions (eg, the left subexpressions of the
-    // short-circuited boolean operators).
-    ASSERT(has_valid_frame());
-    if (true_target.is_linked() || false_target.is_linked()) {
-      JumpTarget loaded;
-      loaded.Jump();  // Don't lose the current TOS.
-      if (true_target.is_linked()) {
-        true_target.Bind();
-        frame_->Push(FACTORY->true_value());
-        if (false_target.is_linked()) {
-          loaded.Jump();
-        }
-      }
-      if (false_target.is_linked()) {
-        false_target.Bind();
-        frame_->Push(FACTORY->false_value());
-      }
-      loaded.Bind();
-    }
-  }
-
-  ASSERT(has_valid_frame());
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  if (in_spilled_code()) {
-    frame_->EmitPush(GlobalObjectOperand());
-  } else {
-    Result temp = allocator_->Allocate();
-    __ movq(temp.reg(), GlobalObjectOperand());
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
-  Result temp = allocator_->Allocate();
-  Register reg = temp.reg();
-  __ movq(reg, GlobalObjectOperand());
-  __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the arguments marker value
-    // as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->Push(FACTORY->arguments_marker());
-  } else {
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    frame_->PushFunction();
-    frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope()->num_parameters()));
-    Result result = frame_->CallStub(&stub, 3);
-    frame_->Push(&result);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  bool skip_arguments = false;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
-    if (probe.is_constant()) {
-      // We have to skip updating the arguments object if it has
-      // been assigned a proper value.
-      skip_arguments = !probe.handle()->IsArgumentsMarker();
-    } else {
-      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
-      probe.Unuse();
-      done.Branch(not_equal);
-    }
-  }
-  if (!skip_arguments) {
-    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  }
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-  return frame_->Pop();
-}
-
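
A toy model of the lazy-allocation sentinel used above: the 'arguments' slot
is primed with a marker, and the real object is built only if the slot still
holds the marker when it is first needed. The real marker is a dedicated heap
value; a string stands in for it here:

    #include <cstdio>
    #include <string>

    static const std::string kArgumentsMarker = "<arguments marker>";

    struct Frame {
      std::string arguments_slot = kArgumentsMarker;  // nothing allocated yet
    };

    static std::string GetArguments(Frame* frame) {
      if (frame->arguments_slot == kArgumentsMarker) {
        frame->arguments_slot = "[arguments object]";  // allocate on first use
      }
      return frame->arguments_slot;
    }

    int main() {
      Frame f;
      std::printf("%s\n", GetArguments(&f).c_str());
    }
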
-// -------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool  persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  // References are loaded from both spilled and unspilled code.  Set the
-  // state to unspilled to allow that (and explicitly spill after
-  // construction at the construction sites).
-  bool was_in_spilled_code = in_spilled_code_;
-  in_spilled_code_ = false;
-
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      // If rax is free, the register allocator prefers it.  Thus the code
-      // generator will load the global object into rax, which is where
-      // LoadIC wants it.  Most uses of Reference call LoadIC directly
-      // after the reference is created.
-      frame_->Spill(rax);
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-
-  in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  // Pop a reference from the stack while preserving TOS.
-  Comment cmnt(masm_, "[ UnloadReference");
-  frame_->Nip(ref->size());
-  ref->set_unloaded();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
-  Comment cmnt(masm_, "[ ToBoolean");
-
-  // The value to convert should be popped from the frame.
-  Result value = frame_->Pop();
-  value.ToRegister();
-
-  if (value.is_number()) {
-    // Fast case if TypeInfo indicates only numbers.
-    if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg());
-    }
-    // Smi => false iff zero.
-    __ Cmp(value.reg(), Smi::FromInt(0));
-    if (value.is_smi()) {
-      value.Unuse();
-      dest->Split(not_zero);
-    } else {
-      dest->false_target()->Branch(equal);
-      Condition is_smi = masm_->CheckSmi(value.reg());
-      dest->true_target()->Branch(is_smi);
-      __ xorpd(xmm0, xmm0);
-      __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
-      value.Unuse();
-      dest->Split(not_zero);
-    }
-  } else {
-    // Fast case checks.
-    // 'false' => false.
-    __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
-    dest->false_target()->Branch(equal);
-
-    // 'true' => true.
-    __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
-    dest->true_target()->Branch(equal);
-
-    // 'undefined' => false.
-    __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
-    dest->false_target()->Branch(equal);
-
-    // Smi => false iff zero.
-    __ Cmp(value.reg(), Smi::FromInt(0));
-    dest->false_target()->Branch(equal);
-    Condition is_smi = masm_->CheckSmi(value.reg());
-    dest->true_target()->Branch(is_smi);
-
-    // Call the stub for all other cases.
-    frame_->Push(&value);  // Undo the Pop() from above.
-    ToBooleanStub stub;
-    Result temp = frame_->CallStub(&stub, 1);
-    // Convert the result to a condition code.
-    __ testq(temp.reg(), temp.reg());
-    temp.Unuse();
-    dest->Split(not_equal);
-  }
-}
-
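
A minimal model of the numeric fast case in the deleted ToBoolean: a number
converts to false exactly when it is +0, -0, or NaN, which is what the
xorpd/ucomisd pair checks against the heap number's value:

    #include <cmath>
    #include <cstdio>

    static bool ToBooleanNumber(double v) { return v != 0.0 && !std::isnan(v); }

    int main() {
      std::printf("%d %d %d %d\n",
                  ToBooleanNumber(0.0),    // 0
                  ToBooleanNumber(-0.0),   // 0
                  ToBooleanNumber(NAN),    // 0
                  ToBooleanNumber(3.14));  // 1
    }
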
-
-// Call the specialized stub for a binary operation.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
-  DeferredInlineBinaryOperation(Token::Value op,
-                                Register dst,
-                                Register left,
-                                Register right,
-                                OverwriteMode mode)
-      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
-    set_comment("[ DeferredInlineBinaryOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register left_;
-  Register right_;
-  OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
-  Label done;
-  if ((op_ == Token::ADD)
-      || (op_ == Token::SUB)
-      || (op_ == Token::MUL)
-      || (op_ == Token::DIV)) {
-    Label call_runtime;
-    Label left_smi, right_smi, load_right, do_op;
-    __ JumpIfSmi(left_, &left_smi);
-    __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_LEFT) {
-      __ movq(dst_, left_);
-    }
-    __ jmp(&load_right);
-
-    __ bind(&left_smi);
-    __ SmiToInteger32(left_, left_);
-    __ cvtlsi2sd(xmm0, left_);
-    __ Integer32ToSmi(left_, left_);
-    if (mode_ == OVERWRITE_LEFT) {
-      Label alloc_failure;
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-
-    __ bind(&load_right);
-    __ JumpIfSmi(right_, &right_smi);
-    __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_RIGHT) {
-      __ movq(dst_, right_);
-    } else if (mode_ == NO_OVERWRITE) {
-      Label alloc_failure;
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-    __ jmp(&do_op);
-
-    __ bind(&right_smi);
-    __ SmiToInteger32(right_, right_);
-    __ cvtlsi2sd(xmm1, right_);
-    __ Integer32ToSmi(right_, right_);
-    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-      Label alloc_failure;
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-
-    __ bind(&do_op);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
-    }
-    __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    __ jmp(&done);
-
-    __ bind(&call_runtime);
-  }
-  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, left_, right_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-  __ bind(&done);
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
-                                  Token::Value op,
-                                  const Result& right,
-                                  const Result& left) {
-  // Set TypeInfo of result according to the operation performed.
-  // We rely on the fact that smis have a 32 bit payload on x64.
-  STATIC_ASSERT(kSmiValueSize == 32);
-  switch (op) {
-    case Token::COMMA:
-      return right.type_info();
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      return operands_type;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      // Result is always a smi.
-      return TypeInfo::Smi();
-    case Token::SAR:
-    case Token::SHL:
-      // Result is always a smi.
-      return TypeInfo::Smi();
-    case Token::SHR:
-      // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
-      return (right.is_constant() && right.handle()->IsSmi()
-                     && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
-          ? TypeInfo::Smi()
-          : TypeInfo::Number();
-    case Token::ADD:
-      if (operands_type.IsNumber()) {
-        return TypeInfo::Number();
-      } else if (left.type_info().IsString() || right.type_info().IsString()) {
-        return TypeInfo::String();
-      } else {
-        return TypeInfo::Unknown();
-      }
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      return TypeInfo::Number();
-    default:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return TypeInfo::Unknown();
-}
-
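
The SHR rule in CalculateTypeInfo follows from a simple bound: with a masked
shift count of at least 1, the largest possible result is 2^31 - 1, which
always fits the smi payload; only a shift by 0 can reach 2^32 - 1. A quick
check in plain C++:

    #include <cstdint>
    #include <cstdio>

    int main() {
      for (int shift : {0, 1, 31}) {  // masked shift counts
        uint32_t max_result = UINT32_MAX >> shift;
        std::printf("shift %2d -> max %10u (%s a smi)\n", shift, max_result,
                    max_result <= static_cast<uint32_t>(INT32_MAX)
                        ? "always" : "not always");
      }
    }
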
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
-                                           OverwriteMode overwrite_mode) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = expr->op();
-  Comment cmnt_token(masm_, Token::String(op));
-
-  if (op == Token::COMMA) {
-    // Simply discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  if (op == Token::ADD) {
-    const bool left_is_string = left.type_info().IsString();
-    const bool right_is_string = right.type_info().IsString();
-    // Make sure constant strings have string type info.
-    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
-           left_is_string);
-    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
-           right_is_string);
-    if (left_is_string || right_is_string) {
-      frame_->Push(&left);
-      frame_->Push(&right);
-      Result answer;
-      if (left_is_string) {
-        if (right_is_string) {
-          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        } else {
-          answer =
-            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
-        }
-      } else if (right_is_string) {
-        answer =
-          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
-      }
-      answer.set_type_info(TypeInfo::String());
-      frame_->Push(&answer);
-      return;
-    }
-    // Neither operand is known to be a string.
-  }
-
-  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi_constant =
-      right.is_constant() && !right.handle()->IsSmi();
-
-  if (left_is_smi_constant && right_is_smi_constant) {
-    // Compute the constant result at compile time, and leave it on the frame.
-    int left_int = Smi::cast(*left.handle())->value();
-    int right_int = Smi::cast(*right.handle())->value();
-    if (FoldConstantSmis(op, left_int, right_int)) return;
-  }
-
-  // Get number type of left and right sub-expressions.
-  TypeInfo operands_type =
-      TypeInfo::Combine(left.type_info(), right.type_info());
-
-  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
-  Result answer;
-  if (left_is_non_smi_constant || right_is_non_smi_constant) {
-    // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op,
-                             overwrite_mode,
-                             NO_SMI_CODE_IN_STUB,
-                             operands_type);
-    answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-  } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
-                                        false, overwrite_mode);
-  } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
-                                        true, overwrite_mode);
-  } else {
-    // Set the flags based on the operation, type and loop nesting level.
-    // Bit operations always assume they likely operate on smis. Still only
-    // generate the inline Smi check code if this operation is part of a loop.
-    // For all other operations only inline the Smi check code for likely smis
-    // if the operation is part of a loop.
-    if (loop_nesting() > 0 &&
-        (Token::IsBitOp(op) ||
-         operands_type.IsInteger32() ||
-         expr->type()->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
-    } else {
-      GenericBinaryOpStub stub(op,
-                               overwrite_mode,
-                               NO_GENERIC_BINARY_FLAGS,
-                               operands_type);
-      answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-    }
-  }
-
-  answer.set_type_info(result_type);
-  frame_->Push(&answer);
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
-  Object* answer_object = HEAP->undefined_value();
-  switch (op) {
-    case Token::ADD:
-      // Use intptr_t to detect overflow of 32-bit int.
-      if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
-        answer_object = Smi::FromInt(left + right);
-      }
-      break;
-    case Token::SUB:
-      // Use intptr_t to detect overflow of 32-bit int.
-      if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
-        answer_object = Smi::FromInt(left - right);
-      }
-      break;
-    case Token::MUL: {
-        double answer = static_cast<double>(left) * right;
-        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
-          // If the product is zero and the non-zero factor is negative,
-          // the spec requires us to return floating point negative zero.
-          if (answer != 0 || (left >= 0 && right >= 0)) {
-            answer_object = Smi::FromInt(static_cast<int>(answer));
-          }
-        }
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD:
-      break;
-    case Token::BIT_OR:
-      answer_object = Smi::FromInt(left | right);
-      break;
-    case Token::BIT_AND:
-      answer_object = Smi::FromInt(left & right);
-      break;
-    case Token::BIT_XOR:
-      answer_object = Smi::FromInt(left ^ right);
-      break;
-
-    case Token::SHL: {
-        int shift_amount = right & 0x1F;
-        if (Smi::IsValid(left << shift_amount)) {
-          answer_object = Smi::FromInt(left << shift_amount);
-        }
-        break;
-      }
-    case Token::SHR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        unsigned_left >>= shift_amount;
-        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
-          answer_object = Smi::FromInt(unsigned_left);
-        }
-        break;
-      }
-    case Token::SAR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        if (left < 0) {
-          // Perform arithmetic shift of a negative number by
-          // complementing number, logical shifting, complementing again.
-          unsigned_left = ~unsigned_left;
-          unsigned_left >>= shift_amount;
-          unsigned_left = ~unsigned_left;
-        } else {
-          unsigned_left >>= shift_amount;
-        }
-        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
-        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
-        break;
-      }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  if (answer_object->IsUndefined()) {
-    return false;
-  }
-  frame_->Push(Handle<Object>(answer_object));
-  return true;
-}
-
-
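The constant folder above leans on a few arithmetic subtleties: overflow is
detected by computing in a wider type, a zero product with a negative factor
must not be folded (JavaScript requires -0.0 there, which no smi can
represent), and SAR is performed with logical shifts plus complements to
avoid shifting a negative signed int. A standalone sketch of the MUL and SAR
rules (plain C++; kMinValue/kMaxValue are stand-ins for the smi range, not
the real constants):

    #include <cassert>
    #include <cstdint>

    const int kMinValue = -(1 << 30);      // Stand-in smi range.
    const int kMaxValue = (1 << 30) - 1;

    // MUL: a zero product may only be folded when both factors are
    // non-negative, because 0 * -n is -0.0 in JavaScript.
    bool FoldMul(int left, int right, int* result) {
      double product = static_cast<double>(left) * right;
      if (product < kMinValue || product > kMaxValue) return false;
      if (product == 0 && (left < 0 || right < 0)) return false;
      *result = static_cast<int>(product);
      return true;
    }

    // SAR of a negative value via complement, logical shift, complement:
    // sidesteps right-shifting a negative signed int.
    int FoldSar(int left, int right) {
      int shift = right & 0x1F;  // Only the low 5 bits of the count are used.
      uint32_t bits = static_cast<uint32_t>(left);
      if (left < 0) return static_cast<int32_t>(~(~bits >> shift));
      return static_cast<int32_t>(bits >> shift);
    }

    int main() {
      int r;
      assert(FoldMul(3, 4, &r) && r == 12);
      assert(!FoldMul(0, -7, &r));    // Would be -0.0: leave to runtime.
      assert(FoldSar(-5, 1) == -3);   // Rounds toward negative infinity.
      assert(FoldSar(20, 34) == 5);   // Count masked: 34 & 31 == 2.
      return 0;
    }
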
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
-                                               Result* right,
-                                               JumpTarget* both_smi) {
-  TypeInfo left_info = left->type_info();
-  TypeInfo right_info = right->type_info();
-  if (left_info.IsDouble() || left_info.IsString() ||
-      right_info.IsDouble() || right_info.IsString()) {
-    // We know that left and right are not both smi.  Don't do any tests.
-    return;
-  }
-
-  if (left->reg().is(right->reg())) {
-    if (!left_info.IsSmi()) {
-      Condition is_smi = masm()->CheckSmi(left->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      Condition is_smi = masm()->CheckSmi(left->reg());
-      both_smi->Branch(is_smi);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    if (!right_info.IsSmi()) {
-      Condition is_smi = masm()->CheckSmi(right->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  }
-}
-
-
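Both smi-check helpers in this area use static type info to drop tag tests
that the types already answer. When a dynamic test is still needed for two
values, one OR suffices. The sketch below assumes the classic low-bit
tagging scheme (0 for smis, 1 for heap objects) purely for illustration; the
x64 port of this era kept the payload in the upper 32 bits, so the real
CheckSmi differs in detail:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kSmiTagMask = 1;  // Low bit: 0 = smi, 1 = heap object.

    bool IsSmi(uintptr_t word) { return (word & kSmiTagMask) == 0; }

    // One test instead of two: the OR of two words has a clear tag bit
    // exactly when both words have a clear tag bit.
    bool AreBothSmi(uintptr_t a, uintptr_t b) {
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      uintptr_t smi = 42u << 1;        // A tagged small integer.
      uintptr_t heap_object = 0x1001;  // A fake tagged pointer.
      assert(IsSmi(smi) && !IsSmi(heap_object));
      assert(AreBothSmi(smi, smi));
      assert(!AreBothSmi(smi, heap_object));
      return 0;
    }
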
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
-                                              TypeInfo type,
-                                              DeferredCode* deferred) {
-  if (!type.IsSmi()) {
-    __ JumpIfNotSmi(reg, deferred->entry_label());
-  }
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(reg);
-  }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  DeferredCode* deferred) {
-  if (!left_info.IsSmi() && !right_info.IsSmi()) {
-    __ JumpIfNotBothSmi(left, right, deferred->entry_label());
-  } else if (!left_info.IsSmi()) {
-    __ JumpIfNotSmi(left, deferred->entry_label());
-  } else if (!right_info.IsSmi()) {
-    __ JumpIfNotSmi(right, deferred->entry_label());
-  }
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(left);
-    __ AbortIfNotSmi(right);
-  }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
-                                               Result* left,
-                                               Result* right,
-                                               OverwriteMode overwrite_mode) {
-  // Copy the type info because left and right may be overwritten.
-  TypeInfo left_type_info = left->type_info();
-  TypeInfo right_type_info = right->type_info();
-  Token::Value op = expr->op();
-  Result answer;
-  // Special handling of div and mod because they use fixed registers.
-  if (op == Token::DIV || op == Token::MOD) {
-    // We need rax as the quotient register, rdx as the remainder
-    // register, neither left nor right in rax or rdx, and left copied
-    // to rax.
-    Result quotient;
-    Result remainder;
-    bool left_is_in_rax = false;
-    // Step 1: get rax for quotient.
-    if ((left->is_register() && left->reg().is(rax)) ||
-        (right->is_register() && right->reg().is(rax))) {
-      // One or both is in rax.  Use a fresh non-rdx register for
-      // them.
-      Result fresh = allocator_->Allocate();
-      ASSERT(fresh.is_valid());
-      if (fresh.reg().is(rdx)) {
-        remainder = fresh;
-        fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-      }
-      if (left->is_register() && left->reg().is(rax)) {
-        quotient = *left;
-        *left = fresh;
-        left_is_in_rax = true;
-      }
-      if (right->is_register() && right->reg().is(rax)) {
-        quotient = *right;
-        *right = fresh;
-      }
-      __ movq(fresh.reg(), rax);
-    } else {
-      // Neither left nor right is in rax.
-      quotient = allocator_->Allocate(rax);
-    }
-    ASSERT(quotient.is_register() && quotient.reg().is(rax));
-    ASSERT(!(left->is_register() && left->reg().is(rax)));
-    ASSERT(!(right->is_register() && right->reg().is(rax)));
-
-    // Step 2: get rdx for remainder if necessary.
-    if (!remainder.is_valid()) {
-      if ((left->is_register() && left->reg().is(rdx)) ||
-          (right->is_register() && right->reg().is(rdx))) {
-        Result fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-        if (left->is_register() && left->reg().is(rdx)) {
-          remainder = *left;
-          *left = fresh;
-        }
-        if (right->is_register() && right->reg().is(rdx)) {
-          remainder = *right;
-          *right = fresh;
-        }
-        __ movq(fresh.reg(), rdx);
-      } else {
-        // Neither left nor right is in rdx.
-        remainder = allocator_->Allocate(rdx);
-      }
-    }
-    ASSERT(remainder.is_register() && remainder.reg().is(rdx));
-    ASSERT(!(left->is_register() && left->reg().is(rdx)));
-    ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
-    left->ToRegister();
-    right->ToRegister();
-    frame_->Spill(rax);
-    frame_->Spill(rdx);
-
-    // Check that left and right are smi tagged.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          (op == Token::DIV) ? rax : rdx,
-                                          left->reg(),
-                                          right->reg(),
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
-                                  left_type_info, right_type_info, deferred);
-
-    if (op == Token::DIV) {
-      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = quotient;
-    } else {
-      ASSERT(op == Token::MOD);
-      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = remainder;
-    }
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Special handling of shift operations because they use fixed
-  // registers.
-  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
-    // Move left out of rcx if necessary.
-    if (left->is_register() && left->reg().is(rcx)) {
-      *left = allocator_->Allocate();
-      ASSERT(left->is_valid());
-      __ movq(left->reg(), rcx);
-    }
-    right->ToRegister(rcx);
-    left->ToRegister();
-    ASSERT(left->is_register() && !left->reg().is(rcx));
-    ASSERT(right->is_register() && right->reg().is(rcx));
-
-    // We will modify right, it must be spilled.
-    frame_->Spill(rcx);
-
-    // Use a fresh answer register to avoid spilling the left operand.
-    answer = allocator_->Allocate();
-    ASSERT(answer.is_valid());
-    // Check that both operands are smis using the answer register as a
-    // temporary.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          answer.reg(),
-                                          left->reg(),
-                                          rcx,
-                                          overwrite_mode);
-
-    Label do_op;
-    // Left operand must be unchanged in left->reg() for deferred code.
-    // Left operand is in answer.reg(), possibly converted to int32, for
-    // inline code.
-    __ movq(answer.reg(), left->reg());
-    if (right_type_info.IsSmi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(right->reg());
-      }
-      // If left is not known to be a smi, check if it is.
-      // If left is not known to be a number, and it isn't a smi, check if
-      // it is a HeapNumber.
-      if (!left_type_info.IsSmi()) {
-        __ JumpIfSmi(answer.reg(), &do_op);
-        if (!left_type_info.IsNumber()) {
-          // Branch if not a heap number.
-          __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
-                 FACTORY->heap_number_map());
-          deferred->Branch(not_equal);
-        }
-        // Load integer value into answer register using truncation.
-        __ cvttsd2si(answer.reg(),
-                     FieldOperand(answer.reg(), HeapNumber::kValueOffset));
-        // Branch if we might have overflowed.
-        // (False negative for Smi::kMinValue)
-        __ cmpl(answer.reg(), Immediate(0x80000000));
-        deferred->Branch(equal);
-        // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
-        __ Integer32ToSmi(answer.reg(), answer.reg());
-      } else {
-        // Fast case - both are actually smis.
-        if (FLAG_debug_code) {
-          __ AbortIfNotSmi(left->reg());
-        }
-      }
-    } else {
-      JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
-                                    left_type_info, right_type_info, deferred);
-    }
-    __ bind(&do_op);
-
-    // Perform the operation.
-    switch (op) {
-      case Token::SAR:
-        __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
-        break;
-      case Token::SHR: {
-        __ SmiShiftLogicalRight(answer.reg(),
-                                answer.reg(),
-                                rcx,
-                                deferred->entry_label());
-        break;
-      }
-      case Token::SHL: {
-        __ SmiShiftLeft(answer.reg(),
-                        answer.reg(),
-                        rcx);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-    deferred->BindExit();
-    left->Unuse();
-    right->Unuse();
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Handle the other binary operations.
-  left->ToRegister();
-  right->ToRegister();
-  // A newly allocated register answer is used to hold the answer.  The
-  // registers containing left and right are not modified so they don't
-  // need to be spilled in the fast case.
-  answer = allocator_->Allocate();
-  ASSERT(answer.is_valid());
-
-  // Perform the smi tag check.
-  DeferredInlineBinaryOperation* deferred =
-      new DeferredInlineBinaryOperation(op,
-                                        answer.reg(),
-                                        left->reg(),
-                                        right->reg(),
-                                        overwrite_mode);
-  JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
-                                left_type_info, right_type_info, deferred);
-
-  switch (op) {
-    case Token::ADD:
-      __ SmiAdd(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-
-    case Token::SUB:
-      __ SmiSub(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-
-    case Token::MUL: {
-      __ SmiMul(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-    }
-
-    case Token::BIT_OR:
-      __ SmiOr(answer.reg(), left->reg(), right->reg());
-      break;
-
-    case Token::BIT_AND:
-      __ SmiAnd(answer.reg(), left->reg(), right->reg());
-      break;
-
-    case Token::BIT_XOR:
-      __ SmiXor(answer.reg(), left->reg(), right->reg());
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-  deferred->BindExit();
-  left->Unuse();
-  right->Unuse();
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
-
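The register choreography in LikelySmiBinaryOperation's DIV/MOD path exists
because x86 idiv has fixed outputs: quotient in rax, remainder in rdx. The
arithmetic contract it provides is the same truncating division C++ uses, as
this small sketch (no V8 types) spells out:

    #include <cassert>

    // x86 idiv: the quotient truncates toward zero and the remainder takes
    // the sign of the dividend -- the same rule as C++'s / and %.
    void IDiv(int dividend, int divisor, int* quotient, int* remainder) {
      *quotient = dividend / divisor;   // Would land in rax.
      *remainder = dividend % divisor;  // Would land in rdx.
    }

    int main() {
      int q, r;
      IDiv(7, 2, &q, &r);
      assert(q == 3 && r == 1);
      IDiv(-7, 2, &q, &r);
      assert(q == -3 && r == -1);  // Truncation; remainder can be negative.
      // JavaScript's % follows the same sign rule, but results such as
      // -2 % 2 are -0, which no smi can represent -- one reason SmiMod
      // keeps a bailout to the deferred slow path.
      return 0;
    }
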
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             Register dst,
-                             Register src,
-                             Smi* value,
-                             OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        src_(src),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register src_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
-  // For mod we don't generate all the Smi code inline.
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, src_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
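Every DeferredInline* class in this file follows the same pattern: emit the
smi fast path inline, allocate a small object that captures the registers
and mode the slow path needs, and emit that slow path later, out of line,
ending with a jump back to the exit. A toy sketch of that control structure
(hypothetical Emitter type, nothing V8-specific):

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // A toy assembler that records lines of "code".
    struct Emitter {
      std::vector<std::string> lines;
      std::vector<std::function<void(Emitter*)>> deferred;

      void Emit(const std::string& line) { lines.push_back(line); }

      // Register a slow path; its code is emitted after the main body,
      // mirroring DeferredCode::Generate() and BindExit().
      void Defer(std::function<void(Emitter*)> gen) {
        deferred.push_back(std::move(gen));
      }

      void Finish() {
        for (auto& gen : deferred) gen(this);
      }
    };

    int main() {
      Emitter masm;
      masm.Emit("test rax, kSmiTagMask");
      masm.Emit("jnz slow_1");               // Guard jumps out of line.
      masm.Emit("add rax, rbx");             // Inline smi fast path.
      masm.Emit("exit_1:");
      masm.Defer([](Emitter* m) {
        m->Emit("slow_1:");
        m->Emit("call GenericBinaryOpStub"); // Out-of-line slow path.
        m->Emit("jmp exit_1");               // Back to the exit label.
      });
      masm.Finish();
      for (const auto& line : masm.lines) std::cout << line << "\n";
      return 0;
    }
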
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
-  DeferredInlineSmiOperationReversed(Token::Value op,
-                                     Register dst,
-                                     Smi* value,
-                                     Register src,
-                                     OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        value_(value),
-        src_(src),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperationReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Smi* value_;
-  Register src_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, value_, src_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
-  DeferredInlineSmiAdd(Register dst,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAdd");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The result of value + src is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
-  DeferredInlineSmiAddReversed(Register dst,
-                               Smi* value,
-                               OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAddReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, value_, dst_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
-  DeferredInlineSmiSub(Register dst,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiSub");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                                 Result* operand,
-                                                 Handle<Object> value,
-                                                 bool reversed,
-                                                 OverwriteMode overwrite_mode) {
-  // Generate inline code for a binary operation when one of the
-  // operands is a constant smi.  Consumes the argument "operand".
-  if (IsUnsafeSmi(value)) {
-    Result unsafe_operand(value);
-    if (reversed) {
-      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
-                               overwrite_mode);
-    } else {
-      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
-                               overwrite_mode);
-    }
-  }
-
-  // Get the literal value.
-  Smi* smi_value = Smi::cast(*value);
-  int int_value = smi_value->value();
-
-  Token::Value op = expr->op();
-  Result answer;
-  switch (op) {
-    case Token::ADD: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
-                                                    smi_value,
-                                                    overwrite_mode);
-      } else {
-        deferred = new DeferredInlineSmiAdd(operand->reg(),
-                                            smi_value,
-                                            overwrite_mode);
-      }
-      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                deferred);
-      __ SmiAddConstant(operand->reg(),
-                        operand->reg(),
-                        smi_value,
-                        deferred->entry_label());
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::SUB: {
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        answer = *operand;
-        DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
-                                                          smi_value,
-                                                          overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        // A smi currently fits in a 32-bit Immediate.
-        __ SmiSubConstant(operand->reg(),
-                          operand->reg(),
-                          smi_value,
-                          deferred->entry_label());
-        deferred->BindExit();
-        operand->Unuse();
-      }
-      break;
-    }
-
-    case Token::SAR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        __ SmiShiftArithmeticRightConstant(operand->reg(),
-                                           operand->reg(),
-                                           shift_value);
-        deferred->BindExit();
-        answer = *operand;
-      }
-      break;
-
-    case Token::SHR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           answer.reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        __ SmiShiftLogicalRightConstant(answer.reg(),
-                                        operand->reg(),
-                                        shift_value,
-                                        deferred->entry_label());
-        deferred->BindExit();
-        operand->Unuse();
-      }
-      break;
-
-    case Token::SHL:
-      if (reversed) {
-        operand->ToRegister();
-
-        // We need rcx to be available to hold the operand, and to be spilled.
-        // SmiShiftLeft implicitly modifies rcx.
-        if (operand->reg().is(rcx)) {
-          frame_->Spill(operand->reg());
-          answer = allocator()->Allocate();
-        } else {
-          Result rcx_reg = allocator()->Allocate(rcx);
-          // answer must not be rcx.
-          answer = allocator()->Allocate();
-          // rcx_reg goes out of scope.
-        }
-
-        DeferredInlineSmiOperationReversed* deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-
-        __ Move(answer.reg(), smi_value);
-        __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
-        operand->Unuse();
-
-        deferred->BindExit();
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        if (shift_value == 0) {
-          // Spill operand so it can be overwritten in the slow case.
-          frame_->Spill(operand->reg());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             smi_value,
-                                             overwrite_mode);
-          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                    deferred);
-          deferred->BindExit();
-          answer = *operand;
-        } else {
-          // Use a fresh temporary for nonzero shift values.
-          answer = allocator()->Allocate();
-          ASSERT(answer.is_valid());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             answer.reg(),
-                                             operand->reg(),
-                                             smi_value,
-                                             overwrite_mode);
-          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                    deferred);
-          __ SmiShiftLeftConstant(answer.reg(),
-                                  operand->reg(),
-                                  shift_value);
-          deferred->BindExit();
-          operand->Unuse();
-        }
-      }
-      break;
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-      if (reversed) {
-        // Bit operations with a constant smi are commutative.
-        // We can swap left and right operands with no problem.
-        // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
-        overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
-      }
-      DeferredCode* deferred =  new DeferredInlineSmiOperation(op,
-                                                               operand->reg(),
-                                                               operand->reg(),
-                                                               smi_value,
-                                                               overwrite_mode);
-      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                deferred);
-      if (op == Token::BIT_AND) {
-        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
-      } else if (op == Token::BIT_XOR) {
-        if (int_value != 0) {
-          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
-        }
-      } else {
-        ASSERT(op == Token::BIT_OR);
-        if (int_value != 0) {
-          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
-        }
-      }
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    // Generate inline code for mod of powers of 2 and negative powers of 2.
-    case Token::MOD:
-      if (!reversed &&
-          int_value != 0 &&
-          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
-        if (int_value < 0) int_value = -int_value;
-        if (int_value == 1) {
-          __ Move(operand->reg(), Smi::FromInt(0));
-        } else {
-          __ SmiAndConstant(operand->reg(),
-                            operand->reg(),
-                            Smi::FromInt(int_value - 1));
-        }
-        deferred->BindExit();
-        answer = *operand;
-        break;  // This break only applies if we generated code for MOD.
-      }
-      // Fall through if we did not find a power of 2 on the right hand side!
-      // The next case must be the default.
-
-    default: {
-      Result constant_operand(value);
-      if (reversed) {
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                          overwrite_mode);
-      }
-      break;
-    }
-  }
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
-
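Two identities in ConstantSmiBinaryOperation deserve a sanity check: the
overwrite-mode swap (2 * mode) % 3 exchanges OVERWRITE_LEFT and
OVERWRITE_RIGHT while fixing NO_OVERWRITE, and for a non-negative dividend a
power-of-two modulus reduces to a mask. A quick standalone verification:

    #include <cassert>

    enum OverwriteMode {
      NO_OVERWRITE = 0,
      OVERWRITE_LEFT = 1,
      OVERWRITE_RIGHT = 2
    };

    int main() {
      // 0 -> 0, 1 -> 2, 2 -> 1.
      assert((2 * NO_OVERWRITE) % 3 == NO_OVERWRITE);
      assert((2 * OVERWRITE_LEFT) % 3 == OVERWRITE_RIGHT);
      assert((2 * OVERWRITE_RIGHT) % 3 == OVERWRITE_LEFT);

      // For x >= 0 and a power-of-two divisor d, x % d == x & (d - 1).
      // The inline MOD path guards with JumpUnlessNonNegativeSmi exactly
      // because the identity fails for negative x: JavaScript's % keeps
      // the dividend's sign, the mask does not.
      for (int x = 0; x < 1000; x++) {
        assert(x % 8 == (x & 7));
        assert(x % 16 == (x & 15));
      }
      return 0;
    }
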
-static bool CouldBeNaN(const Result& result) {
-  if (result.type_info().IsSmi()) return false;
-  if (result.type_info().IsInteger32()) return false;
-  if (!result.is_constant()) return true;
-  if (!result.handle()->IsHeapNumber()) return false;
-  return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
-  switch (cc) {
-    case less:          return below;
-    case equal:         return equal;
-    case less_equal:    return below_equal;
-    case greater:       return above;
-    case greater_equal: return above_equal;
-    default:            UNREACHABLE();
-  }
-  UNREACHABLE();
-  return equal;
-}
-
-
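DoubleCondition is needed because ucomisd sets EFLAGS the way an unsigned
integer compare does (below/above rather than less/greater), and CouldBeNaN
feeds the identity shortcut used in Comparison below: identical operands are
equal unless both could be NaN. The NaN half of that argument is easy to
demonstrate in standard C++:

    #include <cassert>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      double x = 1.5;

      // Identity implies equality -- except for NaN, which is unequal
      // even to itself. Hence the kCantBothBeNaN precondition on the
      // inlined "if identical then equal" check.
      assert(x == x);
      assert(!(nan == nan));

      // Every ordered comparison involving NaN is false; ucomisd reports
      // the unordered case via the parity flag, which the inline code
      // later tests with j(parity_even, ...).
      assert(!(nan < x) && !(nan > x) && !(nan == x));
      return 0;
    }
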
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
-                                        bool inline_number_compare) {
-  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
-  if (nan_info == kCantBothBeNaN) {
-    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
-  }
-  if (inline_number_compare) {
-    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
-  }
-  return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
-                               Condition cc,
-                               bool strict,
-                               ControlDestination* dest) {
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == equal);
-
-  Result left_side;
-  Result right_side;
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == greater || cc == less_equal) {
-    cc = ReverseCondition(cc);
-    left_side = frame_->Pop();
-    right_side = frame_->Pop();
-  } else {
-    right_side = frame_->Pop();
-    left_side = frame_->Pop();
-  }
-  ASSERT(cc == less || cc == equal || cc == greater_equal);
-
-  // If either side is a constant smi, optimize the comparison.
-  bool left_side_constant_smi = false;
-  bool left_side_constant_null = false;
-  bool left_side_constant_1_char_string = false;
-  if (left_side.is_constant()) {
-    left_side_constant_smi = left_side.handle()->IsSmi();
-    left_side_constant_null = left_side.handle()->IsNull();
-    left_side_constant_1_char_string =
-        (left_side.handle()->IsString() &&
-         String::cast(*left_side.handle())->length() == 1 &&
-         String::cast(*left_side.handle())->IsAsciiRepresentation());
-  }
-  bool right_side_constant_smi = false;
-  bool right_side_constant_null = false;
-  bool right_side_constant_1_char_string = false;
-  if (right_side.is_constant()) {
-    right_side_constant_smi = right_side.handle()->IsSmi();
-    right_side_constant_null = right_side.handle()->IsNull();
-    right_side_constant_1_char_string =
-        (right_side.handle()->IsString() &&
-         String::cast(*right_side.handle())->length() == 1 &&
-         String::cast(*right_side.handle())->IsAsciiRepresentation());
-  }
-
-  if (left_side_constant_smi || right_side_constant_smi) {
-    bool is_loop_condition = (node->AsExpression() != NULL) &&
-        node->AsExpression()->is_loop_condition();
-    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
-                          left_side_constant_smi, right_side_constant_smi,
-                          is_loop_condition);
-  } else if (left_side_constant_1_char_string ||
-             right_side_constant_1_char_string) {
-    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
-      // Trivial case, comparing two constants.
-      int left_value = String::cast(*left_side.handle())->Get(0);
-      int right_value = String::cast(*right_side.handle())->Get(0);
-      switch (cc) {
-        case less:
-          dest->Goto(left_value < right_value);
-          break;
-        case equal:
-          dest->Goto(left_value == right_value);
-          break;
-        case greater_equal:
-          dest->Goto(left_value >= right_value);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    } else {
-      // Only one side is a constant 1-character string.
-      // If the left side is the constant string, reverse the operands.
-      // Since one side is a constant string, conversion order does not matter.
-      if (left_side_constant_1_char_string) {
-        Result temp = left_side;
-        left_side = right_side;
-        right_side = temp;
-        cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
-        // CompareStub and the inline code both support all values of cc.
-      }
-      // Implement comparison against a constant string, inlining the case
-      // where both sides are strings.
-      left_side.ToRegister();
-
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_not_string, is_string;
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
-      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
-      Condition is_smi = masm()->CheckSmi(left_reg);
-      is_not_string.Branch(is_smi, &left_side);
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ movq(temp.reg(),
-              FieldOperand(left_reg, HeapObject::kMapOffset));
-      __ movzxbl(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-      // If we are testing for equality then make use of the symbol shortcut.
-      // Check if the left hand side has the same type as the right hand
-      // side (which is always a symbol).
-      if (cc == equal) {
-        Label not_a_symbol;
-        STATIC_ASSERT(kSymbolTag != 0);
-        // Ensure that no non-strings have the symbol bit set.
-        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
-        __ testb(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
-        __ j(zero, &not_a_symbol);
-        // They are symbols, so do identity compare.
-        __ Cmp(left_reg, right_side.handle());
-        dest->true_target()->Branch(equal);
-        dest->false_target()->Branch(not_equal);
-        __ bind(&not_a_symbol);
-      }
-      // Call the compare stub if the left side is not a flat ascii string.
-      __ andb(temp.reg(),
-              Immediate(kIsNotStringMask |
-                        kStringRepresentationMask |
-                        kStringEncodingMask));
-      __ cmpb(temp.reg(),
-              Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-      temp.Unuse();
-      is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
-      is_not_string.Bind(&left_side);
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, &left_side, &right_side);
-      result.ToRegister();
-      __ testq(result.reg(), result.reg());
-      result.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_string.Bind(&left_side);
-      // left_side is a sequential ASCII string.
-      ASSERT(left_side.reg().is(left_reg));
-      right_side = Result(right_val);
-      Result temp2 = allocator_->Allocate();
-      ASSERT(temp2.is_valid());
-      // Test string equality and comparison.
-      if (cc == equal) {
-        Label comparison_done;
-        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                      Smi::FromInt(1));
-        __ j(not_equal, &comparison_done);
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                Immediate(char_value));
-        __ bind(&comparison_done);
-      } else {
-        __ movq(temp2.reg(),
-                FieldOperand(left_side.reg(), String::kLengthOffset));
-        __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
-        Label comparison;
-        // If the length is 0 then the subtraction gave -1 which compares less
-        // than any character.
-        __ j(negative, &comparison);
-        // Otherwise load the first character.
-        __ movzxbl(temp2.reg(),
-                   FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
-        __ bind(&comparison);
-        // Compare the first character of the string with the
-        // constant 1-character string.
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
-        __ cmpb(temp2.reg(), Immediate(char_value));
-        Label characters_were_different;
-        __ j(not_equal, &characters_were_different);
-        // If the first character is the same then the long string sorts after
-        // the short one.
-        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                      Smi::FromInt(1));
-        __ bind(&characters_were_different);
-      }
-      temp2.Unuse();
-      left_side.Unuse();
-      right_side.Unuse();
-      dest->Split(cc);
-    }
-  } else {
-    // Neither side is a constant Smi, constant 1-char string, or constant null.
-    // If either side is a non-smi constant, or known to be a heap number,
-    // skip the smi check.
-    bool known_non_smi =
-        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
-        left_side.type_info().IsDouble() ||
-        right_side.type_info().IsDouble();
-
-    NaNInformation nan_info =
-        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
-        kBothCouldBeNaN :
-        kCantBothBeNaN;
-
-    // Inline number comparison handling any combination of smis and heap
-    // numbers if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a for-loop condition.
-    // The reason for excluding equal is that it will most likely be done
-    // with smis (not heap numbers), and the code for comparing smis is
-    // inlined separately. The same reasoning applies to for-loop conditions,
-    // which will also most likely be smi comparisons.
-    bool is_loop_condition = (node->AsExpression() != NULL)
-        && node->AsExpression()->is_loop_condition();
-    bool inline_number_compare =
-        loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
-    // Left and right needed in registers for the following code.
-    left_side.ToRegister();
-    right_side.ToRegister();
-
-    if (known_non_smi) {
-      // Inlined equality check:
-      // If at least one of the objects is not NaN, then if the objects
-      // are identical, they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmpq(left_side.reg(), right_side.reg());
-        dest->true_target()->Branch(equal);
-      }
-
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-      }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
-      CompareStub stub(cc, strict, flags);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flag.
-      answer.Unuse();
-      dest->Split(cc);
-    } else {
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_smi;
-      Register left_reg = left_side.reg();
-      Register right_reg = right_side.reg();
-
-      // In-line check for comparing two smis.
-      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
-      if (has_valid_frame()) {
-        // Inline the equality check if both operands can't be a NaN. If both
-        // objects are the same they are equal.
-        if (nan_info == kCantBothBeNaN && cc == equal) {
-          __ cmpq(left_side.reg(), right_side.reg());
-          dest->true_target()->Branch(equal);
-        }
-
-        // Inlined number comparison:
-        if (inline_number_compare) {
-          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-        }
-
-        // End of in-line compare, call out to the compare stub. Don't include
-        // number comparison in the stub if it was inlined.
-        CompareFlags flags =
-            ComputeCompareFlags(nan_info, inline_number_compare);
-        CompareStub stub(cc, strict, flags);
-        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-        __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
-        answer.Unuse();
-        if (is_smi.is_linked()) {
-          dest->true_target()->Branch(cc);
-          dest->false_target()->Jump();
-        } else {
-          dest->Split(cc);
-        }
-      }
-
-      if (is_smi.is_linked()) {
-        is_smi.Bind();
-        left_side = Result(left_reg);
-        right_side = Result(right_reg);
-        __ SmiCompare(left_side.reg(), right_side.reg());
-        right_side.Unuse();
-        left_side.Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
-
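The reversal at the top of Comparison is a pure identity -- a > b holds
exactly when b < a, and a <= b exactly when b >= a -- so the rest of the
code generator only has to implement less, equal, and greater_equal while
the operands are still popped in an order that preserves ECMA-262's
left-to-right conversion. A sketch of the mapping:

    #include <cassert>

    enum Condition { less, equal, greater_equal, greater, less_equal };

    // Mirrors ReverseCondition for the cases Comparison rewrites.
    Condition Reverse(Condition cc) {
      switch (cc) {
        case greater:    return less;
        case less_equal: return greater_equal;
        default:         return cc;
      }
    }

    bool Compare(Condition cc, int left, int right) {
      if (cc == greater || cc == less_equal) {
        // Swap operands and reverse the condition, as the codegen does
        // by popping the virtual-frame operands in the opposite order.
        int tmp = left; left = right; right = tmp;
        cc = Reverse(cc);
      }
      switch (cc) {
        case less:          return left < right;
        case equal:         return left == right;
        case greater_equal: return left >= right;
        default:            return false;  // Not reached in this sketch.
      }
    }

    int main() {
      for (int a = -2; a <= 2; a++) {
        for (int b = -2; b <= 2; b++) {
          assert(Compare(greater, a, b) == (a > b));
          assert(Compare(less_equal, a, b) == (a <= b));
        }
      }
      return 0;
    }
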
-void CodeGenerator::ConstantSmiComparison(Condition cc,
-                                          bool strict,
-                                          ControlDestination* dest,
-                                          Result* left_side,
-                                          Result* right_side,
-                                          bool left_side_constant_smi,
-                                          bool right_side_constant_smi,
-                                          bool is_loop_condition) {
-  if (left_side_constant_smi && right_side_constant_smi) {
-    // Trivial case, comparing two constants.
-    int left_value = Smi::cast(*left_side->handle())->value();
-    int right_value = Smi::cast(*right_side->handle())->value();
-    switch (cc) {
-      case less:
-        dest->Goto(left_value < right_value);
-        break;
-      case equal:
-        dest->Goto(left_value == right_value);
-        break;
-      case greater_equal:
-        dest->Goto(left_value >= right_value);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    // Only one side is a constant Smi.
-    // If left side is a constant Smi, reverse the operands.
-    // Since one side is a constant Smi, conversion order does not matter.
-    if (left_side_constant_smi) {
-      Result* temp = left_side;
-      left_side = right_side;
-      right_side = temp;
-      cc = ReverseCondition(cc);
-      // This may re-introduce greater or less_equal as the value of cc.
-      // CompareStub and the inline code both support all values of cc.
-    }
-    // Implement comparison against a constant Smi, inlining the case
-    // where both sides are smis.
-    left_side->ToRegister();
-    Register left_reg = left_side->reg();
-    Smi* constant_smi = Smi::cast(*right_side->handle());
-
-    if (left_side->is_smi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(left_reg);
-      }
-      // Test smi equality and comparison by signed int comparison.
-      __ SmiCompare(left_reg, constant_smi);
-      left_side->Unuse();
-      right_side->Unuse();
-      dest->Split(cc);
-    } else {
-      // Only the case remains where the left side could be a non-smi.
-      JumpTarget is_smi;
-      if (cc == equal) {
-        // We can do the equality comparison before the smi check.
-        __ Cmp(left_reg, constant_smi);
-        dest->true_target()->Branch(equal);
-        Condition left_is_smi = masm_->CheckSmi(left_reg);
-        dest->false_target()->Branch(left_is_smi);
-      } else {
-        // Do the smi check, then the comparison.
-        Condition left_is_smi = masm_->CheckSmi(left_reg);
-        is_smi.Branch(left_is_smi, left_side, right_side);
-      }
-
-      // Jump or fall through to here if we are comparing a non-smi to a
-      // constant smi.  If the non-smi is a heap number and this is not
-      // a loop condition, inline the floating point code.
-      if (!is_loop_condition) {
-        // Right side is a constant smi and left side has been checked
-        // not to be a smi.
-        JumpTarget not_number;
-        __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        not_number.Branch(not_equal, left_side);
-        __ movsd(xmm1,
-                 FieldOperand(left_reg, HeapNumber::kValueOffset));
-        int value = constant_smi->value();
-        if (value == 0) {
-          __ xorpd(xmm0, xmm0);
-        } else {
-          Result temp = allocator()->Allocate();
-          __ movl(temp.reg(), Immediate(value));
-          __ cvtlsi2sd(xmm0, temp.reg());
-          temp.Unuse();
-        }
-        __ ucomisd(xmm1, xmm0);
-        // Jump to builtin for NaN.
-        not_number.Branch(parity_even, left_side);
-        left_side->Unuse();
-        dest->true_target()->Branch(DoubleCondition(cc));
-        dest->false_target()->Jump();
-        not_number.Bind(left_side);
-      }
-
-      // Set up and call the compare stub.
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, left_side, right_side);
-      result.ToRegister();
-      __ testq(result.reg(), result.reg());
-      result.Unuse();
-      if (cc == equal) {
-        dest->Split(cc);
-      } else {
-        dest->true_target()->Branch(cc);
-        dest->false_target()->Jump();
-
-        // It is important for performance for this case to be at the end.
-        is_smi.Bind(left_side, right_side);
-        __ SmiCompare(left_reg, constant_smi);
-        left_side->Unuse();
-        right_side->Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
-
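One ordering trick in ConstantSmiComparison is worth isolating: for equality
the full tagged word can be compared against the tagged constant before any
smi check, because equal words denote the same smi (or the same object) and
a tagged heap pointer can never collide with a tagged smi constant. Reusing
the illustrative low-bit tagging from the earlier sketch:

    #include <cassert>
    #include <cstdint>

    uintptr_t TagSmi(int value) { return static_cast<uintptr_t>(value) << 1; }
    bool IsSmi(uintptr_t word) { return (word & 1) == 0; }

    enum Outcome { kEqual, kNotEqual, kSlowPath };

    Outcome EqualsConstantSmi(uintptr_t word, int constant) {
      // Equality decided without any type check.
      if (word == TagSmi(constant)) return kEqual;
      // A different smi is simply unequal.
      if (IsSmi(word)) return kNotEqual;
      // Heap object: the real code goes on to the heap-number compare
      // or the CompareStub; the sketch stops here.
      return kSlowPath;
    }

    int main() {
      assert(EqualsConstantSmi(TagSmi(7), 7) == kEqual);
      assert(EqualsConstantSmi(TagSmi(8), 7) == kNotEqual);
      assert(EqualsConstantSmi(0x1001, 7) == kSlowPath);  // Fake pointer.
      return 0;
    }
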
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right result, if the operand is not a
-// number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
-                                  Result* operand,
-                                  XMMRegister xmm_reg,
-                                  Result* left_side,
-                                  Result* right_side,
-                                  JumpTarget* not_numbers) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiToInteger32(kScratchRegister, operand->reg());
-    __ cvtlsi2sd(xmm_reg, kScratchRegister);
-  } else {
-    // Operand type not known, check for smi or heap number.
-    Label smi;
-    __ JumpIfSmi(operand->reg(), &smi);
-    if (!operand->type_info().IsNumber()) {
-      __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-      __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-              kScratchRegister);
-      not_numbers->Branch(not_equal, left_side, right_side, taken);
-    }
-    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-
-    __ bind(&smi);
-    // Convert the smi to a double and keep the original smi.
-    __ SmiToInteger32(kScratchRegister, operand->reg());
-    __ cvtlsi2sd(xmm_reg, kScratchRegister);
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
-
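GenerateInlineNumberComparison below chains two such loads and then bails
out on the unordered (NaN) case. The same shape in portable C++, with a
hypothetical Value struct standing in for a tagged smi-or-heap-number:

    #include <cassert>
    #include <cmath>
    #include <limits>

    // Hypothetical tagged value: either a smi or a boxed double.
    struct Value {
      bool is_smi;
      int smi;
      double heap_number;
    };

    // Mirrors LoadComparisonOperand: smis are widened to double
    // (cvtlsi2sd), heap numbers are loaded directly; anything else would
    // take the not_numbers path, which the sketch omits.
    double LoadOperand(const Value& v) {
      return v.is_smi ? static_cast<double>(v.smi) : v.heap_number;
    }

    // Writes -1, 0 or +1 for <, ==, >; returns false for the unordered
    // case, the analogue of branching on parity_even after ucomisd.
    bool InlineNumberCompare(const Value& a, const Value& b, int* result) {
      double x = LoadOperand(a);
      double y = LoadOperand(b);
      if (std::isunordered(x, y)) return false;  // NaN: defer to the stub.
      *result = (x < y) ? -1 : ((x > y) ? 1 : 0);
      return true;
    }

    int main() {
      Value two = {true, 2, 0.0};
      Value half = {false, 0, 0.5};
      Value nan = {false, 0, std::numeric_limits<double>::quiet_NaN()};
      int r;
      assert(InlineNumberCompare(two, half, &r) && r == 1);  // 2 > 0.5.
      assert(!InlineNumberCompare(two, nan, &r));            // Unordered.
      return 0;
    }
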
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
-                                                   Result* right_side,
-                                                   Condition cc,
-                                                   ControlDestination* dest) {
-  ASSERT(left_side->is_register());
-  ASSERT(right_side->is_register());
-
-  JumpTarget not_numbers;
-  // Load left and right operand into registers xmm0 and xmm1 and compare.
-  LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
-                        &not_numbers);
-  LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
-                        &not_numbers);
-  __ ucomisd(xmm0, xmm1);
-  // Bail out if a NaN is involved.
-  not_numbers.Branch(parity_even, left_side, right_side);
-
-  // Split to destination targets based on comparison.
-  left_side->Unuse();
-  right_side->Unuse();
-  dest->true_target()->Branch(DoubleCondition(cc));
-  dest->false_target()->Jump();
-
-  not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-    frame_->SpillTop();
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  Result answer = frame_->CallStub(&call_function, arg_count + 1);
-  // Restore context and replace function on the stack with the
-  // result of the stub invocation.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  frame()->Dup();
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame()->Push(name);
-  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
-  __ nop();
-  frame()->Push(&answer);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of frame at this point:
-  // Frame[0]: arguments object of the current function or the hole.
-  // Frame[1]: receiver
-  // Frame[2]: applicand.apply
-  // Frame[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  frame_->Dup();
-  Result probe = frame_->Pop();
-  { VirtualFrame::SpilledScope spilled_scope;
-    Label slow, done;
-    bool try_lazy = true;
-    if (probe.is_constant()) {
-      try_lazy = probe.handle()->IsArgumentsMarker();
-    } else {
-      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
-      probe.Unuse();
-      __ j(not_equal, &slow);
-    }
-
-    if (try_lazy) {
-      Label build_args;
-      // Get rid of the arguments object probe.
-      frame_->Drop();  // Can be called on a spilled frame.
-      // Stack now has 3 elements on it.
-      // Contents of stack at this point:
-      // rsp[0]: receiver
-      // rsp[1]: applicand.apply
-      // rsp[2]: applicand.
-
-      // Check that the receiver really is a JavaScript object.
-      __ movq(rax, Operand(rsp, 0));
-      Condition is_smi = masm_->CheckSmi(rax);
-      __ j(is_smi, &build_args);
-      // We allow all JSObjects including JSFunctions.  As long as
-      // JS_FUNCTION_TYPE is the last instance type and it is right
-      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-      // bound.
-      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-      __ j(below, &build_args);
-
-      // Check that applicand.apply is Function.prototype.apply.
-      __ movq(rax, Operand(rsp, kPointerSize));
-      is_smi = masm_->CheckSmi(rax);
-      __ j(is_smi, &build_args);
-      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
-      __ j(not_equal, &build_args);
-      __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
-      __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      Handle<Code> apply_code = Isolate::Current()->builtins()->FunctionApply();
-      __ Cmp(rcx, apply_code);
-      __ j(not_equal, &build_args);
-
-      // Check that applicand is a function.
-      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
-      is_smi = masm_->CheckSmi(rdi);
-      __ j(is_smi, &build_args);
-      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-      __ j(not_equal, &build_args);
-
-      // Copy the arguments to this function possibly from the
-      // adaptor frame below it.
-      Label invoke, adapted;
-      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-      __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
-             Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-      __ j(equal, &adapted);
-
-      // No arguments adaptor frame. Copy fixed number of arguments.
-      __ Set(rax, scope()->num_parameters());
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        __ push(frame_->ParameterAt(i));
-      }
-      __ jmp(&invoke);
-
-      // Arguments adaptor frame present. Copy arguments from there, but
-      // avoid copying too many arguments to avoid stack overflows.
-      __ bind(&adapted);
-      static const uint32_t kArgumentsLimit = 1 * KB;
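-      // That is, at most 1 * KB = 1024 arguments are copied inline; calls
-      // carrying more than that take the &build_args slow path below.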
-      __ SmiToInteger32(rax,
-                        Operand(rdx,
-                                ArgumentsAdaptorFrameConstants::kLengthOffset));
-      __ movl(rcx, rax);
-      __ cmpl(rax, Immediate(kArgumentsLimit));
-      __ j(above, &build_args);
-
-      // Loop through the arguments pushing them onto the execution
-      // stack. We don't inform the virtual frame of the push, so we don't
-      // have to worry about getting rid of the elements from the virtual
-      // frame.
-      Label loop;
-      // rcx is a small non-negative integer, due to the test above.
-      __ testl(rcx, rcx);
-      __ j(zero, &invoke);
-      __ bind(&loop);
-      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
-      __ decl(rcx);
-      __ j(not_zero, &loop);
-
-      // Invoke the function.
-      __ bind(&invoke);
-      ParameterCount actual(rax);
-      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-      // Drop applicand.apply and applicand from the stack, and push
-      // the result of the function call, but leave the spilled frame
-      // unchanged, with 3 elements, so it is correct when we compile the
-      // slow-case code.
-      __ addq(rsp, Immediate(2 * kPointerSize));
-      __ push(rax);
-      // Stack now has 1 element:
-      //   rsp[0]: result
-      __ jmp(&done);
-
-      // Slow-case: Allocate the arguments object since we know it isn't
-      // there, and fall-through to the slow-case where we call
-      // applicand.apply.
-      __ bind(&build_args);
-      // Stack now has 3 elements, because we jumped here from a point where:
-      // rsp[0]: receiver
-      // rsp[1]: applicand.apply
-      // rsp[2]: applicand.
-
-      // StoreArgumentsObject requires a correct frame, and may modify it.
-      Result arguments_object = StoreArgumentsObject(false);
-      frame_->SpillAll();
-      arguments_object.ToRegister();
-      frame_->EmitPush(arguments_object.reg());
-      arguments_object.Unuse();
-      // Stack and frame now have 4 elements.
-      __ bind(&slow);
-    }
-
-    // Generic computation of x.apply(y, args) with no special optimization.
-    // Flip applicand.apply and applicand on the stack, so
-    // applicand looks like the receiver of the applicand.apply call.
-    // Then process it as a normal function call.
-    __ movq(rax, Operand(rsp, 3 * kPointerSize));
-    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-    __ movq(Operand(rsp, 2 * kPointerSize), rax);
-    __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
-    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-    Result res = frame_->CallStub(&call_function, 3);
-    // The function and its two arguments have been dropped.
-    frame_->Drop(1);  // Drop the receiver as well.
-    res.ToRegister();
-    frame_->EmitPush(res.reg());
-    // Stack now has 1 element:
-    //   rsp[0]: result
-    if (try_lazy) __ bind(&done);
-  }  // End of spilled scope.
-  // Restore the context register after a call.
-  frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
-  DeferredStackCheck() {
-    set_comment("[ DeferredStackCheck");
-  }
-
-  virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
-  StackCheckStub stub;
-  __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
-  DeferredStackCheck* deferred = new DeferredStackCheck;
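-  // The inline sequence is just a compare against the stack limit root and
-  // a conditional branch; the StackCheckStub call lives out of line in the
-  // deferred code, keeping the common path short.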
-  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  deferred->Branch(below);
-  deferred->BindExit();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Visit(statement);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  VisitStatements(statements);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  // Call the runtime to declare the globals.  The inevitable call
-  // will sync frame elements to memory anyway, so we do it eagerly to
-  // allow us to push the arguments directly into place.
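-  // For example (illustrative), top-level code such as
-  //   var x; function f() {}
-  // reaches this point with a pairs array describing x and f.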
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
-  frame_->EmitPush(rsi);  // The context is the first argument.
-  frame_->EmitPush(kScratchRegister);
-  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
-  frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
-  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.  Sync the virtual frame eagerly
-    // so we can simply push the arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
-    // Declaration nodes are always introduced in one of two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Smi::FromInt(attr));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
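-    // For example (illustrative), in "var x = 1; var x;" the second
-    // declaration must leave the value 1 intact.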
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-  if (val != NULL) {
-    {
-      // Set the initial value.
-      Reference target(this, node->proxy());
-      Load(val);
-      target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
-    }
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  // Remove the lingering expression result from the top of stack.
-  frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present or not.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    JumpTarget then;
-    JumpTarget else_;
-    ControlDestination dest(&then, &else_, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The else target was bound, so we compile the else part first.
-      Visit(node->else_statement());
-
-      // We may have dangling jumps to the then part.
-      if (then.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then target was bound, so we compile the then part first.
-      Visit(node->then_statement());
-
-      if (else_.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    }
-
-  } else if (has_then_stm) {
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    ControlDestination dest(&then, &exit, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // then part.
-      if (then.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then label was bound.
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    ControlDestination dest(&exit, &else_, false);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.true_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // else part.
-      if (else_.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    } else {
-      // The else label was bound.
-      Visit(node->else_statement());
-    }
-
-  } else {
-    ASSERT(!has_then_stm && !has_else_stm);
-    // We only care about the condition's side effects (not its value
-    // or control flow effect).  LoadCondition is called without
-    // forcing control flow.
-    ControlDestination dest(&exit, &exit, true);
-    LoadCondition(node->condition(), &dest, false);
-    if (!dest.is_used()) {
-      // We got a value on the frame rather than (or in addition to)
-      // control flow.
-      frame_->Drop();
-    }
-  }
-
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result return_value = frame_->Pop();
-  masm()->positions_recorder()->WriteRecordedPositions();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump(&return_value);
-  } else {
-    frame_->PrepareForReturn();
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump(&return_value);
-    } else {
-      function_return_.Bind(&return_value);
-      GenerateReturnSequence(&return_value);
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
-  // The return value is a live (but not currently reference counted)
-  // reference to rax.  This is safe because the current frame does not
-  // contain a reference to rax (it is prepared for the return by spilling
-  // all registers).
-  if (FLAG_trace) {
-    frame_->Push(return_value);
-    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-  return_value->ToRegister(rax);
-
-  // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-
-  // Leave the frame and return popping the arguments and the
-  // receiver.
-  frame_->Exit();
-  int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
-  __ Ret(arguments_bytes, rcx);
-  DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Add padding that will be overwritten by a debugger breakpoint.
-  // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
-  // with length 7 (3 + 1 + 3).
-  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
-  for (int i = 0; i < kPadding; ++i) {
-    masm_->int3();
-  }
-  // Check that the size of the code used for returning is large enough
-  // for the debugger's requirements.
-  ASSERT(Assembler::kJSReturnSequenceLength <=
-         masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result context;
-  if (node->is_catch_block()) {
-    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    context = frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-
-  // Update context local.
-  frame_->SaveContextRegister();
-
-  // Verify that the runtime call result and rsi agree.
-  if (FLAG_debug_code) {
-    __ cmpq(context.reg(), rsi);
-    __ Assert(equal, "Runtime::NewContext should end up in rsi");
-  }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
-  // Update context local.
-  frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  // Compile the switch value.
-  Load(node->tag());
-
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  JumpTarget next_test;
-  // Compile the case label expressions and comparisons.  Exit early
-  // if a comparison is unconditionally true.  The target next_test is
-  // bound before the loop in order to indicate control flow to the
-  // first comparison.
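-  // Note that case labels are arbitrary expressions; in (illustrative)
-  //   switch (x) { case f(): ... }
-  // f() is evaluated here and compared against the duplicated switch value.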
-  next_test.Bind();
-  for (int i = 0; i < length && !next_test.is_unused(); i++) {
-    CaseClause* clause = cases->at(i);
-    // The default is not a test, but remember it for later.
-    if (clause->is_default()) {
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case comparison");
-    // We recycle the same target next_test for each test.  Bind it if
-    // the previous test has not done so and then unuse it for the
-    // loop.
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    next_test.Unuse();
-
-    // Duplicate the switch value.
-    frame_->Dup();
-
-    // Compile the label expression.
-    Load(clause->label());
-
-    // Compare and branch to the body if true or the next test if
-    // false.  Prefer the next test as a fall through.
-    ControlDestination dest(clause->body_target(), &next_test, false);
-    Comparison(node, equal, true, &dest);
-
-    // If the comparison fell through to the true target, jump to the
-    // actual body.
-    if (dest.true_was_fall_through()) {
-      clause->body_target()->Unuse();
-      clause->body_target()->Jump();
-    }
-  }
-
-  // If there was control flow to a next test from the last one
-  // compiled, compile a jump to the default or break target.
-  if (!next_test.is_unused()) {
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    // Drop the switch value.
-    frame_->Drop();
-    if (default_clause != NULL) {
-      default_clause->body_target()->Jump();
-    } else {
-      node->break_target()->Jump();
-    }
-  }
-
-  // The last instruction emitted was a jump, either to the default
-  // clause or the break target, or else to a case body from the loop
-  // that compiles the tests.
-  ASSERT(!has_valid_frame());
-  // Compile case bodies as needed.
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-
-    // There are two ways to reach the body: from the corresponding
-    // test or as the fall through of the previous body.
-    if (clause->body_target()->is_linked() || has_valid_frame()) {
-      if (clause->body_target()->is_linked()) {
-        if (has_valid_frame()) {
-          // If we have both a jump to the test and a fall through, put
-          // a jump on the fall through path to avoid the dropping of
-          // the switch value on the test path.  The exception is the
-          // default which has already had the switch value dropped.
-          if (clause->is_default()) {
-            clause->body_target()->Bind();
-          } else {
-            JumpTarget body;
-            body.Jump();
-            clause->body_target()->Bind();
-            frame_->Drop();
-            body.Bind();
-          }
-        } else {
-          // No fall through to worry about.
-          clause->body_target()->Bind();
-          if (!clause->is_default()) {
-            frame_->Drop();
-          }
-        }
-      } else {
-        // Otherwise, we have only fall through.
-        ASSERT(has_valid_frame());
-      }
-
-      // We are now prepared to compile the body.
-      Comment cmnt(masm_, "[ Case body");
-      VisitStatements(clause->statements());
-    }
-    clause->body_target()->Unuse();
-  }
-
-  // We may not have a valid frame here so bind the break target only
-  // if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  // Label the top of the loop for the backward jump if necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // Use the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      // No need to label it.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      break;
-    case DONT_KNOW:
-      // Continue is the test, so use the backward body target.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control flow can fall off the end of the body, jump back
-      // to the top and bind the break target at the exit.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // We may have had continues or breaks in the body.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        ControlDestination dest(&body, node->break_target(), false);
-        LoadCondition(node->cond(), &dest, true);
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-  }
-
-  DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
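-  // For example (illustrative), in
-  //   while (g = function () { return 0; }, g()) { ... }
-  // duplicating the condition would compile the function literal twice.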
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop with the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is the test at the bottom, no need to label the test
-        // at the top.  The body is a backward target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Label the test at the top as the continue target.  The body
-        // is a forward-only target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      }
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // The loop body has been labeled with the continue target.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        // If we have chosen to recompile the test at the bottom,
-        // then it is the continue target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here and thus an invalid fall-through).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // If we have chosen not to recompile the test at the bottom,
-        // jump back to the one at the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
-  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
-  if (slot->type() == Slot::LOCAL) {
-    frame_->SetTypeForLocalAt(slot->index(), info);
-  } else {
-    frame_->SetTypeForParamAt(slot->index(), info);
-  }
-  if (FLAG_debug_code && info.IsSmi()) {
-    if (slot->type() == Slot::LOCAL) {
-      frame_->PushLocalAt(slot->index());
-    } else {
-      frame_->PushParameterAt(slot->index());
-    }
-    Result var = frame_->Pop();
-    var.ToRegister();
-    __ AbortIfNotSmi(var.reg());
-  }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
-  // A fast smi loop is a for loop with an initializer
-  // that is a simple assignment of a smi to a stack variable,
-  // a test that is a simple test of that variable against a smi constant,
-  // and a step that is an increment or decrement of the variable, and
-  // where the variable isn't modified in the loop body.
-  // This guarantees that the variable is always a smi.
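-  // For example (illustrative), a loop such as
-  //   for (var i = 0; i < 100; i++) { sum += i; }
-  // qualifies, so i can be tracked as a smi throughout.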
-
-  Variable* loop_var = node->loop_variable();
-  Smi* initial_value = *Handle<Smi>::cast(node->init()
-      ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
-  Smi* limit_value = *Handle<Smi>::cast(
-      node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
-  Token::Value compare_op =
-      node->cond()->AsCompareOperation()->op();
-  bool increments =
-      node->next()->StatementAsCountOperation()->op() == Token::INC;
-
-  // Check that the condition isn't initially false.
-  bool initially_false = false;
-  int initial_int_value = initial_value->value();
-  int limit_int_value = limit_value->value();
-  switch (compare_op) {
-    case Token::LT:
-      initially_false = initial_int_value >= limit_int_value;
-      break;
-    case Token::LTE:
-      initially_false = initial_int_value > limit_int_value;
-      break;
-    case Token::GT:
-      initially_false = initial_int_value <= limit_int_value;
-      break;
-    case Token::GTE:
-      initially_false = initial_int_value < limit_int_value;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  if (initially_false) return;
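-  // For example (illustrative), "for (var i = 10; i < 5; i++) {}" falls
-  // into this case and no loop code is emitted.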
-
-  // Only check loop condition at the end.
-
-  Visit(node->init());
-
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  // Set type and stack height of BreakTargets.
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  IncrementLoopNesting();
-  loop.Bind();
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  // Set the number type of the loop variable to smi.
-  SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
-  Visit(node->body());
-
-  if (node->continue_target()->is_linked()) {
-    node->continue_target()->Bind();
-  }
-
-  if (has_valid_frame()) {
-    CodeForStatementPosition(node);
-    Slot* loop_var_slot = loop_var->AsSlot();
-    if (loop_var_slot->type() == Slot::LOCAL) {
-      frame_->TakeLocalAt(loop_var_slot->index());
-    } else {
-      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
-      frame_->TakeParameterAt(loop_var_slot->index());
-    }
-    Result loop_var_result = frame_->Pop();
-    if (!loop_var_result.is_register()) {
-      loop_var_result.ToRegister();
-    }
-    Register loop_var_reg = loop_var_result.reg();
-    frame_->Spill(loop_var_reg);
-    if (increments) {
-      __ SmiAddConstant(loop_var_reg,
-                        loop_var_reg,
-                        Smi::FromInt(1));
-    } else {
-      __ SmiSubConstant(loop_var_reg,
-                        loop_var_reg,
-                        Smi::FromInt(1));
-    }
-
-    frame_->Push(&loop_var_result);
-    if (loop_var_slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(loop_var_slot->index());
-    } else {
-      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
-      frame_->StoreToParameterAt(loop_var_slot->index());
-    }
-    frame_->Drop();
-
-    __ SmiCompare(loop_var_reg, limit_value);
-    Condition condition;
-    switch (compare_op) {
-      case Token::LT:
-        condition = less;
-        break;
-      case Token::LTE:
-        condition = less_equal;
-        break;
-      case Token::GT:
-        condition = greater;
-        break;
-      case Token::GTE:
-        condition = greater_equal;
-        break;
-      default:
-        condition = never;
-        UNREACHABLE();
-    }
-    loop.Branch(condition);
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-
-  if (node->is_fast_smi_loop()) {
-    GenerateFastSmiLoop(node);
-    return;
-  }
-
-  // Compile the init expression if present.
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything else.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-
-  // Target for backward edge if no test at the bottom, otherwise
-  // unused.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-  // Target for backward edge if there is a test at the bottom,
-  // otherwise used as target for test at the top.
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop.
-      if (node->next() == NULL) {
-        // Use the continue target if there is no update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // Otherwise use the backward loop target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is either the update expression or the test at the
-        // bottom, no need to label the test at the top.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else if (node->next() == NULL) {
-        // We are not recompiling the test at the bottom and there is no
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // We are not recompiling the test at the bottom and there is an
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  Visit(node->body());
-
-  // If there is an update expression, compile it if necessary.
-  if (node->next() != NULL) {
-    if (node->continue_target()->is_linked()) {
-      node->continue_target()->Bind();
-    }
-
-    // Control can reach the update by falling out of the body or by a
-    // continue.
-    if (has_valid_frame()) {
-      // Record the source position of the statement as this code which
-      // is after the code for the body actually belongs to the loop
-      // statement and not the body.
-      CodeForStatementPosition(node);
-      Visit(node->next());
-    }
-  }
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      if (has_valid_frame()) {
-        if (node->next() == NULL) {
-          node->continue_target()->Jump();
-        } else {
-          loop.Jump();
-        }
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        if (node->continue_target()->is_linked()) {
-          // We can have dangling jumps to the continue target if there
-          // was no update expression.
-          node->continue_target()->Bind();
-        }
-        // Control can reach the test at the bottom by falling out of
-        // the body, by a continue in the body, or from the update
-        // expression.
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // Otherwise, jump back to the test at the top.
-        if (has_valid_frame()) {
-          if (node->next() == NULL) {
-            node->continue_target()->Jump();
-          } else {
-            loop.Jump();
-          }
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  LoadAndSpill(node->enumerable());
-
-  // Both SpiderMonkey and kjs ignore null and undefined, in contrast
-  // to the specification: section 12.6.4 mandates a call to ToObject.
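-  // For example (illustrative), "for (var p in null) {}" simply performs
-  // zero iterations rather than throwing.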
-  frame_->EmitPop(rax);
-
-  // rax: value to be iterated over
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  exit.Branch(equal);
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  exit.Branch(equal);
-
-  // Stack layout in body:
-  // [iteration counter (smi)] <- slot 0
-  // [length of array]         <- slot 1
-  // [FixedArray]              <- slot 2
-  // [Map or 0]                <- slot 3
-  // [Object]                  <- slot 4
-
-  // Check if enumerable is already a JSObject
-  // rax: value to be iterated over
-  Condition is_smi = masm_->CheckSmi(rax);
-  primitive.Branch(is_smi);
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-  jsobject.Branch(above_equal);
-
-  primitive.Bind();
-  frame_->EmitPush(rax);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
-  // The function call returns the value in rax, which is where we want
-  // it below.
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // rax: value to be iterated over
-  frame_->EmitPush(rax);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ movq(rcx, rax);
-  loop.Bind();
-  // Check that there are no elements.
-  __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
-  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
-  call_runtime.Branch(not_equal);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in rbx for the subsequent
-  // prototype load.
-  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
-  __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
-  call_runtime.Branch(equal);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
-  is_smi = masm_->CheckSmi(rdx);
-  call_runtime.Branch(is_smi);
-  // For all objects but the receiver, check that the cache is empty.
-  __ cmpq(rcx, rax);
-  check_prototype.Branch(equal);
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
-  call_runtime.Branch(not_equal);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  __ CompareRoot(rcx, Heap::kNullValueRootIndex);
-  loop.Branch(not_equal);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a Map, we can do a fast modification check.
-  // Otherwise, we got a FixedArray, and we have to do a slow check.
-  // rax: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ movq(rdx, rax);
-  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
-  fixed_array.Branch(not_equal);
-
-  use_cache.Bind();
-  // Get enum cache
-  // rax: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ movq(rcx, rax);
-  __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
-  // Get the bridge array held in the enumeration index field.
-  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
-  // Get the cache from the bridge array.
-  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(rax);  // <- slot 3
-  frame_->EmitPush(rdx);  // <- slot 2
-  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
-  frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
-  entry.Jump();
-
-  fixed_array.Bind();
-  // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
-  frame_->EmitPush(rax);  // <- slot 2
-
-  // Push the length of the array and the initial index onto the stack.
-  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
-  frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
-
-  // Condition.
-  entry.Bind();
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  __ movq(rax, frame_->ElementAt(0));  // load the current count
-  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
-  node->break_target()->Branch(below_equal);
-
-  // Get the i'th entry of the array.
-  __ movq(rdx, frame_->ElementAt(2));
-  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx,
-          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
-  // Get the expected map from the stack or a zero map in the
-  // permanent slow case.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
-  __ movq(rdx, frame_->ElementAt(3));
-  // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
-  // rdx: expected map value
-  __ movq(rcx, frame_->ElementAt(4));
-  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ cmpq(rcx, rdx);
-  end_del_check.Branch(equal);
-
-  // Convert the entry to a string (or a smi 0 if it isn't a property
-  // anymore).
-  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(rbx);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
-  __ movq(rbx, rax);
-
-  // If the property has been removed while iterating, we just skip it.
-  __ Cmp(rbx, Smi::FromInt(0));
-  node->continue_target()->Branch(equal);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.  rbx: i'th entry of the enum cache (or string thereof).
-  frame_->EmitPush(rbx);
-  { Reference each(this, node->each());
-    // Loading a reference may leave the frame in an unspilled state.
-    frame_->SpillAll();
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        frame_->EmitPush(frame_->ElementAt(each.size()));
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop(2);  // Drop the original and the copy of the element.
-      } else {
-        // If the reference has size zero then we can use the value below
-        // the reference as if it were above the reference, instead of pushing
-        // a new copy of it above the reference.
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop();  // Drop the original of the element.
-      }
-    }
-  }
-  // Unloading a reference may leave the frame in an unspilled state.
-  frame_->SpillAll();
-
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  VisitAndSpill(node->body());
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(rax);
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  frame_->EmitPush(rax);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
-  frame_->EmitPush(rax);
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  VisitStatementsAndSpill(node->catch_block()->statements());
-  if (has_valid_frame()) {
-    exit.Jump();
-  }
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
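-  // For example (illustrative), in
-  //   for (;;) { try { break; } catch (e) {} }
-  // the break escapes the try block and must unlink its handler first.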
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // Make sure that there's nothing left on the stack above the
-  // handler structure.
-  if (FLAG_debug_code) {
-    __ movq(kScratchRegister, handler_address);
-    __ cmpq(rsp, Operand(kScratchRegister, 0));
-    __ Assert(equal, "stack pointer should point to top handler");
-  }
-
-  // If we can fall off the end of the try block, unlink from try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    __ movq(kScratchRegister, handler_address);
-    frame_->EmitPop(Operand(kScratchRegister, 0));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing targets that
-  // have been jumped to.  Deallocate each shadow target.
-  Result return_value;
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from try chain; be careful not to destroy the TOS if
-      // there is one.
-      if (i == kReturnShadowIndex) {
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(rax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (eg, for...in) may have left stuff on the stack.
-      __ movq(kScratchRegister, handler_address);
-      __ movq(rsp, Operand(kScratchRegister, 0));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      __ movq(kScratchRegister, handler_address);
-      frame_->EmitPop(Operand(kScratchRegister, 0));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
-        shadows[i]->other_target()->Jump(&return_value);
-      } else {
-        shadows[i]->other_target()->Jump();
-      }
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: Used to keep track of reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
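-  // For example (illustrative), "try { return 1; } finally { g(); }" enters
-  // the finally block in the JUMPING state via the return shadow target.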
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(rax);
-  // In case of thrown exceptions, this is where we continue.
-  __ Move(rcx, Smi::FromInt(THROWING));
-  finally_block.Jump();
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    __ movq(kScratchRegister, handler_address);
-    frame_->EmitPop(Operand(kScratchRegister, 0));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in rcx, then jump around the unlink blocks if any.
-    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-    __ Move(rcx, Smi::FromInt(FALLING));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // on the virtual frame.  We must preserve it until it is
-      // pushed.
-      if (i == kReturnShadowIndex) {
-        Result return_value;
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(rax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (eg, for...in) may have left stuff on the
-      // stack.
-      __ movq(kScratchRegister, handler_address);
-      __ movq(rsp, Operand(kScratchRegister, 0));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      __ movq(kScratchRegister, handler_address);
-      frame_->EmitPop(Operand(kScratchRegister, 0));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this target shadowed the function return, materialize
-        // the return value on the stack.
-        frame_->EmitPush(rax);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-      }
-      __ Move(rcx, Smi::FromInt(JUMPING + i));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(rcx);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  VisitStatementsAndSpill(node->finally_block()->statements());
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(rcx);
-    frame_->EmitPop(rax);
-  }
-
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      BreakTarget* original = shadows[i]->other_target();
-      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
-      if (i == kReturnShadowIndex) {
-        // The return value is (already) in rax.
-        Result return_value = allocator_->Allocate(rax);
-        ASSERT(return_value.is_valid());
-        if (function_return_is_shadowed_) {
-          original->Branch(equal, &return_value);
-        } else {
-          // Branch around the preparation for return which may emit
-          // code.
-          JumpTarget skip;
-          skip.Branch(not_equal);
-          frame_->PrepareForReturn();
-          original->Jump(&return_value);
-          skip.Bind();
-        }
-      } else {
-        original->Branch(equal);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ SmiCompare(rcx, Smi::FromInt(THROWING));
-    exit.Branch(not_equal);
-
-    // Rethrow exception.
-    frame_->EmitPush(rax);  // undo pop from above
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Spill everything, even constants, to the frame.
-  frame_->SpillAll();
-
-  frame_->DebugBreak();
-  // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // The inevitable call will sync frame elements to memory anyway, so
-  // we do it eagerly to allow us to push the arguments directly into
-  // place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame_->Push(function_info);
-    Result answer = frame_->CallStub(&stub, 1);
-    frame_->Push(&answer);
-  } else {
-    // Call the runtime to instantiate the function based on the
-    // shared function info.
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(function_info);
-    frame_->EmitPush(pretenure
-                     ? FACTORY->true_value()
-                     : FACTORY->false_value());
-    Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
-    frame_->Push(&result);
-  }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
-
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  // Check for stack-overflow exception.
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    return;
-  }
-  InstantiateFunction(function_info, node->pretenure());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  InstantiateFunction(node->shared_function_info(), false);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then;
-  JumpTarget else_;
-  JumpTarget exit;
-  ControlDestination dest(&then, &else_, true);
-  LoadCondition(node->condition(), &dest, true);
-
-  if (dest.false_was_fall_through()) {
-    // The else target was bound, so we compile the else part first.
-    Load(node->else_expression());
-
-    if (then.is_linked()) {
-      exit.Jump();
-      then.Bind();
-      Load(node->then_expression());
-    }
-  } else {
-    // The then target was bound, so we compile the then part first.
-    Load(node->then_expression());
-
-    if (else_.is_linked()) {
-      exit.Jump();
-      else_.Bind();
-      Load(node->else_expression());
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    JumpTarget slow;
-    JumpTarget done;
-    Result value;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &value,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // A runtime call is inevitable.  We eagerly sync frame elements
-    // to memory so that we can push the arguments directly into place
-    // on top of the frame.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
-    if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind(&value);
-    frame_->Push(&value);
-
-  } else if (slot->var()->mode() == Variable::CONST) {
-    // Const slots may contain 'the hole' value (the constant hasn't been
-    // initialized yet) which needs to be converted into the 'undefined'
-    // value.
-    //
-    // We currently spill the virtual frame because constants use the
-    // potentially unsafe direct-frame access of SlotOperand.
-    VirtualFrame::SpilledScope spilled_scope;
-    Comment cmnt(masm_, "[ Load const");
-    JumpTarget exit;
-    __ movq(rcx, SlotOperand(slot, rcx));
-    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
-    exit.Branch(not_equal);
-    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
-    exit.Bind();
-    frame_->EmitPush(rcx);
-
-  } else if (slot->type() == Slot::PARAMETER) {
-    frame_->PushParameterAt(slot->index());
-
-  } else if (slot->type() == Slot::LOCAL) {
-    frame_->PushLocalAt(slot->index());
-
-  } else {
-    // The remaining slot types (LOOKUP and GLOBAL) cannot reach
-    // here.
-    //
-    // The use of SlotOperand below is safe for an unspilled frame
-    // because it will always be a context slot.
-    ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Pop the loaded value from the stack.
-  Result value = frame_->Pop();
-
-  // If the loaded value is a constant, we know whether the arguments
-  // object has been lazily loaded yet.
-  if (value.is_constant()) {
-    if (value.handle()->IsArgumentsMarker()) {
-      Result arguments = StoreArgumentsObject(false);
-      frame_->Push(&arguments);
-    } else {
-      frame_->Push(&value);
-    }
-    return;
-  }
-
-  // The loaded value is in a register. If it is the sentinel that
-  // indicates that we haven't loaded the arguments object yet, we
-  // need to do it now.
-  JumpTarget exit;
-  __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
-  frame_->Push(&value);
-  exit.Branch(not_equal);
-  Result arguments = StoreArgumentsObject(false);
-  frame_->SetElementAt(0, &arguments);
-  exit.Bind();
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    JumpTarget* slow) {
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
-  Register context = rsi;
-  Result tmp = allocator_->Allocate();
-  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
-                Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      // Load next context in chain.
-      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.  If we have reached an eval scope, we check
-    // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s->is_eval_scope()) {
-    // Loop up the context chain.  There is no frame effect so it is
-    // safe to use raw labels here.
-    Label next, fast;
-    if (!context.is(tmp.reg())) {
-      __ movq(tmp.reg(), context);
-    }
-    // Load map for comparison into register, outside loop.
-    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
-    __ bind(&next);
-    // Terminate at global context.
-    __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
-    __ j(equal, &fast);
-    // Check that extension is NULL.
-    __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
-    slow->Branch(not_equal);
-    // Load next context in chain.
-    __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
-    __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-    __ jmp(&next);
-    __ bind(&fast);
-  }
-  tmp.Unuse();
-
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  LoadGlobal();
-  frame_->Push(slot->var()->name());
-  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT;
-  Result answer = frame_->CallLoadIC(mode);
-  // A test rax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test rax
-  // instruction here.
-  masm_->nop();
-  return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    Result* result,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    done->Jump(result);
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      // Allocate a fresh register to use as a temp in
-      // ContextSlotOperandCheckExtensions and to hold the result
-      // value.
-      *result = allocator_->Allocate();
-      ASSERT(result->is_valid());
-      __ movq(result->reg(),
-              ContextSlotOperandCheckExtensions(potential_slot,
-                                                *result,
-                                                slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
-        done->Branch(not_equal, result);
-        __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
-      }
-      done->Jump(result);
-    } else if (rewrite != NULL) {
-      // Generate fast case for argument loads.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          Result arguments = allocator()->Allocate();
-          ASSERT(arguments.is_valid());
-          __ movq(arguments.reg(),
-                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                    arguments,
-                                                    slow));
-          frame_->Push(&arguments);
-          frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad();
-          done->Jump(result);
-        }
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.  Since the call is inevitable,
-    // we eagerly sync the virtual frame so we can directly push the
-    // arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(slot->var()->name());
-
-    Result value;
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores attribute
-      // (e.g. READ_ONLY) of context slot so that we can initialize const
-      // properties (introduced via eval("const foo = (some expr);")). Also,
-      // uses the current function context instead of the top context.
-      //
-      // Note that we must declare foo upon entry of eval(), via a
-      // context slot declaration, but we cannot initialize it at the same
-      // time, because the const declaration may be at the end of the eval
-      // code (sigh...) and the const variable may have been used before
-      // (where its value is 'undefined'). Thus, we can only do the
-      // initialization when we actually encounter the expression and when
-      // the expression operands are defined and valid, and thus we need the
-      // split into 2 operations: declaration of the context slot followed
-      // by initialization.
-      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling chained assignment
-    // expressions.
-    frame_->Push(&value);
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-
-    JumpTarget exit;
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is executed,
-      // the code is identical to a normal store (see below).
-      //
-      // We spill the frame in the code below because the direct-frame
-      // access of SlotOperand is potentially unsafe with an unspilled
-      // frame.
-      VirtualFrame::SpilledScope spilled_scope;
-      Comment cmnt(masm_, "[ Init const");
-      __ movq(rcx, SlotOperand(slot, rcx));
-      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
-      exit.Branch(not_equal);
-    }
-
-    // We must execute the store.  Storing a variable must keep the (new)
-    // value on the stack. This is necessary for compiling assignment
-    // expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because of const declarations which will initialize
-    // consts to 'the hole' value and by doing so, end up calling this code.
-    if (slot->type() == Slot::PARAMETER) {
-      frame_->StoreToParameterAt(slot->index());
-    } else if (slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(slot->index());
-    } else {
-      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
-      //
-      // The use of SlotOperand below is safe for an unspilled frame
-      // because the slot is a context slot.
-      ASSERT(slot->type() == Slot::CONTEXT);
-      frame_->Dup();
-      Result value = frame_->Pop();
-      value.ToRegister();
-      Result start = allocator_->Allocate();
-      ASSERT(start.is_valid());
-      __ movq(SlotOperand(slot, start.reg()), value.reg());
-      // RecordWrite may destroy the value registers.
-      //
-      // TODO(204): Avoid actually spilling when the value is not
-      // needed (probably the common case).
-      frame_->Spill(value.reg());
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
-      // The results start, value, and temp are freed when they go out
-      // of scope.
-    }
-
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-  Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  Comment cmnt(masm_, "[ VariableProxy");
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
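-  // During scope analysis, local variables and parameters are rewritten
-  // to slot or property accesses; only global variables have no rewrite.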
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  Comment cmnt(masm_, "[ Literal");
-  frame_->Push(node->handle());
-}
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
-  UNIMPLEMENTED();
-  // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
-  return false;
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function.  Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
-  DeferredRegExpLiteral(Register boilerplate,
-                        Register literals,
-                        RegExpLiteral* node)
-      : boilerplate_(boilerplate), literals_(literals), node_(node) {
-    set_comment("[ DeferredRegExpLiteral");
-  }
-
-  void Generate();
-
- private:
-  Register boilerplate_;
-  Register literals_;
-  RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
-  // Since the entry is undefined, we call the runtime system to
-  // compute the literal.
-  // Literal array (0).
-  __ push(literals_);
-  // Literal index (1).
-  __ Push(Smi::FromInt(node_->literal_index()));
-  // RegExp pattern (2).
-  __ Push(node_->pattern());
-  // RegExp flags (3).
-  __ Push(node_->flags());
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
-  DeferredAllocateInNewSpace(int size,
-                             Register target,
-                             int registers_to_save = 0)
-      : size_(size), target_(target), registers_to_save_(registers_to_save) {
-    ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
-    set_comment("[ DeferredAllocateInNewSpace");
-  }
-  void Generate();
-
- private:
-  int size_;
-  Register target_;
-  int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
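-  // Save the registers named in the registers_to_save_ bitmask.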
-  for (int i = 0; i < kNumRegs; i++) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ push(save_register);
-    }
-  }
-  __ Push(Smi::FromInt(size_));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  if (!target_.is(rax)) {
-    __ movq(target_, rax);
-  }
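-  // Restore the saved registers in reverse order.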
-  for (int i = kNumRegs - 1; i >= 0; i--) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ pop(save_register);
-    }
-  }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  Comment cmnt(masm_, "[ RegExp Literal");
-
-  // Retrieve the literals array and check the allocated entry.  Begin
-  // with a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  // Load the literal at the ast saved index.
-  Result boilerplate = allocator_->Allocate();
-  ASSERT(boilerplate.is_valid());
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
-  // Check whether we need to materialize the RegExp object.  If so,
-  // jump to the deferred code passing the literals array.
-  DeferredRegExpLiteral* deferred =
-      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
-  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
-  deferred->Branch(equal);
-  deferred->BindExit();
-
-  // The boilerplate register now contains the RegExp object.
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
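-  // Try to allocate the clone inline in new space; if allocation fails,
-  // the deferred code falls back to a runtime allocation.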
-  DeferredAllocateInNewSpace* allocate_fallback =
-      new DeferredAllocateInNewSpace(size, literals.reg());
-  frame_->Push(&boilerplate);
-  frame_->SpillTop();
-  __ AllocateInNewSpace(size,
-                        literals.reg(),
-                        tmp.reg(),
-                        no_reg,
-                        allocate_fallback->entry_label(),
-                        TAG_OBJECT);
-  allocate_fallback->BindExit();
-  boilerplate = frame_->Pop();
-  // Copy from boilerplate to clone and return clone.
-
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
-    __ movq(FieldOperand(literals.reg(), i), tmp.reg());
-  }
-  frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-  // Literal array.
-  frame_->Push(&literals);
-  // Literal index.
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  // Constant properties.
-  frame_->Push(node->constant_properties());
-  // Should the object literal have fast elements?
-  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
-  Result clone;
-  if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->Push(&clone);
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through.
-      case ObjectLiteral::Property::COMPUTED: {
-        Handle<Object> key(property->key()->handle());
-        if (key->IsSymbol()) {
-          // Duplicate the object as the IC receiver.
-          frame_->Dup();
-          Load(property->value());
-          if (property->emit_store()) {
-            Result ignored =
-                frame_->CallStoreIC(Handle<String>::cast(key), false,
-                                    strict_mode_flag());
-            // A test rax instruction following the store IC call would
-            // indicate the presence of an inlined version of the
-            // store. Add a nop to indicate that there is no such
-            // inlined version.
-            __ nop();
-          } else {
-            frame_->Drop(2);
-          }
-          break;
-        }
-        // Fall through
-      }
-      case ObjectLiteral::Property::PROTOTYPE: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        Load(property->value());
-        if (property->emit_store()) {
-          frame_->Push(Smi::FromInt(NONE));   // PropertyAttributes
-          // Ignore the result.
-          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
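-        // Smi 1 tells Runtime::kDefineAccessor to define a setter.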
-        frame_->Push(Smi::FromInt(1));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
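-        // Smi 0 tells Runtime::kDefineAccessor to define a getter.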
-        frame_->Push(Smi::FromInt(0));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      default: UNREACHABLE();
-    }
-  }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  frame_->Push(&literals);
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  frame_->Push(node->constant_elements());
-  int length = node->values()->length();
-  Result clone;
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
-  } else if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-  }
-  frame_->Push(&clone);
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < length; i++) {
-    Expression* value = node->values()->at(i);
-
-    if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
-      continue;
-    }
-
-    // The property must be set by generated code.
-    Load(value);
-
-    // Get the property value off the stack.
-    Result prop_value = frame_->Pop();
-    prop_value.ToRegister();
-
-    // Fetch the array literal while leaving a copy on the stack and
-    // use it to get the elements array.
-    frame_->Dup();
-    Result elements = frame_->Pop();
-    elements.ToRegister();
-    frame_->Spill(elements.reg());
-    // Get the elements FixedArray.
-    __ movq(elements.reg(),
-            FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
-    // Update the write barrier for the array address.
-    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_valid());
-    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
-  }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  ASSERT(!in_spilled_code());
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  Result result =
-      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so this is never an assignment to a variable and
-    // there must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      frame()->Dup();
-    }
-    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare
-  // for that, add an extra copy of the receiver to the frame, so that it
-  // can be converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame()->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else if (var != NULL) {
-      // The LoadIC stub expects the object in rax.
-      // Freeing rax causes the code generator to load the global into it.
-      frame_->Spill(rax);
-      LoadGlobal();
-    } else {
-      frame()->Dup();
-    }
-    Result value = EmitNamedLoad(name, var != NULL);
-    frame()->Push(&value);
-    Load(node->value());
-
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    Result value = frame()->Pop();
-    frame()->Push(prop->obj());
-    frame()->Push(&value);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  Result answer = EmitNamedStore(name, is_contextual);
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment.  Swap
-      // the receiver and the value of the assignment expression.
-      Result result = frame()->Pop();
-      Result receiver = frame()->Pop();
-      frame()->Push(&result);
-      frame()->Push(&receiver);
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare
-  // for that, add an extra copy of the receiver to the frame, so that it
-  // can be converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame()->PushElementAt(1);
-    frame()->PushElementAt(1);
-    Result value = EmitKeyedLoad();
-    frame()->Push(&value);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  Result answer = EmitKeyedStore(prop->key()->type());
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Result result = frame()->Pop();
-    Result receiver = frame()->Pop();
-    frame()->Push(&result);
-    frame()->Push(&receiver);
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call doesn't actually return, but the code generator
-    // will still generate code and expects a certain frame height.
-    frame()->Push(&result);
-  }
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  Comment cmnt(masm_, "[ Throw");
-  Load(node->exception());
-  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  Comment cmnt(masm_, "[ Property");
-  Reference property(this, node);
-  property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  Comment cmnt(masm_, "[ Call");
-
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Check if the function is a variable or a property.
-  Expression* function = node->expression();
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved on cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare the stack for the call to the resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->Push(FACTORY->undefined_value());
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // A Result to hold the outcome of the function resolution and the
-    // final result of the eval call.
-    Result result;
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                                     NOT_INSIDE_TYPEOF,
-                                                     &slow);
-      frame_->Push(&fun);
-      if (arg_count > 0) {
-        frame_->PushElementAt(arg_count);
-      } else {
-        frame_->Push(FACTORY->undefined_value());
-      }
-      frame_->PushParameterAt(-1);
-
-      // Push the strict mode flag.
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-      // Resolve the call.
-      result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump(&result);
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    frame_->PushElementAt(arg_count + 1);
-    if (arg_count > 0) {
-      frame_->PushElementAt(arg_count);
-    } else {
-      frame_->Push(FACTORY->undefined_value());
-    }
-    frame_->PushParameterAt(-1);
-
-    // Push the strict mode flag.
-    frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-    // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind(&result);
-
-    // The runtime call returns a pair of values in rax (function) and
-    // rdx (receiver). Touch up the stack with the right values.
-    Result receiver = allocator_->Allocate(rdx);
-    frame_->SetElementAt(arg_count + 1, &result);
-    frame_->SetElementAt(arg_count, &receiver);
-    receiver.Unuse();
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    result = frame_->CallStub(&call_function, arg_count + 1);
-
-    // Restore the context and overwrite the function on the stack with
-    // the result.
-    frame_->RestoreContextRegister();
-    frame_->SetElementAt(0, &result);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Push the name of the function on the frame.
-    frame_->Push(var->name());
-
-    // Call the IC initialization code.
-    CodeForSourcePosition(node->position());
-    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
-                                       arg_count,
-                                       loop_nesting());
-    frame_->RestoreContextRegister();
-    // Replace the function on the stack with the result.
-    frame_->Push(&result);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-    Result function;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &function,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Load the function from the context.  Sync the frame so we can
-    // push the arguments directly into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(var->name());
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // The runtime call returns a pair of values in rax and rdx.  The
-    // looked-up function is in rax and the receiver is in rdx.  These
-    // register references are not ref counted here.  We spill them
-    // eagerly since they are arguments to an inevitable call (and are
-    // not sharable by the arguments).
-    ASSERT(!allocator()->is_used(rax));
-    frame_->EmitPush(rax);
-
-    // Load the receiver.
-    ASSERT(!allocator()->is_used(rdx));
-    frame_->EmitPush(rdx);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind(&function);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      call.Bind();
-    }
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Push the name of the function onto the frame.
-        frame_->Push(name);
-
-        // Call the IC initialization code.
-        CodeForSourcePosition(node->position());
-        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                           arg_count,
-                                           loop_nesting());
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the function to call from the property through a reference.
-      if (property->is_synthetic()) {
-        Reference ref(this, property, false);
-        ref.GetValue();
-        // Use global object as receiver.
-        LoadGlobalReceiver();
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the name of the function.
-        Load(property->key());
-
-        // Swap the name of the function and the receiver on the stack to
-        // follow the calling convention for call ICs.
-        Result key = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&key);
-        frame_->Push(&receiver);
-        key.Unuse();
-        receiver.Unuse();
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Place the key on top of stack and call the IC initialization code.
-        frame_->PushElementAt(arg_count + 1);
-        CodeForSourcePosition(node->position());
-        Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
-                                                arg_count,
-                                                loop_nesting());
-        frame_->Drop();  // Drop the key still on the stack.
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-    }
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver();
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-  }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
-
-  // Push the constructor on the stack.  If it's not a function, it's used
-  // as the receiver for CALL_NON_FUNCTION; otherwise the value on the stack
-  // is ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Result result = frame_->CallConstructor(arg_count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
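-  // Load the single argument into a register and split control on the
-  // smi check.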
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  value.Unuse();
-  destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  // Finally, we're expected to leave a value on the top of the stack.
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
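-  // Like GenerateIsSmi above, but the smi must also be non-negative.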
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
-  value.Unuse();
-  destination()->Split(non_negative_smi);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ LoadRoot(result_, Heap::kNanValueRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need two extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object.reg(),
-                                   index.reg(),
-                                   scratch.reg(),
-                                   result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
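As a cross-check, the fast path plus the two deferred exits above implement
the usual charCodeAt semantics: an in-range index yields the code unit, an
out-of-range index yields NaN, and a non-number index is routed through
conversion (omitted below). A minimal sketch for a one-byte string, with a
hypothetical helper name:

    #include <cmath>
    #include <string>

    // Sketch only; V8 additionally handles two-byte, cons, and external
    // strings in StringCharCodeAtGenerator.
    double CharCodeAt(const std::string& s, long index) {
      if (index < 0 || index >= static_cast<long>(s.size())) {
        return std::nan("");  // out of range: the spec requires NaN
      }
      return static_cast<unsigned char>(s[index]);  // code unit as a number
    }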
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Result code = frame_->Pop();
-  code.ToRegister();
-  ASSERT(code.is_valid());
-
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code.reg(), result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ Move(result_, Smi::FromInt(0));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need three extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch1 = allocator()->Allocate();
-  ASSERT(scratch1.is_valid());
-  Result scratch2 = allocator()->Allocate();
-  ASSERT(scratch2.is_valid());
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object.reg(),
-                               index.reg(),
-                               scratch1.reg(),
-                               scratch2.reg(),
-                               result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // It is a heap object - get map.
-  // Check if the object is a JS array or not.
-  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // It is a heap object - get map.
-  // Check if the object is a regexp.
-  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-
-  __ Move(kScratchRegister, FACTORY->null_value());
-  __ cmpq(obj.reg(), kScratchRegister);
-  destination()->true_target()->Branch(equal);
-
-  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-          Immediate(1 << Map::kIsUndetectable));
-  destination()->false_target()->Branch(not_zero);
-  __ movzxbq(kScratchRegister,
-             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
-  __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
-  destination()->false_target()->Branch(below);
-  __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
-  obj.Unuse();
-  destination()->Split(below_equal);
-}
-
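The sequence above amounts to the following predicate; a sketch with the
instance-type bounds passed in for V8's FIRST_JS_OBJECT_TYPE and
LAST_JS_OBJECT_TYPE constants, and a hypothetical helper name:

    // Sketch of the IsObject test: null counts as an object, undetectable
    // objects do not, and otherwise the instance type must lie in the JS
    // object range.
    bool IsObjectLike(bool is_smi, bool is_null, bool is_undetectable,
                      int instance_type, int first_js_object_type,
                      int last_js_object_type) {
      if (is_smi) return false;
      if (is_null) return true;           // typeof null == 'object'
      if (is_undetectable) return false;  // behaves like undefined
      return instance_type >= first_js_object_type &&
             instance_type <= last_js_object_type;
    }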
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  // typeof(arg) === 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // Check that this is an object.
-  __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether a String wrapper object is safe to use the
-// default valueOf on.  This code is called after the bit caching this
-// information in the map has been checked against the object's map, which is
-// expected in the map_result_ register.  On return the register map_result_
-// contains 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-      __ Assert(equal, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
-    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
-    __ j(equal, &false_result);
-
-    // Look for the valueOf symbol in the descriptor array, and indicate false
-    // if it is found.  The descriptor type is not checked, so a transition
-    // with that name produces a (harmless) false negative.
-    __ movq(map_result_,
-           FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch1_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
-    __ lea(scratch1_,
-           Operand(
-               map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
-    // Calculate location of the first key name.
-    __ addq(map_result_,
-            Immediate(FixedArray::kHeaderSize +
-                      DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array.  If one of them is
-    // the valueOf symbol, the result is false.
-    Label entry, loop;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(scratch2_, FieldOperand(map_result_, 0));
-    __ Cmp(scratch2_, FACTORY->value_of_symbol());
-    __ j(equal, &false_result);
-    __ addq(map_result_, Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmpq(map_result_, scratch1_);
-    __ j(not_equal, &loop);
-
-    // Reload the map, as register map_result_ was used as a temporary above.
-    __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If no valueOf property was found on the object, check that its
-    // prototype is the unmodified String prototype.  If not, the result is
-    // false.
-    __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
-    __ testq(scratch1_, Immediate(kSmiTagMask));
-    __ j(zero, &false_result);
-    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ movq(scratch2_,
-            Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ movq(scratch2_,
-            FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ cmpq(scratch1_,
-            ContextOperand(
-                scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ j(not_equal, &false_result);
-    // Set the bit in the map to indicate that the map has been checked as
-    // safe for the default valueOf, and set the true result.
-    __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ Set(map_result_, 1);
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ Set(map_result_, 0);
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();  // Pop the string wrapper.
-  obj.ToRegister();
-  ASSERT(obj.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj.reg());
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Result map_result = allocator()->Allocate();
-  ASSERT(map_result.is_valid());
-  __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-  destination()->true_target()->Branch(not_zero);
-
-  // We need an additional two scratch registers for the deferred code.
-  Result temp1 = allocator()->Allocate();
-  ASSERT(temp1.is_valid());
-  Result temp2 = allocator()->Allocate();
-  ASSERT(temp2.is_valid());
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
-  deferred->Branch(zero);
-  deferred->BindExit();
-  __ testq(map_result.reg(), map_result.reg());
-  obj.Unuse();
-  map_result.Unuse();
-  temp1.Unuse();
-  temp2.Unuse();
-  destination()->Split(not_equal);
-}
-
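Main line and deferred code together decide safety as follows; a sketch over
a flat array of descriptor key names, with a hypothetical helper name:

    #include <cstring>

    // Sketch: a String wrapper is safe for the default valueOf when no own
    // "valueOf" entry appears among its descriptor keys and its prototype is
    // still the unmodified String prototype.  The map bit tested on the main
    // line merely caches a previous true answer.
    bool SafeForDefaultValueOf(const char* const* descriptor_keys,
                               int key_count,
                               bool prototype_is_string_prototype) {
      for (int i = 0; i < key_count; i++) {
        if (std::strcmp(descriptor_keys[i], "valueOf") == 0) return false;
      }
      return prototype_is_string_prototype;
    }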
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
-  obj.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ movzxbl(kScratchRegister,
-             FieldOperand(kScratchRegister, Map::kBitFieldOffset));
-  __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
-  obj.Unuse();
-  destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  // Get the frame pointer for the calling frame.
-  Result fp = allocator()->Allocate();
-  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &check_frame_marker);
-  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-         Smi::FromInt(StackFrame::CONSTRUCT));
-  fp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Result fp = allocator_->Allocate();
-  Result result = allocator_->Allocate();
-  ASSERT(fp.is_valid() && result.is_valid());
-
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ movq(result.reg(),
-          Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  result.set_type_info(TypeInfo::Smi());
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(result.reg());
-  }
-  frame_->Push(&result);
-}
-
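The frame walk above reduces to a simple choice; a sketch with the adaptor
test abstracted to a flag, and a hypothetical helper name:

    // Sketch: without an arguments adaptor frame the formal parameter count
    // is the answer; with one, the actual argument count is read from the
    // adaptor frame's length slot.
    int ArgumentsLength(bool has_adaptor_frame, int formal_parameter_count,
                        int adaptor_frame_length) {
      return has_adaptor_frame ? adaptor_frame_length
                               : formal_parameter_count;
    }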
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave, null, function, non_function_constructor;
-  Load(args->at(0));  // Load the object.
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  frame_->Spill(obj.reg());
-
-  // If the object is a smi, we return null.
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  null.Branch(is_smi);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-
-  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
-  null.Branch(below);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
-  function.Branch(equal);
-
-  // Check if the constructor in the map is a function.
-  __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
-  non_function_constructor.Branch(not_equal);
-
-  // The obj register now contains the constructor function. Grab the
-  // instance class name from there.
-  __ movq(obj.reg(),
-          FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
-  __ movq(obj.reg(),
-          FieldOperand(obj.reg(),
-                       SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->Push(&obj);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  frame_->Push(FACTORY->function_class_symbol());
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  frame_->Push(FACTORY->Object_symbol());
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  frame_->Push(FACTORY->null_value());
-
-  // All done.
-  leave.Bind();
-}
-
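As a decision procedure, the jumps above compute the class as follows; a
sketch with the relevant instance-type constants passed in, and a
hypothetical helper name:

    #include <string>

    // Sketch of the ClassOf cascade: smis and non-JS objects yield null,
    // functions yield "Function", objects whose map's constructor is not a
    // function yield "Object", and everything else yields the constructor's
    // instance class name.
    std::string ClassOf(bool is_smi, int instance_type,
                        int first_js_object_type, int js_function_type,
                        bool constructor_is_function,
                        const std::string& instance_class_name) {
      if (is_smi || instance_type < first_js_object_type) return "null";
      if (instance_type == js_function_type) return "Function";
      if (!constructor_is_function) return "Object";
      return instance_class_name;
    }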
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  frame_->Dup();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  ASSERT(object.is_valid());
-  // if (object->IsSmi()) return object.
-  Condition is_smi = masm_->CheckSmi(object.reg());
-  leave.Branch(is_smi);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // if (!object->IsJSValue()) return object.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
-  leave.Branch(not_equal);
-  __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
-  object.Unuse();
-  frame_->SetElementAt(0, &temp);
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  Load(args->at(1));  // Load the value.
-  Result value = frame_->Pop();
-  Result object = frame_->Pop();
-  value.ToRegister();
-  object.ToRegister();
-
-  // if (object->IsSmi()) return value.
-  Condition is_smi = masm_->CheckSmi(object.reg());
-  leave.Branch(is_smi, &value);
-
-  // It is a heap object - get its map.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  // if (!object->IsJSValue()) return value.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
-  leave.Branch(not_equal, &value);
-
-  // Store the value.
-  __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  Result duplicate_value = allocator_->Allocate();
-  ASSERT(duplicate_value.is_valid());
-  __ movq(duplicate_value.reg(), value.reg());
-  // The object register is also overwritten by the write barrier and
-  // possibly aliased in the frame.
-  frame_->Spill(object.reg());
-  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
-                 scratch.reg());
-  object.Unuse();
-  scratch.Unuse();
-  duplicate_value.Unuse();
-
-  // Leave.
-  leave.Bind(&value);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in rdx and the formal
-  // parameter count in rax.
-  Load(args->at(0));
-  Result key = frame_->Pop();
-  // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  Result result = frame_->CallStub(&stub, &key, &count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-  right.ToRegister();
-  left.ToRegister();
-  __ cmpq(right.reg(), left.reg());
-  right.Unuse();
-  left.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  // The RBP value is aligned, so its low bit is clear and it carries a valid
-  // smi tag (it is not necessarily padded like a real smi, though, so it
-  // must not be treated as one).
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  Result rbp_as_smi = allocator_->Allocate();
-  ASSERT(rbp_as_smi.is_valid());
-  __ movq(rbp_as_smi.reg(), rbp);
-  frame_->Push(&rbp_as_smi);
-}
-
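The tag pun relied on here can be spelled out in two lines; a sketch assuming
the smi tag is the zero low bit, which is exactly what the STATIC_ASSERT
checks:

    #include <cstdint>

    // Sketch: an aligned frame pointer has its low bit clear, so it passes
    // the smi tag test untouched; it is not shifted like a real smi, though,
    // which is why it must not be used as one.
    bool PassesSmiTagCheck(uintptr_t value) {
      return (value & 1) == 0;  // kSmiTag == 0, kSmiTagSize == 1
    }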
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  frame_->SpillAll();
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-  __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ movq(rbx, rax);
-
-  __ bind(&heapnumber_allocated);
-
-  // Return a random uint32 number in rax.
-  // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(1);
-#ifdef _WIN64
-  __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
-  __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
-  // Convert 32 random bits in rax to 0.(32 random bits) in a double
-  // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
-  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-  __ movd(xmm1, rcx);
-  __ movd(xmm0, rax);
-  __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
-  __ subsd(xmm0, xmm1);
-  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
-  __ movq(rax, rbx);
-  Result result = allocator_->Allocate(rax);
-  frame_->Push(&result);
-}
-
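The conversion described in the comment above can be reproduced in isolation;
a sketch with a hypothetical helper name, using the double constant for 2^20
directly instead of the cvtss2sd detour:

    #include <cstdint>
    #include <cstring>

    // Sketch of the xorpd/subsd trick: 0x4130000000000000 is the double
    // 1.0 x 2^20.  Planting 32 random bits in its low mantissa and then
    // subtracting 2^20 leaves r / 2^32, a double in [0, 1).
    double RandomBitsToDouble(uint32_t r) {
      uint64_t bits = 0x4130000000000000ULL ^ r;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result - 1048576.0;  // 1048576.0 == 2^20
    }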
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  Result answer = frame_->CallStub(&stub, 3);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 4);
-
-  // Load the arguments on the stack and call the runtime system.
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-  RegExpExecStub stub;
-  Result result = frame_->CallStub(&stub, 4);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-  RegExpConstructResultStub stub;
-  Result result = frame_->CallStub(&stub, 3);
-  frame_->Push(&result);
-}
-
-
-class DeferredSearchCache : public DeferredCode {
- public:
-  DeferredSearchCache(Register dst,
-                      Register cache,
-                      Register key,
-                      Register scratch)
-      : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;    // On invocation holds the finger index (as int32); on
-                    // exit holds the value that was looked up.
-  Register cache_;  // instance of JSFunctionResultCache.
-  Register key_;    // key being looked up.
-  Register scratch_;
-};
-
-
-// Returns the operand of the element at |index| + |additional_offset| in the
-// FixedArray whose pointer is held in |array|.  |index| is an int32.
-static Operand ArrayElement(Register array,
-                            Register index,
-                            int additional_offset = 0) {
-  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
-  return FieldOperand(array, index, times_pointer_size, offset);
-}
-
-
-void DeferredSearchCache::Generate() {
-  Label first_loop, search_further, second_loop, cache_miss;
-
-  Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
-  Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
-  // Check the cache from the finger down to the start of the cache.
-  __ bind(&first_loop);
-  __ subl(dst_, kEntrySizeImm);
-  __ cmpl(dst_, kEntriesIndexImm);
-  __ j(less, &search_further);
-
-  __ cmpq(ArrayElement(cache_, dst_), key_);
-  __ j(not_equal, &first_loop);
-
-  __ Integer32ToSmiField(
-      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ movq(dst_, ArrayElement(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&search_further);
-
-  // Check the cache from the end of the cache down to the finger.
-  __ SmiToInteger32(dst_,
-                    FieldOperand(cache_,
-                                 JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiToInteger32(scratch_,
-                    FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
-  __ bind(&second_loop);
-  __ subl(dst_, kEntrySizeImm);
-  __ cmpl(dst_, scratch_);
-  __ j(less_equal, &cache_miss);
-
-  __ cmpq(ArrayElement(cache_, dst_), key_);
-  __ j(not_equal, &second_loop);
-
-  __ Integer32ToSmiField(
-      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ movq(dst_, ArrayElement(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&cache_miss);
-  __ push(cache_);  // store a reference to cache
-  __ push(key_);  // store a key
-  __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ push(key_);
-  // On x64 the function must be in rdi.
-  __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
-  ParameterCount expected(1);
-  __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
-  // Find a place to put the new cached value.
-  Label add_new_entry, update_cache;
-  __ movq(rcx, Operand(rsp, kPointerSize));  // restore the cache
-  // Possible optimization: the cache size is constant for a given cache, so
-  // technically we could use a constant here.  However, on a cache miss this
-  // optimization would hardly matter.
-
-  // Check whether we can add a new entry to the cache.
-  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(r9,
-                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ cmpl(rbx, r9);
-  __ j(greater, &add_new_entry);
-
-  // Check whether we can evict the entry after the finger.
-  __ SmiToInteger32(rdx,
-                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ addl(rdx, kEntrySizeImm);
-  Label forward;
-  __ cmpl(rbx, rdx);
-  __ j(greater, &forward);
-  // Need to wrap over the cache.
-  __ movl(rdx, kEntriesIndexImm);
-  __ bind(&forward);
-  __ movl(r9, rdx);
-  __ jmp(&update_cache);
-
-  __ bind(&add_new_entry);
-  // r9 holds cache size as int32.
-  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
-  __ Integer32ToSmiField(
-      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
-  // Update the cache itself.
-  // r9 holds the index as int32.
-  __ bind(&update_cache);
-  __ pop(rbx);  // restore the key
-  __ Integer32ToSmiField(
-      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
-  // Store key.
-  __ movq(ArrayElement(rcx, r9), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
-
-  // Store value.
-  __ pop(rcx);  // restore the cache.
-  __ SmiToInteger32(rdx,
-                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ incl(rdx);
-  // Back up rax, because the RecordWrite macro clobbers its arguments.
-  __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rax);
-  __ RecordWrite(rcx, 0, rbx, rdx);
-
-  if (!dst_.is(rax)) {
-    __ movq(dst_, rax);
-  }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      Isolate::Current()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->Push(FACTORY->undefined_value());
-    return;
-  }
-
-  Load(args->at(1));
-  Result key = frame_->Pop();
-  key.ToRegister();
-
-  Result cache = allocator()->Allocate();
-  ASSERT(cache.is_valid());
-  __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
-  __ movq(cache.reg(),
-          FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
-  __ movq(cache.reg(),
-          ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ movq(cache.reg(),
-          FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
-                                                          cache.reg(),
-                                                          key.reg(),
-                                                          scratch.reg());
-
-  const int kFingerOffset =
-      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  // tmp.reg() now holds finger offset as a smi.
-  __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
-  __ cmpq(key.reg(), FieldOperand(cache.reg(),
-                                  tmp.reg(), times_pointer_size,
-                                  FixedArray::kHeaderSize));
-  deferred->Branch(not_equal);
-  __ movq(tmp.reg(), FieldOperand(cache.reg(),
-                                  tmp.reg(), times_pointer_size,
-                                  FixedArray::kHeaderSize + kPointerSize));
-
-  deferred->BindExit();
-  frame_->Push(&tmp);
-}
-
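Together with the main-line probe of the finger entry, the deferred code
searches the cache in two passes; a sketch over the flat key/value layout,
with a hypothetical helper name:

    #include <cstdint>

    // Sketch of the probe order: from the finger down to the first entry,
    // then from the end of the used region down to just past the finger.
    // Entries are (key, value) pairs, so the cached value lives at the
    // returned index + 1; -1 means a miss, in which case the generated code
    // calls the cache's factory function and stores the new entry.
    int FindCacheEntry(const intptr_t* cache, int entries_index,
                       int entry_size, int cache_size, int finger,
                       intptr_t key) {
      for (int i = finger - entry_size; i >= entries_index; i -= entry_size) {
        if (cache[i] == key) return i;
      }
      for (int i = cache_size - entry_size; i > finger; i -= entry_size) {
        if (cache[i] == key) return i;
      }
      return -1;
    }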
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and jump to the runtime.
-  Load(args->at(0));
-
-  NumberToStringStub stub;
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-class DeferredSwapElements : public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  Result index2 = frame_->Pop();
-  index2.ToRegister();
-
-  Result index1 = frame_->Pop();
-  index1.ToRegister();
-
-  Result object = frame_->Pop();
-  object.ToRegister();
-
-  Result tmp1 = allocator()->Allocate();
-  tmp1.ToRegister();
-  Result tmp2 = allocator()->Allocate();
-  tmp2.ToRegister();
-
-  frame_->Spill(object.reg());
-  frame_->Spill(index1.reg());
-  frame_->Spill(index2.reg());
-
-  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
-                                                            index1.reg(),
-                                                            index2.reg());
-
-  // Fetch the map and check whether the array is in the fast case.
-  // Check that the object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg());
-  deferred->Branch(not_equal);
-  __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
-           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(not_zero);
-
-  // Check the object's elements are in fast case and writable.
-  __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  deferred->Branch(not_equal);
-
-  // Check that both indices are smis.
-  Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
-  deferred->Branch(NegateCondition(both_smi));
-
-  // Check that both indices are valid.
-  __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
-  __ SmiCompare(tmp2.reg(), index1.reg());
-  deferred->Branch(below_equal);
-  __ SmiCompare(tmp2.reg(), index2.reg());
-  deferred->Branch(below_equal);
-
-  // Bring addresses into index1 and index2.
-  __ SmiToInteger32(index1.reg(), index1.reg());
-  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
-                                    index1.reg(),
-                                    times_pointer_size,
-                                    FixedArray::kHeaderSize));
-  __ SmiToInteger32(index2.reg(), index2.reg());
-  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
-                                    index2.reg(),
-                                    times_pointer_size,
-                                    FixedArray::kHeaderSize));
-
-  // Swap elements.
-  __ movq(object.reg(), Operand(index1.reg(), 0));
-  __ movq(tmp2.reg(), Operand(index2.reg(), 0));
-  __ movq(Operand(index2.reg(), 0), object.reg());
-  __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
-  Label done;
-  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: check that both values are smis
-  // (or them together and test against the smi mask).
-
-  __ movq(tmp2.reg(), tmp1.reg());
-  __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
-  __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
-  __ bind(&done);
-
-  deferred->BindExit();
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // for receiver and function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  Result result = frame_->CallJSFunction(n_args);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method.  It only handles special cases and branches
-// to the runtime system for everything else.  Note that this function
-// assumes that the callsite has executed ToNumber on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Label allocate_return;
-  // Load the two operands while leaving the values on the frame.
-  frame()->Dup();
-  Result exponent = frame()->Pop();
-  exponent.ToRegister();
-  frame()->Spill(exponent.reg());
-  frame()->PushElementAt(1);
-  Result base = frame()->Pop();
-  base.ToRegister();
-  frame()->Spill(base.reg());
-
-  Result answer = allocator()->Allocate();
-  ASSERT(answer.is_valid());
-  ASSERT(!exponent.reg().is(base.reg()));
-  JumpTarget call_runtime;
-
-  // Save 1 in xmm3 - we need this several times later on.
-  __ movl(answer.reg(), Immediate(1));
-  __ cvtlsi2sd(xmm3, answer.reg());
-
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
-  __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
-  // Optimized version when y is an integer.
-  Label powi;
-  __ SmiToInteger32(base.reg(), base.reg());
-  __ cvtlsi2sd(xmm0, base.reg());
-  __ jmp(&powi);
-  // The exponent is a smi and the base is a heap number.
-  __ bind(&base_nonsmi);
-  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-
-  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-  // Optimized version of pow if y is an integer.
-  __ bind(&powi);
-  __ SmiToInteger32(exponent.reg(), exponent.reg());
-
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ movl(base.reg(), exponent.reg());
-
-  // Get absolute value of exponent.
-  Label no_neg;
-  __ cmpl(exponent.reg(), Immediate(0));
-  __ j(greater_equal, &no_neg);
-  __ negl(exponent.reg());
-  __ bind(&no_neg);
-
-  // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
-  __ bind(&while_true);
-  __ shrl(exponent.reg(), Immediate(1));
-  __ j(not_carry, &no_multiply);
-  __ mulsd(xmm1, xmm0);
-  __ bind(&no_multiply);
-  __ testl(exponent.reg(), exponent.reg());
-  __ mulsd(xmm0, xmm0);
-  __ j(not_zero, &while_true);
-
-  // base.reg() holds the original exponent; if the exponent is negative,
-  // return 1/result.
-  __ testl(base.reg(), base.reg());
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ movl(answer.reg(), Immediate(0x7FB00000));
-  __ movd(xmm0, answer.reg());
-  __ cvtss2sd(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  call_runtime.Branch(equal);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // The exponent (or both operands) is a heap number; either way, from here
-  // on we work on doubles.
-  __ bind(&exponent_nonsmi);
-  __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-  __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
-  // Test whether the exponent is NaN.
-  __ ucomisd(xmm1, xmm1);
-  call_runtime.Branch(parity_even);
-
-  Label base_not_smi;
-  Label handle_special_cases;
-  __ JumpIfNotSmi(base.reg(), &base_not_smi);
-  __ SmiToInteger32(base.reg(), base.reg());
-  __ cvtlsi2sd(xmm0, base.reg());
-  __ jmp(&handle_special_cases);
-  __ bind(&base_not_smi);
-  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-  __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
-  __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
-  __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
-  // base is NaN or +/-Infinity
-  call_runtime.Branch(greater_equal);
-  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ movl(answer.reg(), Immediate(0xBF000000));
-  __ movd(xmm2, answer.reg());
-  __ cvtss2sd(xmm2, xmm2);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
-
-  // Calculate the reciprocal of the square root.  sqrtsd returns -0 when the
-  // input is -0; the ECMA spec requires +0, so add +0 first to turn -0 into
-  // +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  call_runtime.Branch(not_equal);
-
-  // Calculate the square root.  sqrtsd returns -0 when the input is -0; the
-  // ECMA spec requires +0, so add +0 first to turn -0 into +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-
-  JumpTarget done;
-  Label failure, success;
-  __ bind(&allocate_return);
-  // Make a copy of the frame to enable us to handle allocation
-  // failure after the JumpTarget jump.
-  VirtualFrame* clone = new VirtualFrame(frame());
-  __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
-  __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
-  // Remove the two original values from the frame - we only need those
-  // in the case where we branch to runtime.
-  frame()->Drop(2);
-  exponent.Unuse();
-  base.Unuse();
-  done.Jump(&answer);
-  // Use the copy of the original frame as our current frame.
-  RegisterFile empty_regs;
-  SetFrame(clone, &empty_regs);
-  // If we experience an allocation failure we branch to runtime.
-  __ bind(&failure);
-  call_runtime.Bind();
-  answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
-  done.Bind(&answer);
-  frame()->Push(&answer);
-}
-
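The shrl/mulsd loop above is binary exponentiation over the bits of the
exponent; a sketch with a hypothetical helper name, leaving out the
overflow-to-infinity check that the generated code forwards to the runtime:

    // Sketch of the powi loop: the running product mirrors xmm1, the
    // repeatedly squared base mirrors xmm0, and (e & 1) plays the role of
    // the carry bit produced by shrl.
    double PowInteger(double base, int exponent) {
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;
      double x = base;
      while (e != 0) {
        if (e & 1) result *= x;
        x *= x;
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }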
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method.  Note that this function assumes that the
-// callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-
-  // Leave original value on the frame if we need to call runtime.
-  frame()->Dup();
-  Result result = frame()->Pop();
-  result.ToRegister();
-  frame()->Spill(result.reg());
-  Label runtime;
-  Label non_smi;
-  Label load_done;
-  JumpTarget end;
-
-  __ JumpIfNotSmi(result.reg(), &non_smi);
-  __ SmiToInteger32(result.reg(), result.reg());
-  __ cvtlsi2sd(xmm0, result.reg());
-  __ jmp(&load_done);
-  __ bind(&non_smi);
-  __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &runtime);
-  __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
-  __ bind(&load_done);
-  __ sqrtsd(xmm0, xmm0);
-  // A copy of the virtual frame to allow us to go to runtime after the
-  // JumpTarget jump.
-  Result scratch = allocator()->Allocate();
-  VirtualFrame* clone = new VirtualFrame(frame());
-  __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
-  __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
-  frame()->Drop(1);
-  scratch.Unuse();
-  end.Jump(&result);
-  // We only branch to runtime if we have an allocation error.
-  // Use the copy of the original frame as our current frame.
-  RegisterFile empty_regs;
-  SetFrame(clone, &empty_regs);
-  __ bind(&runtime);
-  result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
-  end.Bind(&result);
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right_res = frame_->Pop();
-  Result left_res = frame_->Pop();
-  right_res.ToRegister();
-  left_res.ToRegister();
-  Result tmp_res = allocator()->Allocate();
-  ASSERT(tmp_res.is_valid());
-  Register right = right_res.reg();
-  Register left = left_res.reg();
-  Register tmp = tmp_res.reg();
-  right_res.Unuse();
-  left_res.Unuse();
-  tmp_res.Unuse();
-  __ cmpq(left, right);
-  destination()->true_target()->Branch(equal);
-  // Fail if either is a non-HeapObject.
-  Condition either_smi =
-      masm()->CheckEitherSmi(left, right, tmp);
-  destination()->false_target()->Branch(either_smi);
-  __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
-  __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
-          Immediate(JS_REGEXP_TYPE));
-  destination()->false_target()->Branch(not_equal);
-  __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  destination()->false_target()->Branch(not_equal);
-  __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
-  __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result string = frame_->Pop();
-  string.ToRegister();
-
-  Result number = allocator()->Allocate();
-  ASSERT(number.is_valid());
-  __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
-  __ IndexFromHash(number.reg(), number.reg());
-  string.Unuse();
-  frame_->Push(&number);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  if (CheckForInlineRuntimeCall(node)) {
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Push the builtins object found in the current global object.
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(), GlobalObjectOperand());
-    __ movq(temp.reg(),
-            FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
-    frame_->Push(&temp);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    frame_->Push(node->name());
-    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                       arg_count,
-                                       loop_nesting_);
-    frame_->RestoreContextRegister();
-    frame_->Push(&answer);
-  } else {
-    // Call the C runtime function.
-    Result answer = frame_->CallRuntime(function, arg_count);
-    frame_->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    // Swap the true and false targets but keep the same actual label
-    // as the fall through.
-    destination()->Invert();
-    LoadCondition(node->expression(), destination(), true);
-    // Swap the labels back.
-    destination()->Invert();
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
-      frame_->Push(&answer);
-      return;
-    }
-
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (variable != NULL) {
-      // Delete of an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->Push(variable->name());
-        frame_->Push(Smi::FromInt(kNonStrictMode));
-        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                              CALL_FUNCTION, 3);
-        frame_->Push(&answer);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Call the runtime to delete from the context holding the named
-        // variable.  Sync the virtual frame eagerly so we can push the
-        // arguments directly into place.
-        frame_->SyncRange(0, frame_->element_count() - 1);
-        frame_->EmitPush(rsi);
-        frame_->EmitPush(variable->name());
-        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->Push(&answer);
-      } else {
-        // Default: the result of deleting a non-global, non-dynamically-
-        // introduced variable is false.
-        frame_->Push(FACTORY->false_value());
-      }
-    } else {
-      // Default: the result of deleting an expression is true.
-      Load(node->expression());  // may have side-effects
-      frame_->SetElementAt(0, FACTORY->true_value());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(&answer);
-
-  } else if (op == Token::VOID) {
-    Expression* expression = node->expression();
-    if (expression && expression->AsLiteral() && (
-        expression->AsLiteral()->IsTrue() ||
-        expression->AsLiteral()->IsFalse() ||
-        expression->AsLiteral()->handle()->IsNumber() ||
-        expression->AsLiteral()->handle()->IsString() ||
-        expression->AsLiteral()->handle()->IsJSRegExp() ||
-        expression->AsLiteral()->IsNull())) {
-      // Omit evaluating the value of the primitive literal.
-      // It will be discarded anyway, and can have no side effect.
-      frame_->Push(FACTORY->undefined_value());
-    } else {
-      Load(node->expression());
-      frame_->SetElementAt(0, FACTORY->undefined_value());
-    }
-
-  } else {
-    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-    UnaryOverwriteMode overwrite =
-        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-    bool no_negative_zero = node->expression()->no_negative_zero();
-    Load(node->expression());
-    switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
-      case Token::SUB: {
-        GenericUnaryOpStub stub(
-            Token::SUB,
-            overwrite,
-            NO_UNARY_FLAGS,
-            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-        Result operand = frame_->Pop();
-        Result answer = frame_->CallStub(&stub, &operand);
-        answer.set_type_info(TypeInfo::Number());
-        frame_->Push(&answer);
-        break;
-      }
-
-      case Token::BIT_NOT: {
-        // Smi check.
-        JumpTarget smi_label;
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        operand.ToRegister();
-
-        Condition is_smi = masm_->CheckSmi(operand.reg());
-        smi_label.Branch(is_smi, &operand);
-
-        GenericUnaryOpStub stub(Token::BIT_NOT,
-                                overwrite,
-                                NO_UNARY_SMI_CODE_IN_STUB);
-        Result answer = frame_->CallStub(&stub, &operand);
-        continue_label.Jump(&answer);
-
-        smi_label.Bind(&answer);
-        answer.ToRegister();
-        frame_->Spill(answer.reg());
-        __ SmiNot(answer.reg(), answer.reg());
-        continue_label.Bind(&answer);
-        answer.set_type_info(TypeInfo::Smi());
-        frame_->Push(&answer);
-        break;
-      }
-
-      case Token::ADD: {
-        // Smi check.
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        TypeInfo operand_info = operand.type_info();
-        operand.ToRegister();
-        Condition is_smi = masm_->CheckSmi(operand.reg());
-        continue_label.Branch(is_smi, &operand);
-        frame_->Push(&operand);
-        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
-                                              CALL_FUNCTION, 1);
-
-        continue_label.Bind(&answer);
-        if (operand_info.IsSmi()) {
-          answer.set_type_info(TypeInfo::Smi());
-        } else if (operand_info.IsInteger32()) {
-          answer.set_type_info(TypeInfo::Integer32());
-        } else {
-          answer.set_type_info(TypeInfo::Number());
-        }
-        frame_->Push(&answer);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged.  Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub.  The result is left in dst.
-class DeferredPrefixCountOperation : public DeferredCode {
- public:
-  DeferredPrefixCountOperation(Register dst,
-                               bool is_increment,
-                               TypeInfo input_type)
-      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
-  Register left;
-  if (input_type_.IsNumber()) {
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    left = rax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged.  Call into the runtime
-// to convert the argument to a number.  Update the original value in
-// old.  Call the specialized add or subtract stub.  The result is
-// left in dst.
-class DeferredPostfixCountOperation : public DeferredCode {
- public:
-  DeferredPostfixCountOperation(Register dst,
-                                Register old,
-                                bool is_increment,
-                                TypeInfo input_type)
-      : dst_(dst),
-        old_(old),
-        is_increment_(is_increment),
-        input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Register old_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
-  Register left;
-  if (input_type_.IsNumber()) {
-    __ push(dst_);  // Save the input to use as the old value.
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    __ push(rax);  // Save the result of ToNumber to use as the old value.
-    left = rax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-  __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  Comment cmnt(masm_, "[ CountOperation");
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
-  // Postfix operations need a stack slot under the reference to hold
-  // the old value while the new value is being stored.  This is so that
-  // in the case that storing the new value requires a call, the old
-  // value will be in the frame to be spilled.
-  if (is_postfix) frame_->Push(Smi::FromInt(0));
-
-  // A constant reference is not saved to, so the reference is not a
-  // compound assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!is_postfix) frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    target.TakeValue();
-
-    Result new_value = frame_->Pop();
-    new_value.ToRegister();
-
-    Result old_value;  // Only allocated in the postfix case.
-    if (is_postfix) {
-      // Allocate a temporary to preserve the old value.
-      old_value = allocator_->Allocate();
-      ASSERT(old_value.is_valid());
-      __ movq(old_value.reg(), new_value.reg());
-
-      // The return value for postfix operations is ToNumber(input).
-      // Keep more precise type info if the input is some kind of
-      // number already. If the input is not a number we have to wait
-      // for the deferred code to convert it.
-      if (new_value.type_info().IsNumber()) {
-        old_value.set_type_info(new_value.type_info());
-      }
-    }
-    // Ensure the new value is writable.
-    frame_->Spill(new_value.reg());
-
-    DeferredCode* deferred = NULL;
-    if (is_postfix) {
-      deferred = new DeferredPostfixCountOperation(new_value.reg(),
-                                                   old_value.reg(),
-                                                   is_increment,
-                                                   new_value.type_info());
-    } else {
-      deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment,
-                                                  new_value.type_info());
-    }
-
-    if (new_value.is_smi()) {
-      if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
-    } else {
-      __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
-    }
-    if (is_increment) {
-      __ SmiAddConstant(new_value.reg(),
-                        new_value.reg(),
-                        Smi::FromInt(1),
-                        deferred->entry_label());
-    } else {
-      __ SmiSubConstant(new_value.reg(),
-                        new_value.reg(),
-                        Smi::FromInt(1),
-                        deferred->entry_label());
-    }
-    deferred->BindExit();
-
-    // Postfix count operations return their input converted to
-    // number. The case when the input is already a number is covered
-    // above in the allocation code for old_value.
-    if (is_postfix && !new_value.type_info().IsNumber()) {
-      old_value.set_type_info(TypeInfo::Number());
-    }
-
-    new_value.set_type_info(TypeInfo::Number());
-
-    // Postfix: store the old value in the allocated slot under the
-    // reference.
-    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
-    frame_->Push(&new_value);
-    // Non-constant: update the reference.
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
-  }
-
-  // Postfix: drop the new value and use the old.
-  if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
-
-  // NOTE: If the left hand side produces a materialized value (not
-  // control flow), we force the right hand side to do the same. This
-  // is necessary because we assume that if we get control flow on the
-  // last path out of an expression we got it on all paths.
-  if (node->op() == Token::AND) {
-    JumpTarget is_true;
-    ControlDestination dest(&is_true, destination()->false_target(), true);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The current false target was used as the fall-through.  If
-      // there are no dangling jumps to is_true then the left
-      // subexpression was unconditionally false.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_true.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current false target was a forward jump then we have a
-        // valid frame, we have just bound the false target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->false_target()->Unuse();
-          destination()->false_target()->Jump();
-        }
-        is_true.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have actually just jumped to or bound the current false
-        // target but the current control destination is not marked as
-        // used.
-        destination()->Use(false);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_true
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_true
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&pop_and_continue, &exit, true);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    JumpTarget is_false;
-    ControlDestination dest(destination()->true_target(), &is_false, false);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.true_was_fall_through()) {
-      // The current true target was used as the fall-through.  If
-      // there are no dangling jumps to is_false then the left
-      // subexpression was unconditionally true.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_false.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current true target was a forward jump then we have a
-        // valid frame, we have just bound the true target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->true_target()->Unuse();
-          destination()->true_target()->Jump();
-        }
-        is_false.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have just jumped to or bound the current true target but
-        // the current control destination is not marked as used.
-        destination()->Use(true);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_false
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_false
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&exit, &pop_and_continue, false);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-  }
-}
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else {
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    OverwriteMode overwrite_mode = NO_OVERWRITE;
-    if (node->left()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_LEFT;
-    } else if (node->right()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_RIGHT;
-    }
-
-    if (node->left()->IsTrivial()) {
-      Load(node->right());
-      Result right = frame_->Pop();
-      frame_->Push(node->left());
-      frame_->Push(&right);
-    } else {
-      Load(node->left());
-      Load(node->right());
-    }
-    GenericBinaryOperation(node, overwrite_mode);
-  }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
-    // Load the operand and move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Result answer = frame_->Pop();
-    answer.ToRegister();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->true_target()->Branch(is_smi);
-      frame_->Spill(answer.reg());
-      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-
-      // It can be an undetectable string object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
-      answer.Unuse();
-      destination()->Split(below);  // Unsigned byte comparison needed.
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
-      destination()->true_target()->Branch(equal);
-      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
-      destination()->true_target()->Branch(equal);
-
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-
-      // It can be an undetectable object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      answer.Unuse();
-      destination()->Split(not_zero);
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-      frame_->Spill(answer.reg());
-      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
-      destination()->true_target()->Branch(equal);
-      // Regular expressions are callable so typeof == 'function'.
-      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->object_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
-      destination()->true_target()->Branch(equal);
-
-      // Regular expressions are typeof == 'function', not 'object'.
-      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
-      destination()->false_target()->Branch(equal);
-
-      // It can be an undetectable object.
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
-      destination()->false_target()->Branch(below);
-      __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
-      answer.Unuse();
-      destination()->Split(below_equal);
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      answer.Unuse();
-      destination()->Goto(false);
-    }
-    return;
-  }
-
-  Condition cc = no_condition;
-  bool strict = false;
-  switch (op) {
-    case Token::EQ_STRICT:
-      strict = true;
-      // Fall through
-    case Token::EQ:
-      cc = equal;
-      break;
-    case Token::LT:
-      cc = less;
-      break;
-    case Token::GT:
-      cc = greater;
-      break;
-    case Token::LTE:
-      cc = less_equal;
-      break;
-    case Token::GTE:
-      cc = greater_equal;
-      break;
-    case Token::IN: {
-      Load(left);
-      Load(right);
-      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
-      frame_->Push(&answer);  // push the result
-      return;
-    }
-    case Token::INSTANCEOF: {
-      Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      Result answer = frame_->CallStub(&stub, 2);
-      answer.ToRegister();
-      __ testq(answer.reg(), answer.reg());
-      answer.Unuse();
-      destination()->Split(zero);
-      return;
-    }
-    default:
-      UNREACHABLE();
-  }
-
-  if (left->IsTrivial()) {
-    Load(right);
-    Result right_result = frame_->Pop();
-    frame_->Push(left);
-    frame_->Push(&right_result);
-  } else {
-    Load(left);
-    Load(right);
-  }
-
-  Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Result operand = frame_->Pop();
-  operand.ToRegister();
-  __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
-  if (node->is_strict()) {
-    operand.Unuse();
-    destination()->Split(equal);
-  } else {
-    // The 'null' value is only equal to 'undefined' if using non-strict
-    // comparisons.
-    destination()->true_target()->Branch(equal);
-    __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
-    destination()->true_target()->Branch(equal);
-    Condition is_smi = masm_->CheckSmi(operand.reg());
-    destination()->false_target()->Branch(is_smi);
-
-    // It can be an undetectable object.
-    // Use a scratch register in preference to spilling operand.reg().
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(),
-            FieldOperand(operand.reg(), HeapObject::kMapOffset));
-    __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-             Immediate(1 << Map::kIsUndetectable));
-    temp.Unuse();
-    operand.Unuse();
-    destination()->Split(not_zero);
-  }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
-      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
-      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
-      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
-      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
-      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
-      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
-      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
-      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
-      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.  The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceGetNamedValue(Register dst,
-                                 Register receiver,
-                                 Handle<String> name)
-      : dst_(dst), receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Handle<String> name_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  if (!receiver_.is(rax)) {
-    __ movq(rax, receiver_);
-  }
-  __ Move(rcx, name_);
-  Handle<Code> ic = Isolate::Current()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a test rax instruction to indicate
-  // that the inobject property case was inlined.
-  //
-  // Store the delta to the map check instruction here in the test
-  // instruction.  Use masm_-> instead of the __ macro since the
-  // latter can't return a value.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->named_load_inline_miss(), 1);
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetKeyedValue(Register dst,
-                                          Register receiver,
-                                          Register key)
-      : dst_(dst), receiver_(receiver), key_(key) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  if (receiver_.is(rdx)) {
-    if (!key_.is(rax)) {
-      __ movq(rax, key_);
-    }  // else do nothing.
-  } else if (receiver_.is(rax)) {
-    if (key_.is(rdx)) {
-      __ xchg(rax, rdx);
-    } else if (key_.is(rax)) {
-      __ movq(rdx, receiver_);
-    } else {
-      __ movq(rdx, receiver_);
-      __ movq(rax, key_);
-    }
-  } else if (key_.is(rax)) {
-    __ movq(rdx, receiver_);
-  } else {
-    __ movq(rax, key_);
-    __ movq(rdx, receiver_);
-  }
-  // Calculate the delta from the IC call instruction to the map check
-  // movq instruction in the inlined version.  This delta is stored in
-  // a test(rax, delta) instruction after the call so that we can find
-  // it in the IC initialization code and patch the movq instruction.
-  // This means that we cannot allow test instructions after calls to
-  // KeyedLoadIC stubs in other places.
-  Handle<Code> ic = Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the __
-  // macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  // TODO(X64): Consider whether it's worth switching the test to a
-  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
-  // be generated normally.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  Label patch_site_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
-  // Move value, receiver, and key to registers rax, rdx, and rcx, as
-  // the IC stub expects.
-  // Move value to rax, using xchg if the receiver or key is in rax.
-  if (!value_.is(rax)) {
-    if (!receiver_.is(rax) && !key_.is(rax)) {
-      __ movq(rax, value_);
-    } else {
-      __ xchg(rax, value_);
-      // Update receiver_ and key_ if they are affected by the swap.
-      if (receiver_.is(rax)) {
-        receiver_ = value_;
-      } else if (receiver_.is(value_)) {
-        receiver_ = rax;
-      }
-      if (key_.is(rax)) {
-        key_ = value_;
-      } else if (key_.is(value_)) {
-        key_ = rax;
-      }
-    }
-  }
-  // Value is now in rax. Its original location is remembered in value_,
-  // and the value is restored to value_ before returning.
-  // The variables receiver_ and key_ are not preserved.
-  // Move receiver and key to rdx and rcx, swapping if necessary.
-  if (receiver_.is(rdx)) {
-    if (!key_.is(rcx)) {
-      __ movq(rcx, key_);
-    }  // Else everything is already in the right place.
-  } else if (receiver_.is(rcx)) {
-    if (key_.is(rdx)) {
-      __ xchg(rcx, rdx);
-    } else if (key_.is(rcx)) {
-      __ movq(rdx, receiver_);
-    } else {
-      __ movq(rdx, receiver_);
-      __ movq(rcx, key_);
-    }
-  } else if (key_.is(rcx)) {
-    __ movq(rdx, receiver_);
-  } else {
-    __ movq(rcx, key_);
-    __ movq(rdx, receiver_);
-  }
-
-  // Call the IC stub.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                    : Builtins::kKeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instructions (initial movq)
-  // to the test instruction.  We use masm_-> directly here instead of the
-  // __ macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  // Restore value (returned from store IC).
-  if (!value_.is(rax)) __ movq(value_, rax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Do not inline the inobject property case for loads from the global
-  // object.  Also do not inline for unoptimized code.  This saves time
-  // in the code generator.  Unoptimized code is toplevel code or code
-  // that is not in a loop.
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    frame()->Push(name);
-
-    RelocInfo::Mode mode = is_contextual
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    result = frame()->CallLoadIC(mode);
-    // A test rax instruction following the call signals that the
-    // inobject property case was inlined.  Ensure that there is not
-    // a test rax instruction here.
-    __ nop();
-  } else {
-    // Inline the inobject property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-
-    // r12 is now a reserved register, so it cannot be the receiver.
-    // If it was, the distance to the fixup location would not be constant.
-    ASSERT(!receiver.reg().is(r12));
-
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
-
-    // Check that the receiver is a heap object.
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    __ bind(deferred->patch_site());
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    masm()->movq(kScratchRegister, FACTORY->null_value(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                 kScratchRegister);
-    // This branch is always a forwards branch so it's always a fixed
-    // size which allows the assert below to succeed and patching to work.
-    // Don't use deferred->Branch(...), since that might add coverage code.
-    masm()->j(not_equal, deferred->entry_label());
-
-    // The delta from the patch label to the load offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-           LoadIC::kOffsetToLoadInstruction);
-    // The initial (invalid) offset has to be large enough to force
-    // a 32-bit instruction encoding to allow patching with an
-    // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
-
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->named_load_inline(), 1);
-    deferred->BindExit();
-  }
-  ASSERT(frame()->height() == original_height - 1);
-  return result;
-}
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // A test rax instruction following the call signals that the inobject
-    // property case was inlined.  Ensure that there is not a test rax
-    // instruction here.
-    __ nop();
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-    Label patch_site;
-
-    // Get the value and receiver from the stack.
-    Result value = frame()->Pop();
-    value.ToRegister();
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    // Allocate result register.
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
-    // r12 is now a reserved register, so it cannot be the receiver.
-    // If it was, the distance to the fixup location would not be constant.
-    ASSERT(!receiver.reg().is(r12));
-
-    // Check that the receiver is a heap object.
-    Condition is_smi = masm()->CheckSmi(receiver.reg());
-    slow.Branch(is_smi, &value, &receiver);
-
-    // This is the map check instruction that will be patched.
-    // Initially use an invalid map to force a failure. The exact
-    // instruction sequence is important because we use the
-    // kOffsetToStoreInstruction constant for patching. We avoid using
-    // the __ macro for the following two instructions because it
-    // might introduce extra instructions.
-    __ bind(&patch_site);
-    masm()->movq(kScratchRegister, FACTORY->null_value(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                 kScratchRegister);
-    // This branch is always a forwards branch so it's always a fixed size
-    // which allows the assert below to succeed and patching to work.
-    slow.Branch(not_equal, &value, &receiver);
-
-    // The delta from the patch label to the store offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
-           StoreIC::kOffsetToStoreInstruction);
-
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    __ movq(FieldOperand(receiver.reg(), offset), value.reg());
-    __ movq(result.reg(), value.reg());
-
-    // Allocate scratch register for write barrier.
-    Result scratch = allocator()->Allocate();
-    ASSERT(scratch.is_valid());
-
-    // The write barrier clobbers all input registers, so spill the
-    // receiver and the value.
-    frame_->Spill(receiver.reg());
-    frame_->Spill(value.reg());
-
-    // If the receiver and the value share a register allocate a new
-    // register for the receiver.
-    if (receiver.reg().is(value.reg())) {
-      receiver = allocator()->Allocate();
-      ASSERT(receiver.is_valid());
-      __ movq(receiver.reg(), value.reg());
-    }
-
-    // Update the write barrier. To save instructions in the inlined
-    // version we do not filter smis.
-    Label skip_write_barrier;
-    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
-    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
-    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
-    if (FLAG_debug_code) {
-      __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-      __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-      __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    }
-    __ bind(&skip_write_barrier);
-    value.Unuse();
-    scratch.Unuse();
-    receiver.Unuse();
-    done.Jump(&result);
-
-    slow.Bind(&value, &receiver);
-    frame()->Push(&receiver);
-    frame()->Push(&value);
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // Encode the offset to the map check instruction and the offset
-    // to the write barrier store address computation in a test rax
-    // instruction.
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ testl(rax,
-             Immediate((delta_to_record_write << 16) | delta_to_patch_site));
-    done.Bind(&result);
-  }
-
-  ASSERT_EQ(expected_height, frame()->height());
-  return result;
-}
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Inline array load code if inside of a loop.  We do not know
-  // the receiver map yet, so we initially generate the code with
-  // a check against an invalid map.  In the inline cache code, we
-  // patch the map check if appropriate.
-  if (loop_nesting() > 0) {
-    Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    // Allocate the temporary early so that we use rax if it is free.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    // If key and receiver are shared registers on the frame, their values will
-    // be automatically saved and restored when going to deferred code.
-    // The result is returned in elements, which is not shared.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(elements.reg(),
-                                           receiver.reg(),
-                                           key.reg());
-
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    // Check that the receiver has the expected map.
-    // Initially, use an invalid map. The map is patched in the IC
-    // initialization code.
-    __ bind(deferred->patch_site());
-    // Use masm_-> here instead of the double underscore macro since extra
-    // coverage code can interfere with the patching.  Do not use a load
-    // from the root array to load null_value, since the load must be patched
-    // with the expected receiver map, which is not in the root array.
-    masm_->movq(kScratchRegister, FACTORY->null_value(),
-                RelocInfo::EMBEDDED_OBJECT);
-    masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                kScratchRegister);
-    deferred->Branch(not_equal);
-
-    __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
-
-    // Get the elements array from the receiver.
-    __ movq(elements.reg(),
-            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ AssertFastElements(elements.reg());
-
-    // Check that key is within bounds.
-    __ SmiCompare(key.reg(),
-                  FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Load and check that the result is not the hole.  We could
-    // reuse the index or elements register for the value.
-    //
-    // TODO(206): Consider whether it makes sense to try some
-    // heuristic about which register to reuse.  For example, if
-    // one is rax, then we can reuse that one because the value
-    // coming from the deferred code will be in rax.
-    SmiIndex index =
-        masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-    __ movq(elements.reg(),
-            FieldOperand(elements.reg(),
-                         index.reg,
-                         index.scale,
-                         FixedArray::kHeaderSize));
-    result = elements;
-    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
-    deferred->Branch(equal);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->keyed_load_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    Comment cmnt(masm_, "[ Load from keyed Property");
-    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed load.  The explicit nop instruction is here because
-    // the push that follows might be peep-hole optimized away.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 2);
-  return result;
-}
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Generate inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
-    // Get the receiver, key and value into registers.
-    result = frame()->Pop();
-    Result key = frame()->Pop();
-    Result receiver = frame()->Pop();
-
-    Result tmp = allocator_->Allocate();
-    ASSERT(tmp.is_valid());
-    Result tmp2 = allocator_->Allocate();
-    ASSERT(tmp2.is_valid());
-
-    // Determine whether the value is a constant before putting it in a
-    // register.
-    bool value_is_constant = result.is_constant();
-
-    // Make sure that value, key and receiver are in registers.
-    result.ToRegister();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(result.reg(),
-                                           key.reg(),
-                                           receiver.reg(),
-                                           strict_mode_flag());
-
-    // Check that the receiver is not a smi.
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ JumpIfNotSmi(key.reg(), deferred->entry_label());
-    } else if (FLAG_debug_code) {
-      __ AbortIfNotSmi(key.reg());
-    }
-
-    // Check that the receiver is a JSArray.
-    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
-    deferred->Branch(not_equal);
-
-    // Get the elements array from the receiver and check that it is not a
-    // dictionary.
-    __ movq(tmp.reg(),
-            FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
-    // Check whether it is possible to omit the write barrier. If the elements
-    // array is in new space or the value written is a smi, we can safely
-    // update the elements array without a write barrier.
-    Label in_new_space;
-    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
-    if (!value_is_constant) {
-      __ JumpIfNotSmi(result.reg(), deferred->entry_label());
-    }
-
-    __ bind(&in_new_space);
-    // Bind the deferred code patch site to be able to locate the fixed
-    // array map comparison.  When debugging, we patch this comparison to
-    // always fail so that we will hit the IC call in the deferred code
-    // which will allow the debugger to break for fast case stores.
-    __ bind(deferred->patch_site());
-    // Avoid using __ to ensure the distance from patch_site
-    // to the map address is always the same.
-    masm()->movq(kScratchRegister, FACTORY->fixed_array_map(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-            kScratchRegister);
-    deferred->Branch(not_equal);
-
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis (because the fixed array check above ensures the
-    // elements are in fast case). Use unsigned comparison to handle negative
-    // keys.
-    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
-                  key.reg());
-    deferred->Branch(below_equal);
-
-    // Store the value.
-    SmiIndex index =
-        masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-    __ movq(FieldOperand(tmp.reg(),
-                         index.reg,
-                         index.scale,
-                         FixedArray::kHeaderSize),
-            result.reg());
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->keyed_store_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    result = frame()->CallKeyedStoreIC(strict_mode_flag());
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed store.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 3);
-  return result;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
-  }
-}
-
-
-void Reference::GetValue() {
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-
-  // Record the source position for the property load.
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      if (persist_after_get_) {
-        cgen_->frame()->Dup();
-      }
-      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
-      cgen_->frame()->Push(&result);
-      break;
-    }
-
-    case KEYED: {
-      // A load of a bare identifier (load from global) cannot be keyed.
-      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
-      if (persist_after_get_) {
-        cgen_->frame()->PushElementAt(1);
-        cgen_->frame()->PushElementAt(1);
-      }
-      Result value = cgen_->EmitKeyedLoad();
-      cgen_->frame()->Push(&value);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-
-  if (!persist_after_get_) {
-    set_unloaded();
-  }
-}
-
-
-void Reference::TakeValue() {
-  // TODO(X64): This function is completely architecture independent. Move
-  // it somewhere shared.
-
-  // For non-constant frame-allocated slots, we invalidate the value in the
-  // slot.  For all others, we fall back on GetValue.
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(!is_illegal());
-  if (type_ != SLOT) {
-    GetValue();
-    return;
-  }
-
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-  ASSERT(slot != NULL);
-  if (slot->type() == Slot::LOOKUP ||
-      slot->type() == Slot::CONTEXT ||
-      slot->var()->mode() == Variable::CONST ||
-      slot->is_arguments()) {
-    GetValue();
-    return;
-  }
-
-  // Only non-constant, frame-allocated parameters and locals can reach
-  // here.  Be careful not to use the optimizations for arguments
-  // object access since it may not have been initialized yet.
-  ASSERT(!slot->is_arguments());
-  if (slot->type() == Slot::PARAMETER) {
-    cgen_->frame()->TakeParameterAt(slot->index());
-  } else {
-    ASSERT(slot->type() == Slot::LOCAL);
-    cgen_->frame()->TakeLocalAt(slot->index());
-  }
-
-  ASSERT(persist_after_get_);
-  // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      Result answer = cgen_->EmitNamedStore(GetName(), false);
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression()->AsProperty();
-      ASSERT(property != NULL);
-
-      Result answer = cgen_->EmitKeyedStore(property->key()->type());
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case UNLOADED:
-    case ILLEGAL:
-      UNREACHABLE();
-  }
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                                      Result* left,
-                                                      Result* right) {
-  if (stub->ArgsInRegistersSupported()) {
-    stub->SetArgsInRegisters();
-    return frame_->CallStub(stub, left, right);
-  } else {
-    frame_->Push(left);
-    frame_->Push(right);
-    return frame_->CallStub(stub, 2);
-  }
-}
-
-#undef __
-
 #define __ masm.
 
 #ifdef _WIN64
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 9a70907..94c7850 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -30,270 +30,17 @@
 
 #include "ast.h"
 #include "ic-inl.h"
-#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 
 // -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  The value of the reference may be invalidated,
-  // causing subsequent attempts to read it to fail.
-  void TakeValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through.  The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally.  Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
-  ControlDestination(JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool true_is_fall_through)
-      : true_target_(true_target),
-        false_target_(false_target),
-        true_is_fall_through_(true_is_fall_through),
-        is_used_(false) {
-    ASSERT(true_is_fall_through ? !true_target->is_bound()
-                                : !false_target->is_bound());
-  }
-
-  // Accessors for the jump targets.  Directly jumping or branching to
-  // or binding the targets will not update the destination's state.
-  JumpTarget* true_target() const { return true_target_; }
-  JumpTarget* false_target() const { return false_target_; }
-
-  // True if the destination has been jumped to unconditionally or
-  // control has been split to both targets.  This predicate does not
-  // test whether the targets have been extracted and manipulated as
-  // raw jump targets.
-  bool is_used() const { return is_used_; }
-
-  // True if the destination is used and the true target (respectively
-  // false target) was the fall through.  If the target is backward,
-  // "fall through" included jumping unconditionally to it.
-  bool true_was_fall_through() const {
-    return is_used_ && true_is_fall_through_;
-  }
-
-  bool false_was_fall_through() const {
-    return is_used_ && !true_is_fall_through_;
-  }
-
-  // Emit a branch to one of the true or false targets, and bind the
-  // other target.  Because this binds the fall-through target, it
-  // should be emitted in tail position (as the last thing when
-  // compiling an expression).
-  void Split(Condition cc) {
-    ASSERT(!is_used_);
-    if (true_is_fall_through_) {
-      false_target_->Branch(NegateCondition(cc));
-      true_target_->Bind();
-    } else {
-      true_target_->Branch(cc);
-      false_target_->Bind();
-    }
-    is_used_ = true;
-  }
-
-  // Emit an unconditional jump in tail position, to the true target
-  // (if the argument is true) or the false target.  The "jump" will
-  // actually bind the jump target if it is forward, jump to it if it
-  // is backward.
-  void Goto(bool where) {
-    ASSERT(!is_used_);
-    JumpTarget* target = where ? true_target_ : false_target_;
-    if (target->is_bound()) {
-      target->Jump();
-    } else {
-      target->Bind();
-    }
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Mark this control destination as used as if Goto had been called,
-  // but without generating a jump or binding a label (the control
-  // effect should have already happened).  This is used when the left
-  // subexpression of a short-circuit boolean operator is compiled.
-  void Use(bool where) {
-    ASSERT(!is_used_);
-    ASSERT((where ? true_target_ : false_target_)->is_bound());
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Swap the true and false targets but keep the same actual label as
-  // the fall through.  This is used when compiling negated
-  // expressions, where we want to swap the targets but preserve the
-  // state.
-  void Invert() {
-    JumpTarget* temp_target = true_target_;
-    true_target_ = false_target_;
-    false_target_ = temp_target;
-
-    true_is_fall_through_ = !true_is_fall_through_;
-  }
-
- private:
-  // True and false jump targets.
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-
-  // Before using the destination: true if the true target is the
-  // preferred fall through, false if the false target is.  After
-  // using the destination: true if the true target was actually used
-  // as the fall through, false if the false target was.
-  bool true_is_fall_through_;
-
-  // True if the Split, Goto, or Use functions have been called.
-  bool is_used_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair).  It is threaded through
-// the call stack.  Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own control destination.
-  CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  ~CodeGenState();
-
-  // Accessors for the state.
-  ControlDestination* destination() const { return destination_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-  // A control destination in case the expression has a control-flow
-  // effect.
-  ControlDestination* destination_;
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
@@ -319,431 +66,7 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
-  bool in_spilled_code() const { return in_spilled_code_; }
-  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  // State
-  ControlDestination* destination() const { return state_->destination(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Visit a statement and then spill the virtual frame if control flow can
-  // reach the end of the statement (i.e., it does not exit via break,
-  // continue, return, or throw).  This function is used temporarily while
-  // the code generator is being transformed.
-  void VisitAndSpill(Statement* statement);
-
-  // Visit a list of statements and then spill the virtual frame if control
-  // flow can reach the end of the list.
-  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which cannot be done more than once).
-  void GenerateReturnSequence(Result* return_value);
-
-  // Generate code for a fast smi loop.
-  void GenerateFastSmiLoop(ForStatement* node);
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  Result StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  Operand SlotOperand(Slot* slot, Register tmp);
-
-  Operand ContextSlotOperandCheckExtensions(Slot* slot,
-                                            Result tmp,
-                                            JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* x,
-                     ControlDestination* destination,
-                     bool force_control);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver();
-
-  // Generate code to push the value of an expression on top of the frame
-  // and then spill the frame fully to memory.  This function is used
-  // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                           TypeofState typeof_state,
-                                           JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       Result* result,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the expression stack into a slot, leaving the
-  // value in place.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Receiver is passed on the frame and not consumed.
-  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // If the store is contextual, value is passed on the frame and consumed.
-  // Otherwise, receiver and value are passed on the frame and consumed.
-  Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a property of an object, returning it in a Result.
-  // The object and the property name are passed on the stack, and
-  // not changed.
-  Result EmitKeyedLoad();
-
-  // Receiver, key, and value are passed on the frame and consumed.
-  Result EmitKeyedStore(StaticType* key_type);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Translate the value on top of the frame into control flow to the
-  // control destination.
-  void ToBoolean(ControlDestination* destination);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(BinaryOperation* expr,
-                              OverwriteMode overwrite_mode);
-
-  // Generate a stub call from the virtual frame.
-  Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                         Result* left,
-                                         Result* right);
-
-  // Emits a code sequence that jumps to a JumpTarget if the inputs
-  // are both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfBothSmiUsingTypeInfo(Result* left,
-                                  Result* right,
-                                  JumpTarget* both_smi);
-
-  // Emits a code sequence that jumps to deferred code if the input
-  // is not a smi.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfNotSmiUsingTypeInfo(Register reg,
-                                 TypeInfo type,
-                                 DeferredCode* deferred);
-
-  // Emits a code sequence that jumps to deferred code if the inputs
-  // are not both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     DeferredCode* deferred);
-
-  // If possible, combine two constant smi values using op to produce
-  // a smi result, and push it on the virtual frame, all at compile time.
-  // Returns true if it succeeds.  Otherwise it has no effect.
-  bool FoldConstantSmis(Token::Value op, int left, int right);
-
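
Folding here amounts to doing the arithmetic on the untagged values and
keeping the result only when it still fits in a smi.  An illustrative sketch
for addition (an assumed shape, not the removed implementation):

    // Hypothetical compile-time fold of Token::ADD on two smi constants.
    static bool TryFoldSmiAdd(int left, int right, int* result) {
      int64_t sum = static_cast<int64_t>(left) + right;
      if (sum < Smi::kMinValue || sum > Smi::kMaxValue) return false;
      *result = static_cast<int>(sum);  // still representable as a smi
      return true;  // caller pushes the folded constant on the virtual frame
    }
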
-  // Emit code to perform a binary operation on a constant
-  // smi and a likely smi.  Consumes the Result *operand.
-  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                    Result* operand,
-                                    Handle<Object> constant_operand,
-                                    bool reversed,
-                                    OverwriteMode overwrite_mode);
-
-  // Emit code to perform a binary operation on two likely smis.
-  // The code to handle smi arguments is produced inline.
-  // Consumes the Results *left and *right.
-  Result LikelySmiBinaryOperation(BinaryOperation* expr,
-                                  Result* left,
-                                  Result* right,
-                                  OverwriteMode overwrite_mode);
-
-  void Comparison(AstNode* node,
-                  Condition cc,
-                  bool strict,
-                  ControlDestination* destination);
-
-  // If at least one of the sides is a constant smi, generate optimized code.
-  void ConstantSmiComparison(Condition cc,
-                             bool strict,
-                             ControlDestination* destination,
-                             Result* left_side,
-                             Result* right_side,
-                             bool left_side_constant_smi,
-                             bool right_side_constant_smi,
-                             bool is_loop_condition);
-
-  void GenerateInlineNumberComparison(Result* left_side,
-                                      Result* right_side,
-                                      Condition cc,
-                                      ControlDestination* dest);
-
-  // To prevent long attacker-controlled byte sequences in the generated
-  // code, integer constants from the JavaScript source are loaded in two
-  // parts if they are larger than 16 bits.
-  static const int kMaxSmiInlinedBits = 16;
-  bool IsUnsafeSmi(Handle<Object> value);
-  // Load an integer constant x into a register target using
-  // at most 16 bits of user-controlled data per assembly operation.
-  void LoadUnsafeSmi(Register target, Handle<Object> value);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for String.fromCharCode(n).
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements. Takes three expressions, the object and two
-  // indices. This should only be used if the indices are known to be
-  // non-negative and within bounds of the elements array at the call site.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  // Check whether two RegExps are equivalent.
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to record which source position code is being generated
-  // for. Source positions are collected by the assembler and emitted with
-  // the relocation information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.  There should
-  // be no frame-external references to (non-reserved) registers.
-  bool HasValidEntryRegisters();
-#endif
-
-  ZoneList<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  // Jump targets.
-  // The target of the return from the function.
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  // True when we are in code that expects the virtual frame to be fully
-  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
-  // called from spilled code, because they do not leave the virtual frame
-  // in a spilled state.
-  bool in_spilled_code_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class Result;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-
-  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
-  friend class InlineRuntimeFunctionsTable;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 0398465..423e6f2 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -29,7 +29,8 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "assembler.h"
+#include "codegen.h"
 #include "debug.h"
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 4bf84a8..97168cd 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -232,7 +232,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       NearLabel ok;
       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
       __ j(above_equal, &ok);
@@ -781,7 +781,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -809,7 +809,7 @@
       __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ jmp(clause->body_target()->entry_label());
+      __ jmp(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -821,7 +821,7 @@
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ jmp(clause->body_target()->entry_label());
+    __ jmp(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -831,14 +831,14 @@
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_target());
   } else {
-    __ jmp(default_clause->body_target()->entry_label());
+    __ jmp(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
@@ -3811,7 +3811,7 @@
   if (assign_type == VARIABLE) {
     PrepareForBailout(expr->expression(), TOS_REG);
   } else {
-    PrepareForBailout(expr->increment(), TOS_REG);
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
   }
 
   // Call ToNumber only if operand is not a smi.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9180465..38c85f0 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
deleted file mode 100644
index e715604..0000000
--- a/src/x64/jump-target-x64.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (is_bound()) {
-    // Backward jump.  There is an expected frame to merge to.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else if (entry_frame_ != NULL) {
-    // Forward jump with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and jump to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else {
-    // Forward jump.  Remember the current frame and emit a jump to
-    // its merge code.
-    AddReachingFrame(cgen()->frame());
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-    __ jmp(&merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint b) {
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-
-  if (is_bound()) {
-    ASSERT(direction_ == BIDIRECTIONAL);
-    // Backward branch.  We have an expected frame to merge to on the
-    // backward edge.
-
-    // Swap the current frame for a copy (we do the swapping to get
-    // the off-frame registers off the fall through) to use for the
-    // branch.
-    VirtualFrame* fall_through_frame = cgen()->frame();
-    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
-    RegisterFile non_frame_registers;
-    cgen()->SetFrame(branch_frame, &non_frame_registers);
-
-    // Check if we can avoid merge code.
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen()->frame()->Equals(entry_frame_)) {
-      // Branch right in to the block.
-      cgen()->DeleteFrame();
-      __ j(cc, &entry_label_);
-      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-      return;
-    }
-
-    // Check if we can reuse existing merge code.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (reaching_frames_[i] != NULL &&
-          cgen()->frame()->Equals(reaching_frames_[i])) {
-        // Branch to the merge code.
-        cgen()->DeleteFrame();
-        __ j(cc, &merge_labels_[i]);
-        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-        return;
-      }
-    }
-
-    // To emit the merge code here, we negate the condition and branch
-    // around the merge code on the fall through path.
-    Label original_fall_through;
-    __ j(NegateCondition(cc), &original_fall_through);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-    __ bind(&original_fall_through);
-
-  } else if (entry_frame_ != NULL) {
-    // Forward branch with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and branch to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    // Explicitly use the macro assembler instead of __ as forward
-    // branches are expected to be a fixed size (no inserted
-    // coverage-checking instructions please).  This is used in
-    // Reference::GetValue.
-    cgen()->masm()->j(cc, &entry_label_);
-
-  } else {
-    // Forward branch.  A copy of the current frame is remembered and
-    // a branch to the merge code is emitted.  Explicitly use the
-    // macro assembler instead of __ as forward branches are expected
-    // to be a fixed size (no inserted coverage-checking instructions
-    // please).  This is used in Reference::GetValue.
-    AddReachingFrame(new VirtualFrame(cgen()->frame()));
-    cgen()->masm()->j(cc, &merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  cgen()->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
-  target_frame->Adjust(1);
-  // We do not expect a call with a preconfigured entry frame.
-  ASSERT(entry_frame_ == NULL);
-  AddReachingFrame(target_frame);
-  __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(cgen() != NULL);
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  // Fast case: the jump target was manually configured with an entry
-  // frame to use.
-  if (entry_frame_ != NULL) {
-    // Assert no reaching frames to deal with.
-    ASSERT(reaching_frames_.is_empty());
-    ASSERT(!cgen()->has_valid_frame());
-
-    RegisterFile empty;
-    if (direction_ == BIDIRECTIONAL) {
-      // Copy the entry frame so the original can be used for a
-      // possible backward jump.
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    } else {
-      // Take ownership of the entry frame.
-      cgen()->SetFrame(entry_frame_, &empty);
-      entry_frame_ = NULL;
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (!is_linked()) {
-    ASSERT(cgen()->has_valid_frame());
-    if (direction_ == FORWARD_ONLY) {
-      // Fast case: no forward jumps and no possible backward jumps.
-      // The stack pointer can be floating above the top of the
-      // virtual frame before the bind.  Afterward, it should not.
-      VirtualFrame* frame = cgen()->frame();
-      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-      if (difference > 0) {
-        frame->stack_pointer_ -= difference;
-        __ addq(rsp, Immediate(difference * kPointerSize));
-      }
-    } else {
-      ASSERT(direction_ == BIDIRECTIONAL);
-      // Fast case: no forward jumps, possible backward ones.  Remove
-      // constants and copies above the watermark on the fall-through
-      // frame and use it as the entry frame.
-      cgen()->frame()->MakeMergable();
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (direction_ == FORWARD_ONLY &&
-      !cgen()->has_valid_frame() &&
-      reaching_frames_.length() == 1) {
-    // Fast case: no fall-through, a single forward jump, and no
-    // possible backward jumps.  Pick up the only reaching frame, take
-    // ownership of it, and use it for the block about to be emitted.
-    VirtualFrame* frame = reaching_frames_[0];
-    RegisterFile empty;
-    cgen()->SetFrame(frame, &empty);
-    reaching_frames_[0] = NULL;
-    __ bind(&merge_labels_[0]);
-
-    // The stack pointer can be floating above the top of the
-    // virtual frame before the bind.  Afterward, it should not.
-    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-    if (difference > 0) {
-      frame->stack_pointer_ -= difference;
-      __ addq(rsp, Immediate(difference * kPointerSize));
-    }
-
-    __ bind(&entry_label_);
-    return;
-  }
-
-  // If there is a current frame, record it as the fall-through.  It
-  // is owned by the reaching frames for now.
-  bool had_fall_through = false;
-  if (cgen()->has_valid_frame()) {
-    had_fall_through = true;
-    AddReachingFrame(cgen()->frame());  // Return value ignored.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-
-  // Compute the frame to use for entry to the block.
-  ComputeEntryFrame();
-
-  // Some moves required to merge to an expected frame require purely
-  // frame state changes, and do not require any code generation.
-  // Perform those first to increase the possibility of finding equal
-  // frames below.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    if (reaching_frames_[i] != NULL) {
-      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
-    }
-  }
-
-  if (is_linked()) {
-    // There were forward jumps.  Handle merging the reaching frames
-    // to the entry frame.
-
-    // Loop over the (non-null) reaching frames and process any that
-    // need merge code.  Iterate backwards through the list to handle
-    // the fall-through frame first.  Set frames that will be
-    // processed after 'i' to NULL if we want to avoid processing
-    // them.
-    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
-      VirtualFrame* frame = reaching_frames_[i];
-
-      if (frame != NULL) {
-        // Does the frame (probably) need merge code?
-        if (!frame->Equals(entry_frame_)) {
-          // We could have a valid frame as the fall through to the
-          // binding site or as the fall through from a previous merge
-          // code block.  Jump around the code we are about to
-          // generate.
-          if (cgen()->has_valid_frame()) {
-            cgen()->DeleteFrame();
-            __ jmp(&entry_label_);
-          }
-          // Pick up the frame for this block.  Assume ownership if
-          // there cannot be backward jumps.
-          RegisterFile empty;
-          if (direction_ == BIDIRECTIONAL) {
-            cgen()->SetFrame(new VirtualFrame(frame), &empty);
-          } else {
-            cgen()->SetFrame(frame, &empty);
-            reaching_frames_[i] = NULL;
-          }
-          __ bind(&merge_labels_[i]);
-
-          // Loop over the remaining (non-null) reaching frames,
-          // looking for any that can share merge code with this one.
-          for (int j = 0; j < i; j++) {
-            VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen()->frame())) {
-              // Set the reaching frame element to null to avoid
-              // processing it later, and then bind its entry label.
-              reaching_frames_[j] = NULL;
-              __ bind(&merge_labels_[j]);
-            }
-          }
-
-          // Emit the merge code.
-          cgen()->frame()->MergeTo(entry_frame_);
-        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
-          // If this is the fall through frame, and it didn't need
-          // merge code, we need to pick up the frame so we can jump
-          // around subsequent merge blocks if necessary.
-          RegisterFile empty;
-          cgen()->SetFrame(frame, &empty);
-          reaching_frames_[i] = NULL;
-        }
-      }
-    }
-
-    // The code generator may not have a current frame if there was no
-    // fall through and none of the reaching frames needed merging.
-    // In that case, clone the entry frame as the current frame.
-    if (!cgen()->has_valid_frame()) {
-      RegisterFile empty;
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    }
-
-    // There may be unprocessed reaching frames that did not need
-    // merge code.  They will have unbound merge labels.  Bind their
-    // merge labels to be the same as the entry label and deallocate
-    // them.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (!merge_labels_[i].is_bound()) {
-        reaching_frames_[i] = NULL;
-        __ bind(&merge_labels_[i]);
-      }
-    }
-
-    // There are non-NULL reaching frames with bound labels for each
-    // merge block, but only on backward targets.
-  } else {
-    // There were no forward jumps.  There must be a current frame and
-    // this must be a bidirectional target.
-    ASSERT(reaching_frames_.length() == 1);
-    ASSERT(reaching_frames_[0] != NULL);
-    ASSERT(direction_ == BIDIRECTIONAL);
-
-    // Use a copy of the reaching frame so the original can be saved
-    // for possible reuse as a backward merge block.
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
-    __ bind(&merge_labels_[0]);
-    cgen()->frame()->MergeTo(entry_frame_);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-  }
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
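
The stack-pointer fixup that appeared twice in the removed DoBind is plain
arithmetic: the frame's top element has index element_count() - 1, so with
element_count() == 5 and stack_pointer_ == 6 the difference is 2, and the
bind emits addq rsp, 16 (two 8-byte slots on x64) to drop the hardware stack
pointer back onto the virtual frame's top.
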
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 7ceff76..56e6cc2 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -440,14 +440,16 @@
 }
 
 
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode,
+                               int argc) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ call(code, mode);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode, argc);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
@@ -458,6 +460,13 @@
 }
 
 
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
 void LCodeGen::CallRuntime(const Runtime::Function* function,
                            int num_arguments,
                            LInstruction* instr) {
@@ -467,11 +476,23 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode,
+                                          int argc) {
   // Create the environment to bail out to. If the call has side effects,
   // execution has to continue after the call; otherwise execution can
   // continue from a previous bailout point, repeating the call.
@@ -483,8 +504,17 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    ASSERT(argc == 0);
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        argc,
+        deoptimization_environment->deoptimization_index());
+  }
 }
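
In effect, a plain call site records a simple safepoint with no arguments,
while a call made under the PushSafepointRegistersScope introduced in the
header below must record a register safepoint together with the number of
arguments it pushed, so that pointers parked in the safepoint register area
can still be located.
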
 
 
@@ -534,7 +564,7 @@
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
         jump_table_.last().address != entry) {
-      jump_table_.Add(entry);
+      jump_table_.Add(JumpTableEntry(entry));
     }
     __ j(cc, &jump_table_.last().label);
   }
@@ -605,6 +635,8 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(kind == expected_safepoint_kind_);
+
   const ZoneList<LOperand*>* operands = pointers->operands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -1328,11 +1360,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ Pushad();
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ Popad();
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
 
 
@@ -1937,23 +1966,34 @@
 
 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                 Label* map_check) {
-  __ PushSafepointRegisters();
-  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
-      InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
-  InstanceofStub stub(flags);
+  {
+    PushSafepointRegistersScope scope(this);
+    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
+    InstanceofStub stub(flags);
 
-  __ push(ToRegister(instr->InputAt(0)));
-  __ Push(instr->function());
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(temp.is(rdi));
-  static const int kAdditionalDelta = 16;
-  int delta =
-      masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
-  __ movq(temp, Immediate(delta));
-  __ push(temp);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ movq(kScratchRegister, rax);
-  __ PopSafepointRegisters();
+    __ push(ToRegister(instr->InputAt(0)));
+    __ Push(instr->function());
+
+    Register temp = ToRegister(instr->TempAt(0));
+    static const int kAdditionalDelta = 13;
+    int delta =
+        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+    __ movq(temp, Immediate(delta));
+    __ push(temp);
+
+    // We are pushing three values on the stack but recording a
+    // safepoint with two arguments because the stub is going to
+    // remove the third argument from the stack before jumping
+    // to the instanceof builtin on the slow path.
+    CallCodeGeneric(stub.GetCode(),
+                    RelocInfo::CODE_TARGET,
+                    instr,
+                    RECORD_SAFEPOINT_WITH_REGISTERS,
+                    2);
+    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+    __ movq(kScratchRegister, rax);
+  }
   __ testq(kScratchRegister, kScratchRegister);
   Label load_false;
   Label done;
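
The delta pushed to the stub is the distance from the inlined map check to
the call's return address.  The 13 extra bytes presumably correspond to the
three instructions emitted after the measurement point (a movq of a 32-bit
immediate, a push, and a rel32 call); the added ASSERT keeps kAdditionalDelta
honest if that sequence ever changes.
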
@@ -2075,7 +2115,9 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->name());
-  Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
 }
 
@@ -2535,7 +2577,7 @@
   }
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2560,7 +2602,7 @@
   Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   Label negative;
   __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2581,9 +2623,7 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
 
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(rax)) {
     __ movq(tmp, rax);
@@ -2600,7 +2640,6 @@
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
 
   __ bind(&done);
-  __ PopSafepointRegisters();
 }
 
 
@@ -2942,7 +2981,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3038,7 +3077,7 @@
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3159,7 +3198,7 @@
   // contained in the register pointer map.
   __ Set(result, 0);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3172,16 +3211,12 @@
     __ Integer32ToSmi(index, index);
     __ push(index);
   }
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(rax);
   }
   __ SmiToInteger32(rax, rax);
   __ StoreToSafepointRegisterSlot(result, rax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3224,14 +3259,11 @@
   // contained in the register pointer map.
   __ Set(result, 0);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ Integer32ToSmi(char_code, char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(result, rax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3296,13 +3328,12 @@
   Register reg = ToRegister(instr->result());
   __ Move(reg, Smi::FromInt(0));
 
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  // Ensure that value in rax survives popping registers.
-  __ movq(kScratchRegister, rax);
-  __ PopSafepointRegisters();
+  {
+    PushSafepointRegistersScope scope(this);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Ensure that the value in rax survives popping registers.
+    __ movq(kScratchRegister, rax);
+  }
   __ movq(reg, kScratchRegister);
 }
 
@@ -3392,7 +3423,7 @@
     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     __ cvttsd2siq(input_reg, xmm0);
     __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
-    __ cmpl(input_reg, kScratchRegister);
+    __ cmpq(input_reg, kScratchRegister);
     DeoptimizeIf(equal, instr->environment());
   } else {
     // Deoptimize if we don't have a heap number.
@@ -3457,7 +3488,7 @@
     // the JS bitwise operations.
     __ cvttsd2siq(result_reg, input_reg);
     __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
-    __ cmpl(result_reg, kScratchRegister);
+    __ cmpq(result_reg, kScratchRegister);
     DeoptimizeIf(equal, instr->environment());
   } else {
     __ cvttsd2si(result_reg, input_reg);
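
The cmpl -> cmpq changes above fix an operand-width bug: the overflow
sentinel 0x8000000000000000 produced by cvttsd2siq has all-zero low 32 bits,
so a 32-bit compare also reported equality for in-range results whose low
half happened to be zero, causing spurious deoptimizations; the 64-bit
compare matches only the real sentinel.
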
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index f44fdb9..34277f6 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -60,7 +60,8 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -124,7 +125,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -156,12 +157,26 @@
   bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS
+  };
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode,
+                       int argc);
+
+
   void CallCode(Handle<Code> code,
                 RelocInfo::Mode mode,
                 LInstruction* instr);
+
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
                    LInstruction* instr);
+
   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
                    LInstruction* instr) {
@@ -169,6 +184,11 @@
     CallRuntime(function, num_arguments, instr);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
+
   // Generate a direct call to a known function.  Expects the function
   // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -177,7 +197,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode,
+                                  int argc);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -250,7 +272,7 @@
   void EmitPushConstantOperand(LOperand* operand);
 
   struct JumpTableEntry {
-    inline JumpTableEntry(Address entry)
+    explicit inline JumpTableEntry(Address entry)
         : label(),
           address(entry) { }
     Label label;
@@ -281,6 +303,27 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    }
+
+    ~PushSafepointRegistersScope() {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
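
A minimal sketch of how the scope composes with CallRuntimeFromDeferred in a
deferred-code path (the helper name is hypothetical; the runtime id is one
used elsewhere in this patch):

    // Hypothetical deferred-code helper built on the new scope.
    void LCodeGen::DoDeferredExample(LInstruction* instr) {
      PushSafepointRegistersScope scope(this);  // pushes registers; expected
                                                // kind -> kWithRegisters
      CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
      __ movq(kScratchRegister, rax);  // keep the result across the pop
    }  // ~scope: pops registers; expected kind -> kSimple
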
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index e0adc1f..07ca3a5 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1114,9 +1114,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
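
This is also a small semantic cleanup: the old code dispatched on
IsTrue()/IsFalse() and silently emitted nothing for any other constant,
whereas HConstant::ToBoolean() classifies every constant, so one of the two
successors is always chosen explicitly.
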
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 512abbb..15bb894 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1295,6 +1295,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1616,6 +1617,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1671,12 +1673,14 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
   virtual void PrintDataTo(StringStream* stream);
 
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8e050bc..b097166 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "assembler-x64.h"
 #include "macro-assembler-x64.h"
 #include "serialize.h"
@@ -1320,6 +1320,7 @@
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
+    ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   or_(dst, src2);
@@ -1340,6 +1341,7 @@
 
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
+    ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   xor_(dst, src2);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9fde18d..4c17720 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -323,6 +323,16 @@
                                            Register src,
                                            int power);
 
+  // Perform the logical or of two smi values and return a smi value.
+  // If either argument is not a smi, jump to on_not_smis and retain
+  // the original values of the source registers. The destination
+  // register may be clobbered if it is not one of the source registers.
+  template <typename LabelType>
+  void SmiOrIfSmis(Register dst,
+                   Register src1,
+                   Register src2,
+                   LabelType* on_not_smis);
+
 
   // Simple comparison of smis.  Both sides must be known smis to use these,
   // otherwise use Cmp.
@@ -1083,6 +1093,10 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  static int SafepointRegisterStackIndex(Register reg) {
+    return SafepointRegisterStackIndex(reg.code());
+  }
+
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1786,6 +1800,24 @@
 
 
 template <typename LabelType>
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+                                 LabelType* on_not_smis) {
+  if (dst.is(src1) || dst.is(src2)) {
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    movq(kScratchRegister, src1);
+    or_(kScratchRegister, src2);
+    JumpIfNotSmi(kScratchRegister, on_not_smis);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    or_(dst, src2);
+    JumpIfNotSmi(dst, on_not_smis);
+  }
+}
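
A sketch of the intended call-site pattern (registers and the label are
chosen for illustration only):

    // Combine two possibly-non-smi values, falling back to a slow path.
    NearLabel not_smis;
    __ SmiOrIfSmis(rcx, rax, rbx, &not_smis);  // rcx = rax | rbx, both smis
    // ... fast path consuming rcx ...
    __ bind(&not_smis);  // rax and rbx still hold their original values
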
+
+
+template <typename LabelType>
 void MacroAssembler::JumpIfNotString(Register object,
                                      Register object_map,
                                      LabelType* not_string) {
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
deleted file mode 100644
index 5df3d54..0000000
--- a/src/x64/register-allocator-x64-inl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
-      reg.is(kScratchRegister) || reg.is(kRootRegister) ||
-      reg.is(kSmiConstantRegister);
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // rax
-    2,   // rcx
-    3,   // rdx
-    1,   // rbx
-    -1,  // rsp  Stack pointer.
-    -1,  // rbp  Frame pointer.
-    -1,  // rsi  Context.
-    4,   // rdi
-    5,   // r8
-    6,   // r9
-    -1,  // r10  Scratch register.
-    8,   // r11
-    -1,  // r12  Smi constant.
-    -1,  // r13  Roots array.  This is callee saved.
-    7,   // r14
-    9    // r15
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] =
-      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r15 };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-  // The non-reserved rdi register is live on JS function entry.
-  Use(rdi);  // JS function.
-}
-} }  // namespace v8::internal
-
-#endif  // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
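
The deleted ToNumber and ToRegister tables must be mutually inverse for the
allocator's numbering to round-trip. A standalone check of that property,
substituting raw x64 register codes (rax=0 ... r15=15) for the Register type:

#include <cassert>

int main() {
  // kNumbers, indexed by hardware register code (rax=0 ... r15=15).
  const int kNumbers[16] = {0, 2, 3, 1, -1, -1, -1, 4,
                            5, 6, -1, 8, -1, -1, 7, 9};
  // Hardware codes of kRegisters = {rax, rbx, rcx, rdx, rdi,
  //                                 r8, r9, r14, r11, r15}.
  const int kRegisterCodes[10] = {0, 3, 1, 2, 7, 8, 9, 14, 11, 15};
  for (int n = 0; n < 10; ++n) {
    assert(kNumbers[kRegisterCodes[n]] == n);  // ToNumber(ToRegister(n)) == n
  }
  return 0;
}
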
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
deleted file mode 100644
index 65189f5..0000000
--- a/src/x64/register-allocator-x64.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  ASSERT(is_valid());
-  if (is_constant()) {
-    CodeGenerator* code_generator =
-        CodeGeneratorScope::Current(Isolate::Current());
-    Result fresh = code_generator->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    code_generator->masm()->Move(fresh.reg(), handle());
-    // This result becomes a copy of the fresh one.
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  }
-  ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
-  ASSERT(is_valid());
-  CodeGenerator* code_generator =
-      CodeGeneratorScope::Current(Isolate::Current());
-  if (!is_register() || !reg().is(target)) {
-    Result fresh = code_generator->allocator()->Allocate(target);
-    ASSERT(fresh.is_valid());
-    if (is_register()) {
-      code_generator->masm()->movq(fresh.reg(), reg());
-    } else {
-      ASSERT(is_constant());
-      code_generator->masm()->Move(fresh.reg(), handle());
-    }
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  } else if (is_register() && reg().is(target)) {
-    ASSERT(code_generator->has_valid_frame());
-    code_generator->frame()->Spill(target);
-    ASSERT(code_generator->allocator()->count(target) == 1);
-  }
-  ASSERT(is_register());
-  ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // This function is not used in 64-bit code.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
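
The deleted Result::ToRegister(target) upheld one invariant: on return the
result is register-backed in exactly target, whether that meant materializing
a constant or moving between registers. A toy emulation of that invariant,
with the register file modeled as a plain array and hypothetical types
throughout:

#include <cassert>
#include <optional>

struct Result {
  std::optional<int> reg;       // register number, if register-backed
  std::optional<int> constant;  // literal value, if constant-backed
};

// Emulates Result::ToRegister(target): afterwards the result lives in
// exactly `target`, whatever form it had before.
static void ToRegister(Result* r, int target, int bank[]) {
  if (r->constant) {
    bank[target] = *r->constant;   // cf. masm->Move(fresh.reg(), handle())
  } else if (*r->reg != target) {
    bank[target] = bank[*r->reg];  // cf. masm->movq(fresh.reg(), reg())
  }
  r->reg = target;
  r->constant.reset();
}

int main() {
  int bank[16] = {0};
  Result r;
  r.constant = 42;
  ToRegister(&r, 3, bank);
  assert(bank[3] == 42 && *r.reg == 3 && !r.constant);
  return 0;
}
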
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
deleted file mode 100644
index a2884d9..0000000
--- a/src/x64/register-allocator-x64.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  static const int kNumRegisters = 10;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 7494fe0..26b20f4 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
deleted file mode 100644
index 10c327a..0000000
--- a/src/x64/virtual-frame-x64.cc
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::Enter() {
-  // Registers live on entry to a JS frame:
-  //   rsp: stack pointer, points to return address from this function.
-  //   rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
-  //        Trampoline frame.
-  //   rsi: context of this function call.
-  //   rdi: pointer to this function object.
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  if (FLAG_debug_code) {
-    // Verify that rdi contains a JS function.  The following code
-    // relies on rax being available for use.
-    Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
-    __ Check(not_smi,
-             "VirtualFrame::Enter - rdi is not a function (smi check).");
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
-    __ Check(equal,
-             "VirtualFrame::Enter - rdi is not a function (map check).");
-  }
-#endif
-
-  EmitPush(rbp);
-
-  __ movq(rbp, rsp);
-
-  // Store the context in the frame.  The context is kept in rsi and a
-  // copy is stored in the frame.  The external reference to rsi
-  // remains.
-  EmitPush(rsi);
-
-  // Store the function in the frame.  The frame owns the register
-  // reference now (ie, it can keep it in rdi or spill it later).
-  Push(rdi);
-  SyncElementAt(element_count() - 1);
-  cgen()->allocator()->Unuse(rdi);
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Avoid using the leave instruction here, because it is too
-  // short. We need the return sequence to be at least the size of a
-  // call instruction to support patching the exit code in the
-  // debugger. See GenerateReturnSequence for the full return sequence.
-  // TODO(X64): A patched call will be very long now.  Make sure we
-  // have enough room.
-  __ movq(rsp, rbp);
-  stack_pointer_ = frame_pointer();
-  for (int i = element_count() - 1; i > stack_pointer_; i--) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      Unuse(last.reg());
-    }
-  }
-
-  EmitPop(rbp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    // The locals are initialized to a constant (the undefined value), but
-    // we sync them with the actual frame to allocate space for spilling
-    // them later.  First sync everything above the stack pointer so we can
-    // use pushes to allocate and initialize the locals.
-    SyncRange(stack_pointer_ + 1, element_count() - 1);
-    Handle<Object> undefined = FACTORY->undefined_value();
-    FrameElement initial_value =
-        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-
-      // Hope for one of the first eight registers, where the push operation
-      // takes only one byte (kScratchRegister needs the REX.B bit).
-      Result tmp = cgen()->allocator()->Allocate();
-      ASSERT(tmp.is_valid());
-      __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
-      for (int i = 0; i < count; i++) {
-        __ push(tmp.reg());
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      Result cnt = cgen()->allocator()->Allocate();
-      ASSERT(cnt.is_valid());
-      __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
-#ifdef DEBUG
-      Label loop_size;
-      __ bind(&loop_size);
-#endif
-      if (is_uint8(count)) {
-        // Loading imm8 is shorter than loading imm32.
-        // Loading only partial byte register, and using decb below.
-        __ movb(cnt.reg(), Immediate(count));
-      } else {
-        __ movl(cnt.reg(), Immediate(count));
-      }
-      __ bind(&alloc_locals_loop);
-      __ push(kScratchRegister);
-      if (is_uint8(count)) {
-        __ decb(cnt.reg());
-      } else {
-        __ decl(cnt.reg());
-      }
-      __ j(not_zero, &alloc_locals_loop);
-#ifdef DEBUG
-      CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
-#endif
-    }
-    for (int i = 0; i < count; i++) {
-      elements_.Add(initial_value);
-      stack_pointer_++;
-    }
-  }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ movq(Operand(rbp, fp_relative(context_index())), rsi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ movq(rsi, Operand(rbp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  Result temp = cgen()->allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ lea(temp.reg(), ParameterAt(-1));
-  Push(&temp);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(const Operand& operand) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(immediate);
-}
-
-
-void VirtualFrame::EmitPush(Smi* smi_value) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
-  stack_pointer_++;
-  __ Push(smi_value);
-}
-
-
-void VirtualFrame::EmitPush(Handle<Object> value) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  TypeInfo info = TypeInfo::TypeFromValue(value);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ Push(value);
-}
-
-
-void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ PushRoot(index);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
-  ASSERT(expr->IsTrivial());
-
-  Literal* lit = expr->AsLiteral();
-  if (lit != NULL) {
-    Push(lit->handle());
-    return;
-  }
-
-  VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL) {
-    Slot* slot = proxy->var()->AsSlot();
-    if (slot->type() == Slot::LOCAL) {
-      PushLocalAt(slot->index());
-      return;
-    }
-    if (slot->type() == Slot::PARAMETER) {
-      PushParameterAt(slot->index());
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
-  if (ConstantPoolOverflowed()) {
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    if (value->IsSmi()) {
-      __ Move(temp.reg(), Smi::cast(*value));
-    } else {
-      __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
-    }
-    Push(&temp);
-  } else {
-    FrameElement element =
-        FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-    elements_.Add(element);
-  }
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ addq(rsp, Immediate(num_dropped * kPointerSize));
-  }
-
-  // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  FrameElement original = elements_[index];
-
-  // Is this element the backing store of any copies?
-  int new_backing_index = kIllegalIndex;
-  if (original.is_copied()) {
-    // Verify it is copied, and find first copy.
-    for (int i = index + 1; i < element_count(); i++) {
-      if (elements_[i].is_copy() && elements_[i].index() == index) {
-        new_backing_index = i;
-        break;
-      }
-    }
-  }
-
-  if (new_backing_index == kIllegalIndex) {
-    // No copies found, return kIllegalIndex.
-    if (original.is_register()) {
-      Unuse(original.reg());
-    }
-    elements_[index] = FrameElement::InvalidElement();
-    return kIllegalIndex;
-  }
-
-  // This is the backing store of copies.
-  Register backing_reg;
-  if (original.is_memory()) {
-    Result fresh = cgen()->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    Use(fresh.reg(), new_backing_index);
-    backing_reg = fresh.reg();
-    __ movq(backing_reg, Operand(rbp, fp_relative(index)));
-  } else {
-    // The original was in a register.
-    backing_reg = original.reg();
-    set_register_location(backing_reg, new_backing_index);
-  }
-  // Invalidate the element at index.
-  elements_[index] = FrameElement::InvalidElement();
-  // Set the new backing element.
-  if (elements_[new_backing_index].is_synced()) {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::SYNCED,
-                                      original.type_info());
-  } else {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::NOT_SYNCED,
-                                      original.type_info());
-  }
-  // Update the other copies.
-  for (int i = new_backing_index + 1; i < element_count(); i++) {
-    if (elements_[i].is_copy() && elements_[i].index() == index) {
-      elements_[i].set_index(new_backing_index);
-      elements_[new_backing_index].set_copied();
-    }
-  }
-  return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  int new_backing_store_index = InvalidateFrameSlotAt(index);
-  if (new_backing_store_index != kIllegalIndex) {
-    elements_.Add(CopyElementAt(new_backing_store_index));
-    return;
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY: {
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      Use(fresh.reg(), element_count());
-      elements_.Add(new_element);
-      __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
-      break;
-    }
-    case FrameElement::REGISTER:
-      Use(original.reg(), element_count());
-      // Fall through.
-    case FrameElement::CONSTANT:
-    case FrameElement::COPY:
-      original.clear_sync();
-      elements_.Add(original);
-      break;
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  // Store the value on top of the frame to the virtual frame slot at
-  // a given index.  The value on top of the frame is left in place.
-  // This is a duplicating operation, so it can create copies.
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  int top_index = element_count() - 1;
-  FrameElement top = elements_[top_index];
-  FrameElement original = elements_[index];
-  if (top.is_copy() && top.index() == index) return;
-  ASSERT(top.is_valid());
-
-  InvalidateFrameSlotAt(index);
-
-  // InvalidateFrameSlotAt can potentially change any frame element, due
-  // to spilling registers to allocate temporaries in order to preserve
-  // the copy-on-write semantics of aliased elements.  Reload top from
-  // the frame.
-  top = elements_[top_index];
-
-  if (top.is_copy()) {
-    // There are two cases based on the relative positions of the
-    // stored-to slot and the backing slot of the top element.
-    int backing_index = top.index();
-    ASSERT(backing_index != index);
-    if (backing_index < index) {
-      // 1. The top element is a copy of a slot below the stored-to
-      // slot.  The stored-to slot becomes an unsynced copy of that
-      // same backing slot.
-      elements_[index] = CopyElementAt(backing_index);
-    } else {
-      // 2. The top element is a copy of a slot above the stored-to
-      // slot.  The stored-to slot becomes the new (unsynced) backing
-      // slot and both the top element and the element at the former
-      // backing slot become copies of it.  The sync state of the top
-      // and former backing elements is preserved.
-      FrameElement backing_element = elements_[backing_index];
-      ASSERT(backing_element.is_memory() || backing_element.is_register());
-      if (backing_element.is_memory()) {
-        // Because sets of copies are canonicalized to be backed by
-        // their lowest frame element, and because memory frame
-        // elements are backed by the corresponding stack address, we
-        // have to move the actual value down in the stack.
-        //
-        // TODO(209): consider allocating the stored-to slot to the
-        // temp register.  Alternatively, allow copies to appear in
-        // any order in the frame and lazily move the value down to
-        // the slot.
-        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
-        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-      } else {
-        set_register_location(backing_element.reg(), index);
-        if (backing_element.is_synced()) {
-          // If the element is a register, we will not actually move
-          // anything on the stack but only update the virtual frame
-          // element.
-          backing_element.clear_sync();
-        }
-      }
-      elements_[index] = backing_element;
-
-      // The old backing element becomes a copy of the new backing
-      // element.
-      FrameElement new_element = CopyElementAt(index);
-      elements_[backing_index] = new_element;
-      if (backing_element.is_synced()) {
-        elements_[backing_index].set_sync();
-      }
-
-      // All the copies of the old backing element (including the top
-      // element) become copies of the new backing element.
-      for (int i = backing_index + 1; i < element_count(); i++) {
-        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
-          elements_[i].set_index(index);
-        }
-      }
-    }
-    return;
-  }
-
-  // Move the top element to the stored-to slot and replace it (the
-  // top element) with a copy.
-  elements_[index] = top;
-  if (top.is_memory()) {
-    // TODO(209): consider allocating the stored-to slot to the temp
-    // register.  Alternatively, allow copies to appear in any order
-    // in the frame and lazily move the value down to the slot.
-    FrameElement new_top = CopyElementAt(index);
-    new_top.set_sync();
-    elements_[top_index] = new_top;
-
-    // The sync state of the former top element is correct (synced).
-    // Emit code to move the value down in the frame.
-    __ movq(kScratchRegister, Operand(rsp, 0));
-    __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-  } else if (top.is_register()) {
-    set_register_location(top.reg(), index);
-    // The stored-to slot has the (unsynced) register reference and
-    // the top element becomes a copy.  The sync state of the top is
-    // preserved.
-    FrameElement new_top = CopyElementAt(index);
-    if (top.is_synced()) {
-      new_top.set_sync();
-      elements_[index].clear_sync();
-    }
-    elements_[top_index] = new_top;
-  } else {
-    // The stored-to slot holds the same value as the top but
-    // unsynced.  (We do not have copies of constants yet.)
-    ASSERT(top.is_constant());
-    elements_[index].clear_sync();
-  }
-}
-
-
-void VirtualFrame::MakeMergable() {
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement element = elements_[i];
-
-    // In all cases we have to reset the number type information
-    // to unknown for a mergable frame because of incoming back edges.
-    if (element.is_constant() || element.is_copy()) {
-      if (element.is_synced()) {
-        // Just spill.
-        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
-      } else {
-        // Allocate to a register.
-        FrameElement backing_element;  // Invalid if not a copy.
-        if (element.is_copy()) {
-          backing_element = elements_[element.index()];
-        }
-        Result fresh = cgen()->allocator()->Allocate();
-        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
-        elements_[i] =
-            FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED,
-                                          TypeInfo::Unknown());
-        Use(fresh.reg(), i);
-
-        // Emit a move.
-        if (element.is_constant()) {
-          __ Move(fresh.reg(), element.handle());
-        } else {
-          ASSERT(element.is_copy());
-          // Copies are only backed by register or memory locations.
-          if (backing_element.is_register()) {
-            // The backing store may have been spilled by allocating,
-            // but that's OK.  If it was, the value is right where we
-            // want it.
-            if (!fresh.reg().is(backing_element.reg())) {
-              __ movq(fresh.reg(), backing_element.reg());
-            }
-          } else {
-            ASSERT(backing_element.is_memory());
-            __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
-          }
-        }
-      }
-      // No need to set the copied flag --- there are no copies.
-    } else {
-      // Clear the copy flag of non-constant, non-copy elements.
-      // They cannot be copied because copies are not allowed.
-      // The copy flag is not relied on before the end of this loop,
-      // including when registers are spilled.
-      elements_[i].clear_copied();
-      elements_[i].set_type_info(TypeInfo::Unknown());
-    }
-  }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm(), "[ Merge frame");
-  // We should always be merging the code generator's current frame to an
-  // expected frame.
-  ASSERT(cgen()->frame() == this);
-
-  // Adjust the stack pointer upward (toward the top of the virtual
-  // frame) if necessary.
-  if (stack_pointer_ < expected->stack_pointer_) {
-    int difference = expected->stack_pointer_ - stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ subq(rsp, Immediate(difference * kPointerSize));
-  }
-
-  MergeMoveRegistersToMemory(expected);
-  MergeMoveRegistersToRegisters(expected);
-  MergeMoveMemoryToRegisters(expected);
-
-  // Adjust the stack pointer downward if necessary.
-  if (stack_pointer_ > expected->stack_pointer_) {
-    int difference = stack_pointer_ - expected->stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ addq(rsp, Immediate(difference * kPointerSize));
-  }
-
-  // At this point, the frames should be identical.
-  ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  // Move registers, constants, and copies to memory.  Perform moves
-  // from the top downward in the frame in order to leave the backing
-  // stores of copies in registers.
-  for (int i = element_count() - 1; i >= 0; i--) {
-    FrameElement target = expected->elements_[i];
-    if (target.is_register()) continue;  // Handle registers later.
-    if (target.is_memory()) {
-      FrameElement source = elements_[i];
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          // Not a legal merge move.
-          UNREACHABLE();
-          break;
-
-        case FrameElement::MEMORY:
-          // Already in place.
-          break;
-
-        case FrameElement::REGISTER:
-          Unuse(source.reg());
-          if (!source.is_synced()) {
-            __ movq(Operand(rbp, fp_relative(i)), source.reg());
-          }
-          break;
-
-        case FrameElement::CONSTANT:
-          if (!source.is_synced()) {
-            __ Move(Operand(rbp, fp_relative(i)), source.handle());
-          }
-          break;
-
-        case FrameElement::COPY:
-          if (!source.is_synced()) {
-            int backing_index = source.index();
-            FrameElement backing_element = elements_[backing_index];
-            if (backing_element.is_memory()) {
-              __ movq(kScratchRegister,
-                       Operand(rbp, fp_relative(backing_index)));
-              __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
-            } else {
-              ASSERT(backing_element.is_register());
-              __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
-            }
-          }
-          break;
-      }
-    }
-    elements_[i] = target;
-  }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
-  // We have already done X-to-memory moves.
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    // Move the right value into register i if it is currently in a register.
-    int index = expected->register_location(i);
-    int use_index = register_location(i);
-    // Skip if register i is unused in the target or else if source is
-    // not a register (this is not a register-to-register move).
-    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
-    Register target = RegisterAllocator::ToRegister(i);
-    Register source = elements_[index].reg();
-    if (index != use_index) {
-      if (use_index == kIllegalIndex) {  // Target is currently unused.
-        // Copy contents of source from source to target.
-        // Set frame element register to target.
-        Use(target, index);
-        Unuse(source);
-        __ movq(target, source);
-      } else {
-        // Exchange contents of registers source and target.
-        // Nothing except the register backing use_index has changed.
-        elements_[use_index].set_reg(source);
-        set_register_location(target, index);
-        set_register_location(source, use_index);
-        __ xchg(source, target);
-      }
-    }
-
-    if (!elements_[index].is_synced() &&
-        expected->elements_[index].is_synced()) {
-      __ movq(Operand(rbp, fp_relative(index)), target);
-    }
-    elements_[index] = expected->elements_[index];
-  }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
-  // Move memory, constants, and copies to registers.  This is the
-  // final step and since it is not done from the bottom up, but in
-  // register code order, we have special code to ensure that the backing
-  // elements of copies are in their correct locations when we
-  // encounter the copies.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int index = expected->register_location(i);
-    if (index != kIllegalIndex) {
-      FrameElement source = elements_[index];
-      FrameElement target = expected->elements_[index];
-      Register target_reg = RegisterAllocator::ToRegister(i);
-      ASSERT(target.reg().is(target_reg));
-      switch (source.type()) {
-        case FrameElement::INVALID:  // Fall through.
-          UNREACHABLE();
-          break;
-        case FrameElement::REGISTER:
-          ASSERT(source.Equals(target));
-          // Go to next iteration.  Skips Use(target_reg) and syncing
-          // below.  It is safe to skip syncing because a target
-          // register frame element would only be synced if all source
-          // elements were.
-          continue;
-          break;
-        case FrameElement::MEMORY:
-          ASSERT(index <= stack_pointer_);
-          __ movq(target_reg, Operand(rbp, fp_relative(index)));
-          break;
-
-        case FrameElement::CONSTANT:
-          __ Move(target_reg, source.handle());
-          break;
-
-        case FrameElement::COPY: {
-          int backing_index = source.index();
-          FrameElement backing = elements_[backing_index];
-          ASSERT(backing.is_memory() || backing.is_register());
-          if (backing.is_memory()) {
-            ASSERT(backing_index <= stack_pointer_);
-            // Code optimization if backing store should also move
-            // to a register: move backing store to its register first.
-            if (expected->elements_[backing_index].is_register()) {
-              FrameElement new_backing = expected->elements_[backing_index];
-              Register new_backing_reg = new_backing.reg();
-              ASSERT(!is_used(new_backing_reg));
-              elements_[backing_index] = new_backing;
-              Use(new_backing_reg, backing_index);
-              __ movq(new_backing_reg,
-                      Operand(rbp, fp_relative(backing_index)));
-              __ movq(target_reg, new_backing_reg);
-            } else {
-              __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
-            }
-          } else {
-            __ movq(target_reg, backing.reg());
-          }
-        }
-      }
-      // Ensure the proper sync state.
-      if (target.is_synced() && !source.is_synced()) {
-        __ movq(Operand(rbp, fp_relative(index)), target_reg);
-      }
-      Use(target_reg, index);
-      elements_[index] = target;
-    }
-  }
-}
-
-
-Result VirtualFrame::Pop() {
-  FrameElement element = elements_.RemoveLast();
-  int index = element_count();
-  ASSERT(element.is_valid());
-
-  // Get number type information of the result.
-  TypeInfo info;
-  if (!element.is_copy()) {
-    info = element.type_info();
-  } else {
-    info = elements_[element.index()].type_info();
-  }
-
-  bool pop_needed = (stack_pointer_ == index);
-  if (pop_needed) {
-    stack_pointer_--;
-    if (element.is_memory()) {
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ pop(temp.reg());
-      temp.set_type_info(info);
-      return temp;
-    }
-
-    __ addq(rsp, Immediate(kPointerSize));
-  }
-  ASSERT(!element.is_memory());
-
-  // The top element is a register, constant, or a copy.  Unuse
-  // registers and follow copies to their backing store.
-  if (element.is_register()) {
-    Unuse(element.reg());
-  } else if (element.is_copy()) {
-    ASSERT(element.index() < index);
-    index = element.index();
-    element = elements_[index];
-  }
-  ASSERT(!element.is_copy());
-
-  // The element is memory, a register, or a constant.
-  if (element.is_memory()) {
-    // Memory elements could only be the backing store of a copy.
-    // Allocate the original to a register.
-    ASSERT(index <= stack_pointer_);
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    Use(temp.reg(), index);
-    FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(),
-                                      FrameElement::SYNCED,
-                                      element.type_info());
-    // Preserve the copy flag on the element.
-    if (element.is_copied()) new_element.set_copied();
-    elements_[index] = new_element;
-    __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
-    return Result(temp.reg(), info);
-  } else if (element.is_register()) {
-    return Result(element.reg(), info);
-  } else {
-    ASSERT(element.is_constant());
-    return Result(element.handle());
-  }
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallStub(stub);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
-  PrepareForCall(0, 0);
-  arg->ToRegister(rax);
-  arg->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
-  PrepareForCall(0, 0);
-
-  if (arg0->is_register() && arg0->reg().is(rax)) {
-    if (arg1->is_register() && arg1->reg().is(rdx)) {
-      // Wrong registers.
-      __ xchg(rax, rdx);
-    } else {
-      // Register rdx is free for arg0, which frees rax for arg1.
-      arg0->ToRegister(rdx);
-      arg1->ToRegister(rax);
-    }
-  } else {
-    // Register rax is free for arg1, which guarantees rdx is free for
-    // arg0.
-    arg1->ToRegister(rax);
-    arg0->ToRegister(rdx);
-  }
-
-  arg0->Unuse();
-  arg1->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
-  Result function = Pop();
-
-  // InvokeFunction requires function in rdi.  Move it in there.
-  function.ToRegister(rdi);
-  function.Unuse();
-
-  // +1 for receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(rdi, count, CALL_FUNCTION);
-  RestoreContextRegister();
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  // Emit code to write elements below the stack pointer to their
-  // (already allocated) stack address.
-  ASSERT(index <= stack_pointer_);
-  FrameElement element = elements_[index];
-  ASSERT(!element.is_synced());
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      break;
-
-    case FrameElement::MEMORY:
-      // This function should not be called with synced elements.
-      // (memory elements are always synced).
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ movq(Operand(rbp, fp_relative(index)), element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      __ Move(Operand(rbp, fp_relative(index)), element.handle());
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing_element = elements_[backing_index];
-      if (backing_element.is_memory()) {
-        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
-        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-      } else {
-        ASSERT(backing_element.is_register());
-        __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
-  // Sync an element of the frame that is just above the stack pointer
-  // by pushing it.
-  ASSERT(index == stack_pointer_ + 1);
-  stack_pointer_++;
-  FrameElement element = elements_[index];
-
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      __ Push(Smi::FromInt(0));
-      break;
-
-    case FrameElement::MEMORY:
-      // No memory elements exist above the stack pointer.
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ push(element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      __ Move(kScratchRegister, element.handle());
-      __ push(kScratchRegister);
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing = elements_[backing_index];
-      ASSERT(backing.is_memory() || backing.is_register());
-      if (backing.is_memory()) {
-        __ push(Operand(rbp, fp_relative(backing_index)));
-      } else {
-        __ push(backing.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
-  ASSERT(begin >= 0);
-  ASSERT(end < element_count());
-  // Sync elements below the range if they have not been materialized
-  // on the stack.
-  int start = Min(begin, stack_pointer_ + 1);
-  int end_or_stack_pointer = Min(stack_pointer_, end);
-  // Emit normal push instructions for elements above stack pointer
-  // and use mov instructions if we are below stack pointer.
-  int i = start;
-
-  while (i <= end_or_stack_pointer) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
-    i++;
-  }
-  while (i <= end) {
-    SyncElementByPushing(i);
-    i++;
-  }
-}
-
-
-//------------------------------------------------------------------------------
-// Virtual frame stub and IC calling functions.
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  PrepareForCall(0, 0);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
-                                   int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ InvokeBuiltin(id, flag);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
-                                          Result* b,
-                                          Register a_reg,
-                                          Register b_reg) {
-  ASSERT(!a_reg.is(b_reg));
-  // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
-  ASSERT(cgen()->allocator()->count(a_reg) <= 2);
-  ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
-  ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
-  ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
-         (a->is_register() && a->reg().is(a_reg)) ||
-         (b->is_register() && b->reg().is(a_reg)));
-  // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
-  ASSERT(cgen()->allocator()->count(b_reg) <= 2);
-  ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
-  ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
-  ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
-         (a->is_register() && a->reg().is(b_reg)) ||
-         (b->is_register() && b->reg().is(b_reg)));
-
-  if (a->is_register() && a->reg().is(a_reg)) {
-    b->ToRegister(b_reg);
-  } else if (!cgen()->allocator()->is_used(a_reg)) {
-    a->ToRegister(a_reg);
-    b->ToRegister(b_reg);
-  } else if (cgen()->allocator()->is_used(b_reg)) {
-    // a must be in b_reg, b in a_reg.
-    __ xchg(a_reg, b_reg);
-    // Results a and b will be invalidated, so it is ok if they are switched.
-  } else {
-    b->ToRegister(b_reg);
-    a->ToRegister(a_reg);
-  }
-  a->Unuse();
-  b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
-  // Name and receiver are on the top of the frame.  Both are dropped.
-  // The IC expects name in rcx and receiver in rax.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  Result name = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&name, &receiver, rcx, rax);
-
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame. Put them in rax and rdx.
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&key, &receiver, rax, rdx);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
-                                 bool is_contextual,
-                                 StrictModeFlag strict_mode) {
-  // Value and (if not contextual) receiver are on top of the frame.
-  // The IC expects name in rcx, value in rax, and receiver in rdx.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-  Result value = Pop();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    PrepareForCall(0, 0);
-    value.ToRegister(rax);
-    __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    value.Unuse();
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    Result receiver = Pop();
-    PrepareForCall(0, 0);
-    MoveResultsToRegisters(&value, &receiver, rax, rdx);
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ Move(rcx, name);
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in rax, key in rcx, and receiver in rdx.
-  Result value = Pop();
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  if (!cgen()->allocator()->is_used(rax) ||
-      (value.is_register() && value.reg().is(rax))) {
-    if (!cgen()->allocator()->is_used(rax)) {
-      value.ToRegister(rax);
-    }
-    MoveResultsToRegisters(&key, &receiver, rcx, rdx);
-    value.Unuse();
-  } else if (!cgen()->allocator()->is_used(rcx) ||
-             (key.is_register() && key.reg().is(rcx))) {
-    if (!cgen()->allocator()->is_used(rcx)) {
-      key.ToRegister(rcx);
-    }
-    MoveResultsToRegisters(&value, &receiver, rax, rdx);
-    key.Unuse();
-  } else if (!cgen()->allocator()->is_used(rdx) ||
-             (receiver.is_register() && receiver.reg().is(rdx))) {
-    if (!cgen()->allocator()->is_used(rdx)) {
-      receiver.ToRegister(rdx);
-    }
-    MoveResultsToRegisters(&key, &value, rcx, rax);
-    receiver.Unuse();
-  } else {
-    // All three registers are used, and no value is in the correct place.
-    // We have one of the two circular permutations of rax, rcx, rdx.
-    ASSERT(value.is_register());
-    if (value.reg().is(rcx)) {
-      __ xchg(rax, rdx);
-      __ xchg(rax, rcx);
-    } else {
-      __ xchg(rax, rcx);
-      __ xchg(rax, rdx);
-    }
-    value.Unuse();
-    key.Unuse();
-    receiver.Unuse();
-  }
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
-                                int arg_count,
-                                int loop_nesting) {
-  // Function name, arguments, and receiver are found on top of the frame
-  // and dropped by the call.  The IC expects the name in rcx and the rest
-  // on the stack, and drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  name.ToRegister(rcx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
-                                     int arg_count,
-                                     int loop_nesting) {
-  // Function name, arguments, and receiver are found on top of the frame
-  // and dropped by the call.  The IC expects the name in rcx and the rest
-  // on the stack, and drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
-  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  name.ToRegister(rcx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
-  // Arguments, receiver, and function are on top of the frame.  The
-  // IC expects arg count in rax, function in rdi, and the arguments
-  // and receiver on the stack.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  // Duplicate the function before preparing the frame.
-  PushElementAt(arg_count);
-  Result function = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill function and args.
-  function.ToRegister(rdi);
-
-  // Constructors are called with the number of arguments in register
-  // rax for now. Another option would be to have separate construct
-  // call trampolines per different arguments counts encountered.
-  Result num_args = cgen()->allocator()->Allocate(rax);
-  ASSERT(num_args.is_valid());
-  __ Set(num_args.reg(), arg_count);
-
-  function.Unuse();
-  num_args.Unuse();
-  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  // Grow the expression stack by handler size less one (the return
-  // address is already pushed by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
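
Both MoveResultsToRegisters and CallKeyedStoreIC in the file deleted above
repair misplaced values with exchanges instead of scratch registers; in the
three-register case a circular permutation is undone by exactly two xchg
instructions. A plain-C++ check of the branch taken when the value sits in
rcx (the values 1, 2, 3 stand in for value, key, and receiver):

#include <cassert>
#include <utility>

int main() {
  // The rcx cycle: rax holds the receiver, rcx the value, rdx the key.
  int rax = 3, rcx = 1, rdx = 2;
  std::swap(rax, rdx);  // __ xchg(rax, rdx)
  std::swap(rax, rcx);  // __ xchg(rax, rcx)
  // Desired layout for the keyed store IC: value in rax, key in rcx,
  // receiver in rdx.
  assert(rax == 1 && rcx == 2 && rdx == 3);
  return 0;
}
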
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
deleted file mode 100644
index aac9864..0000000
--- a/src/x64/virtual-frame-x64.h
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
-#define V8_X64_VIRTUAL_FRAME_X64_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
-  // generator is being transformed.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
-      ASSERT(cgen()->has_valid_frame());
-      cgen()->frame()->SpillAll();
-      cgen()->set_in_spilled_code(true);
-    }
-
-    ~SpilledScope() {
-      cgen()->set_in_spilled_code(previous_state_);
-    }
-
-   private:
-    bool previous_state_;
-
-    CodeGenerator* cgen() {
-      return CodeGeneratorScope::Current(Isolate::Current());
-    }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  CodeGenerator* cgen() {
-    return CodeGeneratorScope::Current(Isolate::Current());
-  }
-
-  MacroAssembler* masm() { return cgen()->masm(); }
-
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-    TypeInfo info = TypeInfo::Uninitialized());
-
-  // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
-
-  // The height of the virtual expression stack.
-  int height() {
-    return element_count() - expression_base_index();
-  }
-
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  inline int register_location(Register reg);
-
-  inline void set_register_location(Register reg, int index);
-
-  bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
-  }
-
-  inline bool is_used(Register reg);
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (e.g., the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget count elements from the top of the frame, all in-memory
-  // (including synced), and adjust the stack pointer downward to
-  // match an external frame effect (examples include a call removing
-  // its arguments, and exiting a try/catch removing an exception
-  // handler).  No code will be emitted.
-  void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    ForgetElements(count);
-  }
-
-  // Forget count elements from the top of the frame without adjusting
-  // the stack pointer downward.  This is used, for example, before
-  // merging frames at break, continue, and return targets.
-  void ForgetElements(int count);
-
-  // Spill all values from the frame to memory.
-  inline void SpillAll();
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (i.e., they all have frame-external references).
-  Register SpillAnyRegister();
-
-  // Spill the top element of the frame to memory.
-  void SpillTop() { SpillElementAt(element_count() - 1); }
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Put this frame into a form to which an arbitrary frame of the same
-  // height can be merged.  Copies and constants are removed from the frame.
-  void MakeMergable();
-
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected);
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Use(i);
-    }
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by spilling locals.  This
-  // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
-  inline void PrepareForReturn();
-
-  // Number of local variables above which we use a loop for allocation.
-  static const int kLocalVarBound = 14;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // An element of the expression stack as an assembly operand.
-  Operand ElementAt(int index) const {
-    return Operand(rsp, index * kPointerSize);
-  }
-
-  // Random-access store to a frame-top relative frame element.  The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant.  The index is frame-top relative.
-  inline void SetElementAt(int index, Handle<Object> value);
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
-  void StoreToElementAt(int index) {
-    StoreToFrameSlotAt(element_count() - index - 1);
-  }
-
-  // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return Operand(rbp, kLocal0Offset - index * kPointerSize);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot.  The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // Push the function on top of the frame.
-  void PushFunction() { PushFrameSlotAt(function_index()); }
-
-  // Save the value of the rsi register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the rsi register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
-
-  // A parameter as an assembly operand.
-  Operand ParameterAt(int index) {
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count());
-    return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a parameter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
-
-  // The receiver frame slot.
-  Operand Receiver() { return ParameterAt(-1); }
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline Result CallStub(CodeStub* stub, int arg_count);
-
-  // Call stub that takes a single argument passed in rax.  The
-  // argument is given as a result which does not have to be rax or
-  // even a register.  The argument is consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg);
-
-  // Call stub that takes a pair of arguments passed in rdx (arg0) and
-  // rax (arg1).  The arguments are given as results which do not have
-  // to be in the proper registers or even in registers.  The
-  // arguments are consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  Result CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  Result CallRuntime(const Runtime::Function* f, int arg_count);
-  Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  Result InvokeBuiltin(Builtins::JavaScript id,
-                       InvokeFlag flag,
-                       int arg_count);
-
-  // Call load IC.  Name and receiver are found on top of the frame.
-  // Both are dropped.
-  Result CallLoadIC(RelocInfo::Mode mode);
-
-  // Call keyed load IC.  Key and receiver are found on top of the
-  // frame.  Both are dropped.
-  Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
-  // Call store IC.  If the load is contextual, value is found on top of the
-  // frame.  If not, value and receiver are on the frame.  Both are dropped.
-  Result CallStoreIC(Handle<String> name, bool is_contextual,
-                     StrictModeFlag strict_mode);
-
-  // Call keyed store IC.  Value, key, and receiver are on top; all are dropped.
-  Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call call IC.  Function name, arguments, and receiver are found on top
-  // of the frame and dropped by the call.
-  // The argument count does not include the receiver.
-  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Call keyed call IC.  Same calling convention as CallCallIC.
-  Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Allocate and call JS function as constructor.  Arguments,
-  // receiver (global object), and function are found on top of the
-  // frame.  Function is not dropped.  The argument count does not
-  // include the receiver.
-  Result CallConstructor(int arg_count);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Duplicate the top element of the frame.
-  void Dup() { PushFrameSlotAt(element_count() - 1); }
-
-  // Duplicate the n'th element from the top of the frame.
-  // Dup(1) is equivalent to Dup().
-  void Dup(int n) {
-    ASSERT(n > 0);
-    PushFrameSlotAt(element_count() - n);
-  }
-
-  // Pop an element from the top of the expression stack.  Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  void EmitPop(const Operand& operand);
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(const Operand& operand,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Heap::RootListIndex index,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Immediate immediate,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Smi* value);
-  // Uses kScratchRegister, emits appropriate relocation info.
-  void EmitPush(Handle<Object> value);
-
-  inline bool ConstantPoolOverflowed();
-
-  // Push an element on the virtual frame.
-  void Push(Handle<Object> value);
-  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
-  inline void Push(Smi* value);
-
-  // Pushing a result invalidates it (its contents become owned by the
-  // frame).
-  void Push(Result* result) {
-    if (result->is_register()) {
-      Push(result->reg(), result->type_info());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    result->Unuse();
-  }
-
-  // Pushing an expression expects that the expression is trivial (according
-  // to Expression::IsTrivial).
-  void Push(Expression* expr);
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  inline void Nip(int num_dropped);
-
-  inline void SetTypeForLocalAt(int index, TypeInfo info);
-  inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  ZoneList<FrameElement> elements_;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the rsp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count();
-  inline int local_count();
-
-  // The index of the element that is at the processor's frame pointer
-  // (the rbp register).  The parameters, receiver, and return address
-  // are below the frame pointer.
-  int frame_pointer() { return parameter_count() + 2; }
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame.  It is immediately
-  // above the frame pointer.
-  int context_index() { return frame_pointer() + 1; }
-
-  // The index of the function slot in the frame.  It is above the frame
-  // pointer and the context slot.
-  int function_index() { return frame_pointer() + 2; }
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lie the context and the function.
-  int local0_index() { return frame_pointer() + 3; }
-
-  // The index of the base of the expression stack.
-  int expression_base_index() { return local0_index() + local_count(); }
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
-
-  // Record an occurrence of a register in the virtual frame.  This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame.  This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
-
-  // Spill the element at a particular index---write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index.  If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
-  // Push the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
-
-  // This function assumes that a and b are the only results that could be in
-  // the registers a_reg or b_reg.  Other results can be live, but must not
-  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
-  void MoveResultsToRegisters(Result* a,
-                              Result* b,
-                              Register a_reg,
-                              Register b_reg);
-
-  // Call a code stub that has already been prepared for calling (via
-  // PrepareForCall).
-  Result RawCallStub(CodeStub* stub);
-
-  // Calls a code object which has already been prepared for calling
-  // (via PrepareForCall).
-  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
-  inline bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class FrameRegisterState;
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_VIRTUAL_FRAME_X64_H_
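
The index arithmetic in the deleted header fixes where every element
lives: the receiver sits at index 0, the parameters above it, then the
return address and saved frame pointer, then context, function, locals,
and finally the expression stack; fp_relative() converts an index into
an rbp-relative byte offset.  A minimal standalone sketch of the same
arithmetic (not V8 source; assumes 8-byte pointers on x64):

    #include <cstdio>

    static const int kPointerSize = 8;  // assumption: 64-bit pointers

    struct FrameLayout {
      int parameter_count;
      int local_count;

      // Mirrors the private index helpers of the removed VirtualFrame.
      int frame_pointer() const { return parameter_count + 2; }
      int param0_index() const { return 1; }
      int context_index() const { return frame_pointer() + 1; }
      int function_index() const { return frame_pointer() + 2; }
      int local0_index() const { return frame_pointer() + 3; }
      int expression_base_index() const { return local0_index() + local_count; }

      // Mirrors fp_relative(): positive offsets reach below the frame
      // pointer (receiver, parameters), negative ones reach above it.
      int fp_relative(int index) const {
        return (frame_pointer() - index) * kPointerSize;
      }
    };

    int main() {
      const FrameLayout f = {2, 3};  // two parameters, three locals
      std::printf("receiver: rbp%+d\n", f.fp_relative(0));
      std::printf("context:  rbp%+d\n", f.fp_relative(f.context_index()));
      std::printf("local 0:  rbp%+d\n", f.fp_relative(f.local0_index()));
      return 0;
    }

With two parameters and three locals this prints rbp+32, rbp-8, and
rbp-24, consistent with the header's comment that the context slot sits
immediately above the frame pointer.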