Version 3.0.8

Exposed heap size limit to the heap statistics gathered by the GetHeapStatistics API. 
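
A minimal sketch of reading the new field through the public API (the setup and printing are illustrative; only GetHeapStatistics and the heap_size_limit accessor come from this change):

    #include <v8.h>
    #include <cstdio>

    int main() {
      // Gather the statistics; heap_size_limit() is the newly exposed
      // upper bound the heap is allowed to grow to.
      v8::HeapStatistics stats;
      v8::V8::GetHeapStatistics(&stats);
      std::printf("used %zu of %zu bytes, limit %zu bytes\n",
                  stats.used_heap_size(),
                  stats.total_heap_size(),
                  stats.heap_size_limit());
      return 0;
    }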

Wrapped external pointers more carefully (issue 1037).
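
The hardening keeps the existing round-trip contract of the external pointer helpers: suitably aligned pointers are encoded directly as Smis, all others are wrapped in a Proxy object. A minimal sketch of that round-trip, assuming the static External::Wrap/Unwrap helpers of this API version (the Backend struct and RoundTrip function are illustrative only):

    #include <v8.h>
    #include <cassert>

    struct Backend { int id; };

    void RoundTrip(Backend* backend) {
      v8::HandleScope scope;
      // Wrap() encodes the pointer directly as a Smi when its low bits
      // allow it, otherwise it allocates a Proxy; Unwrap() recovers the
      // original raw pointer either way.
      v8::Local<v8::Value> wrapped = v8::External::Wrap(backend);
      void* raw = v8::External::Unwrap(wrapped);
      assert(raw == backend);
    }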

Hardened the implementation of error objects to prevent setters from intercepting the properties being set and then throwing an error.

Avoided trashing the FPSCR when calculating Math.floor on ARM.

Performance improvements on the IA32 platform.


git-svn-id: http://v8.googlecode.com/svn/trunk@6346 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 41083e2..6c4bf42 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,19 @@
-2010-01-10: Version 3.0.7
+2011-01-17: Version 3.0.8
+
+        Exposed heap size limit to the heap statistics gathered by
+        the GetHeapStatistics API. 
+
+        Wrapped external pointers more carefully (issue 1037).
+
+        Hardened the implementation of error objects to prevent setters
+        from intercepting the properties being set and then throwing an error.
+
+        Avoided trashing the FPSCR when calculating Math.floor on ARM.
+
+        Performance improvements on the IA32 platform.
+
+
+2011-01-10: Version 3.0.7
 
         Stopped calling inherited setters when creating object literals
         (issue 1015).
@@ -12,7 +27,7 @@
         Performance improvements on the IA32 platform.
 
 
-2010-01-05: Version 3.0.6
+2011-01-05: Version 3.0.6
 
         Allowed getters and setters on JSArray elements (issue 900).
 
diff --git a/SConstruct b/SConstruct
index f6d1385..0f0877f 100644
--- a/SConstruct
+++ b/SConstruct
@@ -663,6 +663,8 @@
   if os == 'win32' and toolchain == 'gcc':
     # MinGW can't do it.
     return 'default'
+  elif os == 'solaris':
+    return 'default'
   else:
     return 'hidden'
 
diff --git a/include/v8.h b/include/v8.h
index 883bfad..7d18107 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2515,6 +2515,7 @@
   size_t total_heap_size() { return total_heap_size_; }
   size_t total_heap_size_executable() { return total_heap_size_executable_; }
   size_t used_heap_size() { return used_heap_size_; }
+  size_t heap_size_limit() { return heap_size_limit_; }
 
  private:
   void set_total_heap_size(size_t size) { total_heap_size_ = size; }
@@ -2522,10 +2523,12 @@
     total_heap_size_executable_ = size;
   }
   void set_used_heap_size(size_t size) { used_heap_size_ = size; }
+  void set_heap_size_limit(size_t size) { heap_size_limit_ = size; }
 
   size_t total_heap_size_;
   size_t total_heap_size_executable_;
   size_t used_heap_size_;
+  size_t heap_size_limit_;
 
   friend class V8;
 };
@@ -3350,10 +3353,10 @@
 const int kSmiTagSize = 1;
 const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
 
-template <size_t ptr_size> struct SmiConstants;
+template <size_t ptr_size> struct SmiTagging;
 
 // Smi constants for 32-bit systems.
-template <> struct SmiConstants<4> {
+template <> struct SmiTagging<4> {
   static const int kSmiShiftSize = 0;
   static const int kSmiValueSize = 31;
   static inline int SmiToInt(internal::Object* value) {
@@ -3361,10 +3364,15 @@
     // Throw away top 32 bits and shift down (requires >> to be sign extending).
     return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
   }
+
+  // On 32-bit systems any 2-byte-aligned pointer can be encoded as a smi
+  // with a plain reinterpret_cast.
+  static const intptr_t kEncodablePointerMask = 0x1;
+  static const int kPointerToSmiShift = 0;
 };
 
 // Smi constants for 64-bit systems.
-template <> struct SmiConstants<8> {
+template <> struct SmiTagging<8> {
   static const int kSmiShiftSize = 31;
   static const int kSmiValueSize = 32;
   static inline int SmiToInt(internal::Object* value) {
@@ -3372,10 +3380,26 @@
     // Shift down and throw away top 32 bits.
     return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
   }
+
+  // To maximize the range of pointers that can be encoded
+  // in the available 32 bits, we require them to be 8-byte aligned.
+  // This gives a covered address space of 2 ^ (32 + 3) = 32G.
+  // It might not be enough to cover stack-allocated objects on some platforms.
+  static const int kPointerAlignment = 3;
+
+  static const intptr_t kEncodablePointerMask =
+      ~(intptr_t(0xffffffff) << kPointerAlignment);
+
+  static const int kPointerToSmiShift =
+      kSmiTagSize + kSmiShiftSize - kPointerAlignment;
 };
 
-const int kSmiShiftSize = SmiConstants<kApiPointerSize>::kSmiShiftSize;
-const int kSmiValueSize = SmiConstants<kApiPointerSize>::kSmiValueSize;
+typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
+const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
+const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+const intptr_t kEncodablePointerMask =
+    PlatformSmiTagging::kEncodablePointerMask;
+const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
 
 template <size_t ptr_size> struct InternalConstants;
 
@@ -3423,7 +3447,7 @@
   }
 
   static inline int SmiValue(internal::Object* value) {
-    return SmiConstants<kApiPointerSize>::SmiToInt(value);
+    return PlatformSmiTagging::SmiToInt(value);
   }
 
   static inline int GetInstanceType(internal::Object* obj) {
@@ -3432,9 +3456,14 @@
     return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
   }
 
+  static inline void* GetExternalPointerFromSmi(internal::Object* value) {
+    const intptr_t address = reinterpret_cast<intptr_t>(value);
+    return reinterpret_cast<void*>(address >> kPointerToSmiShift);
+  }
+
   static inline void* GetExternalPointer(internal::Object* obj) {
     if (HasSmiTag(obj)) {
-      return obj;
+      return GetExternalPointerFromSmi(obj);
     } else if (GetInstanceType(obj) == kProxyType) {
       return ReadField<void*>(obj, kProxyProxyOffset);
     } else {
diff --git a/src/SConscript b/src/SConscript
index 0c8e140..79b1204 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -213,6 +213,7 @@
     x64/ic-x64.cc
     x64/jump-target-x64.cc
     x64/lithium-x64.cc
+    x64/lithium-codegen-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
     x64/register-allocator-x64.cc
diff --git a/src/api.cc b/src/api.cc
index 110468e..073306f 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3266,18 +3266,35 @@
 }
 
 
+static bool CanBeEncodedAsSmi(void* ptr) {
+  const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+  return ((address & i::kEncodablePointerMask) == 0);
+}
+
+
+static i::Smi* EncodeAsSmi(void* ptr) {
+  ASSERT(CanBeEncodedAsSmi(ptr));
+  const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+  i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
+  ASSERT(i::Internals::HasSmiTag(result));
+  ASSERT_EQ(result, i::Smi::FromInt(result->value()));
+  ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
+  return result;
+}
+
+
 void v8::Object::SetPointerInInternalField(int index, void* value) {
   ENTER_V8;
-  i::Object* as_object = reinterpret_cast<i::Object*>(value);
-  if (as_object->IsSmi()) {
-    Utils::OpenHandle(this)->SetInternalField(index, as_object);
-    return;
+  if (CanBeEncodedAsSmi(value)) {
+    Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
+  } else {
+    HandleScope scope;
+    i::Handle<i::Proxy> proxy =
+        i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+    if (!proxy.is_null())
+        Utils::OpenHandle(this)->SetInternalField(index, *proxy);
   }
-  HandleScope scope;
-  i::Handle<i::Proxy> proxy =
-      i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
-  if (!proxy.is_null())
-      Utils::OpenHandle(this)->SetInternalField(index, *proxy);
+  ASSERT_EQ(value, GetPointerFromInternalField(index));
 }
 
 
@@ -3299,7 +3316,8 @@
 
 HeapStatistics::HeapStatistics(): total_heap_size_(0),
                                   total_heap_size_executable_(0),
-                                  used_heap_size_(0) { }
+                                  used_heap_size_(0),
+                                  heap_size_limit_(0) { }
 
 
 void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
@@ -3307,6 +3325,7 @@
   heap_statistics->set_total_heap_size_executable(
       i::Heap::CommittedMemoryExecutable());
   heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
+  heap_statistics->set_heap_size_limit(i::Heap::MaxReserved());
 }
 
 
@@ -3560,11 +3579,13 @@
   LOG_API("External::Wrap");
   EnsureInitialized("v8::External::Wrap()");
   ENTER_V8;
-  i::Object* as_object = reinterpret_cast<i::Object*>(data);
-  if (as_object->IsSmi()) {
-    return Utils::ToLocal(i::Handle<i::Object>(as_object));
-  }
-  return ExternalNewImpl(data);
+
+  v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
+      ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
+      : v8::Local<v8::Value>(ExternalNewImpl(data));
+
+  ASSERT_EQ(data, Unwrap(result));
+  return result;
 }
 
 
@@ -3572,7 +3593,7 @@
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Object* value = obj->GetInternalField(index);
   if (value->IsSmi()) {
-    return value;
+    return i::Internals::GetExternalPointerFromSmi(value);
   } else if (value->IsProxy()) {
     return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
   } else {
@@ -3586,8 +3607,7 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
   void* result;
   if (obj->IsSmi()) {
-    // The external value was an aligned pointer.
-    result = *obj;
+    result = i::Internals::GetExternalPointerFromSmi(*obj);
   } else if (obj->IsProxy()) {
     result = ExternalValueImpl(obj);
   } else {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index fbe97ad..a7c1897 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -2337,34 +2337,28 @@
 
 void Assembler::vcmp(const DwVfpRegister src1,
                      const DwVfpRegister src2,
-                     const SBit s,
                      const Condition cond) {
   // vcmp(Dd, Dm) double precision floating point comparison.
-  // We set bit E, as we want any NaN to set the cumulative exception flag
-  // in the FPSCR.
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=1 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
-       src1.code()*B12 | 0x5*B9 | B8 | B7 | B6 | src2.code());
+       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
 
 
 void Assembler::vcmp(const DwVfpRegister src1,
                      const double src2,
-                     const SBit s,
                      const Condition cond) {
   // vcmp(Dd, Dm) double precision floating point comparison.
   // Instruction details available in ARM DDI 0406A, A8-570.
-  // We set bit E, as we want any NaN to set the cumulative exception flag
-  // in the FPSCR.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=1 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(src2 == 0.0);
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
-       src1.code()*B12 | 0x5*B9 | B8 | B7 | B6);
+       src1.code()*B12 | 0x5*B9 | B8 | B6);
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 7e8c084..e0ea819 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -167,6 +167,9 @@
 struct DwVfpRegister {
   // d0 has been excluded from allocation. This is following ia32
   // where xmm0 is excluded. This should be revisited.
+  // Currently d0 is used as a scratch register.
+  // d1 has also been excluded from allocation to be used as a scratch
+  // register as well.
   static const int kNumRegisters = 16;
   static const int kNumAllocatableRegisters = 15;
 
@@ -298,12 +301,17 @@
 const DwVfpRegister d15 = { 15 };
 
 // VFP FPSCR constants.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
+static const uint32_t kVFPNConditionFlagBit = 1 << 31;
 static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPInvalidExceptionBit = 1;
+static const uint32_t kVFPCConditionFlagBit = 1 << 29;
+static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+
+static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
+static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
+
+static const uint32_t kVFPExceptionMask = 0xf;
 
 // Coprocessor register
 struct CRegister {
@@ -1147,11 +1155,9 @@
             const Condition cond = al);
   void vcmp(const DwVfpRegister src1,
             const DwVfpRegister src2,
-            const SBit s = LeaveCC,
             const Condition cond = al);
   void vcmp(const DwVfpRegister src1,
             const double src2,
-            const SBit s = LeaveCC,
             const Condition cond = al);
   void vmrs(const Register dst,
             const Condition cond = al);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 6480a91..0210b1b 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -502,7 +502,7 @@
 
   // Load the first arguments in r0 and get rid of the rest.
   Label no_arguments;
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(eq, &no_arguments);
   // First args = sp[(argc - 1) * 4].
   __ sub(r0, r0, Operand(1));
@@ -546,7 +546,7 @@
     __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
     __ Assert(eq, "Unexpected string wrapper instance size");
     __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
-    __ cmp(r4, Operand(0));
+    __ cmp(r4, Operand(0, RelocInfo::NONE));
     __ Assert(eq, "Unexpected unused properties of string wrapper");
   }
   __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index e72c5d3..8589cf0 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -866,8 +866,7 @@
       __ vldr(d0, scratch2, HeapNumber::kValueOffset);
       __ sub(probe, probe, Operand(kHeapObjectTag));
       __ vldr(d1, probe, HeapNumber::kValueOffset);
-      __ vcmp(d0, d1);
-      __ vmrs(pc);
+      __ VFPCompareAndSetFlags(d0, d1);
       __ b(ne, not_found);  // The cache did not contain this value.
       __ b(&load_result_from_cache);
     } else {
@@ -975,8 +974,7 @@
     CpuFeatures::Scope scope(VFP3);
     Label no_nan;
     // ARMv7 VFP3 instructions to implement double precision comparison.
-    __ vcmp(d7, d6);
-    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    __ VFPCompareAndSetFlags(d7, d6);
     Label nan;
     __ b(vs, &nan);
     __ mov(r0, Operand(EQUAL), LeaveCC, eq);
@@ -1096,8 +1094,7 @@
 
   __ sub(ip, tos_, Operand(kHeapObjectTag));
   __ vldr(d1, ip, HeapNumber::kValueOffset);
-  __ vcmp(d1, 0.0);
-  __ vmrs(pc);
+  __ VFPCompareAndSetFlags(d1, 0.0);
   // "tos_" is a register, and contains a non zero value by default.
   // Hence we only need to overwrite "tos_" with zero to return false for
   // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
@@ -2519,7 +2516,7 @@
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
     ExternalReference external_caught(Top::k_external_caught_exception_address);
-    __ mov(r0, Operand(false));
+    __ mov(r0, Operand(false, RelocInfo::NONE));
     __ mov(r2, Operand(external_caught));
     __ str(r0, MemOperand(r2));
 
@@ -4915,8 +4912,7 @@
     __ vldr(d1, r2, HeapNumber::kValueOffset);
 
     // Compare operands
-    __ vcmp(d0, d1);
-    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    __ VFPCompareAndSetFlags(d0, d1);
 
     // Don't base result on status bits when a NaN is involved.
     __ b(vs, &unordered);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index d41c1d2..4a982f6 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -4667,8 +4667,7 @@
     __ mov(scratch2, Operand(0x7FF00000));
     __ mov(scratch1, Operand(0, RelocInfo::NONE));
     __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
-    __ vcmp(d0, d1);
-    __ vmrs(pc);
+    __ VFPCompareAndSetFlags(d0, d1);
     runtime.Branch(eq);  // d0 reached infinity.
     __ vdiv(d0, d2, d0);
     __ b(&allocate_return);
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3917d6d..8a53d1c 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -55,8 +55,9 @@
   SafepointTable table(function->code());
   for (unsigned i = 0; i < table.length(); i++) {
     unsigned pc_offset = table.GetPcOffset(i);
-    int deoptimization_index = table.GetDeoptimizationIndex(i);
-    int gap_code_size = table.GetGapCodeSize(i);
+    SafepointEntry safepoint_entry = table.GetEntry(i);
+    int deoptimization_index = safepoint_entry.deoptimization_index();
+    int gap_code_size = safepoint_entry.gap_code_size();
     // Check that we did not shoot past next safepoint.
     // TODO(srdjan): How do we guarantee that safepoint code does not
     // overlap other safepoint patching code?
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index e5a1bae..340bc1e 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1591,7 +1591,7 @@
       __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
 
       Label exponent_rebiased;
-      __ teq(r1, Operand(0x00));
+      __ teq(r1, Operand(0x00, RelocInfo::NONE));
       __ b(eq, &exponent_rebiased);
 
       __ teq(r1, Operand(0xff));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index e53e96d..df890ab 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -93,32 +93,6 @@
 }
 
 
-bool LParallelMove::IsRedundant() const {
-  for (int i = 0; i < move_operands_.length(); ++i) {
-    if (!move_operands_[i].IsRedundant()) return false;
-  }
-  return true;
-}
-
-
-void LParallelMove::PrintDataTo(StringStream* stream) const {
-  for (int i = move_operands_.length() - 1; i >= 0; --i) {
-    if (!move_operands_[i].IsEliminated()) {
-      LOperand* from = move_operands_[i].from();
-      LOperand* to = move_operands_[i].to();
-      if (from->Equals(to)) {
-        to->PrintTo(stream);
-      } else {
-        to->PrintTo(stream);
-        stream->Add(" = ");
-        from->PrintTo(stream);
-      }
-      stream->Add("; ");
-    }
-  }
-}
-
-
 bool LGap::IsRedundant() const {
   for (int i = 0; i < 4; i++) {
     if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
@@ -270,6 +244,11 @@
 }
 
 
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) const {
   stream->Add("[r2] #%d / ", arity());
 }
@@ -702,13 +681,6 @@
 }
 
 
-LOperand* LChunkBuilder::Temp() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
-  allocator_->RecordTemporary(operand);
-  return operand;
-}
-
-
 LUnallocated* LChunkBuilder::TempRegister() {
   LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
   allocator_->RecordTemporary(operand);
@@ -913,59 +885,6 @@
 }
 
 
-void LEnvironment::WriteTranslation(LCodeGen* cgen,
-                                    Translation* translation) const {
-  if (this == NULL) return;
-
-  // The translation includes one command per value in the environment.
-  int translation_size = values()->length();
-  // The output frame height does not include the parameters.
-  int height = translation_size - parameter_count();
-
-  outer()->WriteTranslation(cgen, translation);
-  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
-  translation->BeginFrame(ast_id(), closure_id, height);
-  for (int i = 0; i < translation_size; ++i) {
-    LOperand* value = values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (spilled_registers_ != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          spilled_registers_[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        cgen->AddToTranslation(translation,
-                               spilled_registers_[value->index()],
-                               HasTaggedValueAt(i));
-      } else if (value->IsDoubleRegister() &&
-                 spilled_double_registers_[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        cgen->AddToTranslation(translation,
-                               spilled_double_registers_[value->index()],
-                               false);
-      }
-    }
-
-    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
-  }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) const {
-  stream->Add("[id=%d|", ast_id());
-  stream->Add("[parameters=%d|", parameter_count());
-  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
-  for (int i = 0; i < values_.length(); ++i) {
-    if (i != 0) stream->Add(";");
-    if (values_[i] == NULL) {
-      stream->Add("[hole]");
-    } else {
-      values_[i]->PrintTo(stream);
-    }
-  }
-  stream->Add("]");
-}
-
-
 LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
   if (hydrogen_env == NULL) return NULL;
 
@@ -1024,7 +943,6 @@
 
       return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
                                        TempRegister(),
-                                       TempRegister(),
                                        first_id,
                                        second_id);
     } else if (v->IsCompare()) {
@@ -1032,22 +950,21 @@
       Token::Value op = compare->token();
       HValue* left = compare->left();
       HValue* right = compare->right();
-      if (left->representation().IsInteger32()) {
+      Representation r = compare->GetInputRepresentation();
+      if (r.IsInteger32()) {
+        ASSERT(left->representation().IsInteger32());
         ASSERT(right->representation().IsInteger32());
-        return new LCmpIDAndBranch(op,
-                                   UseRegisterAtStart(left),
+        return new LCmpIDAndBranch(UseRegisterAtStart(left),
                                    UseOrConstantAtStart(right),
                                    first_id,
-                                   second_id,
-                                   false);
-      } else if (left->representation().IsDouble()) {
+                                   second_id);
+      } else if (r.IsDouble()) {
+        ASSERT(left->representation().IsDouble());
         ASSERT(right->representation().IsDouble());
-        return new LCmpIDAndBranch(op,
-                                   UseRegisterAtStart(left),
+        return new LCmpIDAndBranch(UseRegisterAtStart(left),
                                    UseRegisterAtStart(right),
                                    first_id,
-                                   second_id,
-                                   true);
+                                   second_id);
       } else {
         ASSERT(left->representation().IsTagged());
         ASSERT(right->representation().IsTagged());
@@ -1085,7 +1002,6 @@
       ASSERT(compare->value()->representation().IsTagged());
 
       return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  compare->is_strict(),
                                   first_id,
                                   second_id);
     } else if (v->IsIsObject()) {
@@ -1209,7 +1125,8 @@
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   LOperand* input = UseRegisterAtStart(instr->value());
-  LInstruction* result = new LUnaryMathOperation(input);
+  LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+  LInstruction* result = new LUnaryMathOperation(input, temp);
   switch (op) {
     case kMathAbs:
       return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1326,12 +1243,15 @@
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
   } else if (instr->representation().IsInteger32()) {
-    // The temporary operand is necessary to ensure that right is not allocated
-    // into edx.
-    FixedTemp(r1);
+    // TODO(1042) The fixed register allocation
+    // is needed because we call GenericBinaryOpStub from
+    // the generated code, which requires registers r0
+    // and r1 to be used. We should remove that
+    // when we provide a native implementation.
     LOperand* value = UseFixed(instr->left(), r0);
-    LOperand* divisor = UseRegister(instr->right());
-    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), r0));
+    LOperand* divisor = UseFixed(instr->right(), r1);
+    return AssignEnvironment(AssignPointerMap(
+             DefineFixed(new LDivI(value, divisor), r0)));
   } else {
     return DoArithmeticT(Token::DIV, instr);
   }
@@ -1340,18 +1260,17 @@
 
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
+    // TODO(1042) The fixed register allocation
+    // is needed because we call GenericBinaryOpStub from
+    // the generated code, which requires registers r0
+    // and r1 to be used. We should remove that
+    // when we provide a native implementation.
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    // The temporary operand is necessary to ensure that right is not allocated
-    // into edx.
-    FixedTemp(r1);
     LOperand* value = UseFixed(instr->left(), r0);
-    LOperand* divisor = UseRegister(instr->right());
-    LInstruction* result = DefineFixed(new LModI(value, divisor), r1);
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
-        instr->CheckFlag(HValue::kCanBeDivByZero)) {
-      result = AssignEnvironment(result);
-    }
+    LOperand* divisor = UseFixed(instr->right(), r1);
+    LInstruction* result = DefineFixed(new LModI(value, divisor), r0);
+    result = AssignEnvironment(AssignPointerMap(result));
     return result;
   } else if (instr->representation().IsTagged()) {
     return DoArithmeticT(Token::MOD, instr);
@@ -1437,17 +1356,22 @@
 
 LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
   Token::Value op = instr->token();
-  if (instr->left()->representation().IsInteger32()) {
+  Representation r = instr->GetInputRepresentation();
+  if (r.IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(op, left, right, false));
-  } else if (instr->left()->representation().IsDouble()) {
+    return DefineAsRegister(new LCmpID(left, right));
+  } else if (r.IsDouble()) {
+    ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(op, left, right, true));
+    return DefineAsRegister(new LCmpID(left, right));
   } else {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
     bool reversed = (op == Token::GT || op == Token::LTE);
     LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
     LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
@@ -1470,8 +1394,7 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LIsNull(value,
-                                      instr->is_strict()));
+  return DefineAsRegister(new LIsNull(value));
 }
 
 
@@ -1511,8 +1434,7 @@
 LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseTempRegister(instr->value());
-
-  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+  return DefineSameAsFirst(new LClassOfTest(value));
 }
 
 
@@ -1625,11 +1547,7 @@
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LInstruction* result =
-      new LCheckPrototypeMaps(temp1,
-                              temp2,
-                              instr->holder(),
-                              instr->receiver_map());
+  LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
   return AssignEnvironment(result);
 }
 
@@ -1669,7 +1587,7 @@
   } else if (r.IsTagged()) {
     return DefineAsRegister(new LConstantT(instr->handle()));
   } else {
-    Abort("unsupported constant of type double");
+    UNREACHABLE();
     return NULL;
   }
 }
@@ -1688,6 +1606,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  return DefineAsRegister(new LLoadContextSlot);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   return DefineAsRegister(
       new LLoadNamedField(UseRegisterAtStart(instr->object())));
@@ -1716,23 +1639,12 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
     HLoadKeyedFastElement* instr) {
-  Representation r = instr->representation();
-  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterAtStart(instr->key());
-  LOperand* load_result = NULL;
-  // Double needs an extra temp, because the result is converted from heap
-  // number to a double register.
-  if (r.IsDouble()) load_result = TempRegister();
-  LInstruction* result = new LLoadKeyedFastElement(obj,
-                                                   key,
-                                                   load_result);
-  if (r.IsDouble()) {
-    result = DefineAsRegister(result);
-  } else {
-    result = DefineSameAsFirst(result);
-  }
-  return AssignEnvironment(result);
+  LInstruction* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
@@ -1789,13 +1701,7 @@
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
 
-  return new LStoreNamedField(obj,
-                              instr->name(),
-                              val,
-                              instr->is_in_object(),
-                              instr->offset(),
-                              needs_write_barrier,
-                              instr->transition());
+  return new LStoreNamedField(obj, val);
 }
 
 
@@ -1803,7 +1709,7 @@
   LOperand* obj = UseFixed(instr->object(), r1);
   LOperand* val = UseFixed(instr->value(), r0);
 
-  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  LInstruction* result = new LStoreNamedGeneric(obj, val);
   return MarkAsCall(result, instr);
 }
 
@@ -1829,8 +1735,9 @@
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LInstruction* result = new LDeleteProperty(Use(instr->object()),
-                                             UseOrConstant(instr->key()));
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LInstruction* result = new LDeleteProperty(object, key);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -1944,21 +1851,4 @@
 }
 
 
-void LPointerMap::RecordPointer(LOperand* op) {
-  // Do not record arguments as pointers.
-  if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
-  pointer_operands_.Add(op);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) const {
-  stream->Add("{");
-  for (int i = 0; i < pointer_operands_.length(); ++i) {
-    if (i != 0) stream->Add(";");
-    pointer_operands_[i]->PrintTo(stream);
-  }
-  stream->Add("} @%d", position());
-}
-
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 4ddb281..c6b89a5 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -38,8 +38,6 @@
 
 // Forward declarations.
 class LCodeGen;
-class LEnvironment;
-class Translation;
 
 
 // Type hierarchy:
@@ -78,6 +76,7 @@
 //   LCallNamed
 //   LCallRuntime
 //   LCallStub
+//   LCheckPrototypeMaps
 //   LConstant
 //     LConstantD
 //     LConstantI
@@ -87,7 +86,8 @@
 //   LGlobalObject
 //   LGlobalReceiver
 //   LLabel
-//   LLayzBailout
+//   LLazyBailout
+//   LLoadContextSlot
 //   LLoadGlobal
 //   LMaterializedLiteral
 //     LArrayLiteral
@@ -111,7 +111,6 @@
 //     LCheckFunction
 //     LCheckInstanceType
 //     LCheckMap
-//     LCheckPrototypeMaps
 //     LCheckSmi
 //     LClassOfTest
 //     LClassOfTestAndBranch
@@ -223,6 +222,7 @@
   V(ClassOfTestAndBranch)                       \
   V(Label)                                      \
   V(LazyBailout)                                \
+  V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadGlobal)                                 \
   V(LoadKeyedFastElement)                       \
@@ -332,27 +332,6 @@
 };
 
 
-class LParallelMove : public ZoneObject {
- public:
-  LParallelMove() : move_operands_(4) { }
-
-  void AddMove(LOperand* from, LOperand* to) {
-    move_operands_.Add(LMoveOperands(from, to));
-  }
-
-  bool IsRedundant() const;
-
-  const ZoneList<LMoveOperands>* move_operands() const {
-    return &move_operands_;
-  }
-
-  void PrintDataTo(StringStream* stream) const;
-
- private:
-  ZoneList<LMoveOperands> move_operands_;
-};
-
-
 class LGap: public LInstruction {
  public:
   explicit LGap(HBasicBlock* block)
@@ -466,6 +445,10 @@
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
   DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
+  }
 };
 
 
@@ -602,29 +585,26 @@
 
 class LCmpID: public LBinaryOperation {
  public:
-  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
-      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+  LCmpID(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
 
-  Token::Value op() const { return op_; }
-  bool is_double() const { return is_double_; }
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->GetInputRepresentation().IsDouble();
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-
- private:
-  Token::Value op_;
-  bool is_double_;
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
 };
 
 
 class LCmpIDAndBranch: public LCmpID {
  public:
-  LCmpIDAndBranch(Token::Value op,
-                  LOperand* left,
+  LCmpIDAndBranch(LOperand* left,
                   LOperand* right,
                   int true_block_id,
-                  int false_block_id,
-                  bool is_double)
-      : LCmpID(op, left, right, is_double),
+                  int false_block_id)
+      : LCmpID(left, right),
         true_block_id_(true_block_id),
         false_block_id_(false_block_id) { }
 
@@ -643,14 +623,18 @@
 
 class LUnaryMathOperation: public LUnaryOperation {
  public:
-  explicit LUnaryMathOperation(LOperand* value)
-      : LUnaryOperation(value) { }
+  explicit LUnaryMathOperation(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 
   virtual void PrintDataTo(StringStream* stream) const;
   BuiltinFunctionId op() const { return hydrogen()->op(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
 };
 
 
@@ -687,25 +671,21 @@
 
 class LIsNull: public LUnaryOperation {
  public:
-  LIsNull(LOperand* value, bool is_strict)
-      : LUnaryOperation(value), is_strict_(is_strict) {}
+  explicit LIsNull(LOperand* value) : LUnaryOperation(value) {}
 
   DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+  DECLARE_HYDROGEN_ACCESSOR(IsNull);
 
-  bool is_strict() const { return is_strict_; }
-
- private:
-  bool is_strict_;
+  bool is_strict() const { return hydrogen()->is_strict(); }
 };
 
 
 class LIsNullAndBranch: public LIsNull {
  public:
   LIsNullAndBranch(LOperand* value,
-                   bool is_strict,
                    int true_block_id,
                    int false_block_id)
-      : LIsNull(value, is_strict),
+      : LIsNull(value),
         true_block_id_(true_block_id),
         false_block_id_(false_block_id) { }
 
@@ -865,18 +845,12 @@
 
 class LClassOfTest: public LUnaryOperation {
  public:
-  LClassOfTest(LOperand* value, LOperand* temp)
-      : LUnaryOperation(value), temporary_(temp) {}
+  explicit LClassOfTest(LOperand* value) : LUnaryOperation(value) {}
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
 
   virtual void PrintDataTo(StringStream* stream) const;
-
-  LOperand* temporary() { return temporary_; }
-
- private:
-  LOperand *temporary_;
 };
 
 
@@ -884,11 +858,10 @@
  public:
   LClassOfTestAndBranch(LOperand* value,
                         LOperand* temporary,
-                        LOperand* temporary2,
                         int true_block_id,
                         int false_block_id)
-      : LClassOfTest(value, temporary),
-        temporary2_(temporary2),
+      : LClassOfTest(value),
+        temporary_(temporary),
         true_block_id_(true_block_id),
         false_block_id_(false_block_id) { }
 
@@ -899,10 +872,10 @@
 
   int true_block_id() const { return true_block_id_; }
   int false_block_id() const { return false_block_id_; }
-  LOperand* temporary2() { return temporary2_; }
+  LOperand* temporary() { return temporary_; }
 
  private:
-  LOperand* temporary2_;
+  LOperand* temporary_;
   int true_block_id_;
   int false_block_id_;
 };
@@ -1263,21 +1236,14 @@
 
 class LLoadKeyedFastElement: public LBinaryOperation {
  public:
-  LLoadKeyedFastElement(LOperand* elements,
-                        LOperand* key,
-                        LOperand* load_result)
-      : LBinaryOperation(elements, key),
-        load_result_(load_result) { }
+  LLoadKeyedFastElement(LOperand* elements, LOperand* key)
+      : LBinaryOperation(elements, key) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
 
   LOperand* elements() const { return left(); }
   LOperand* key() const { return right(); }
-  LOperand* load_result() const { return load_result_; }
-
- private:
-  LOperand* load_result_;
 };
 
 
@@ -1309,6 +1275,20 @@
 };
 
 
+class LLoadContextSlot: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int context_chain_length() const {
+    return hydrogen()->context_chain_length();
+  }
+  int slot_index() const { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LPushArgument: public LUnaryOperation {
  public:
   explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
@@ -1513,63 +1493,46 @@
 
 class LStoreNamed: public LInstruction {
  public:
-  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
-      : object_(obj), name_(name), value_(val) { }
+  LStoreNamed(LOperand* obj, LOperand* val)
+      : object_(obj), value_(val) { }
 
   DECLARE_INSTRUCTION(StoreNamed)
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
 
   virtual void PrintDataTo(StringStream* stream) const;
 
   LOperand* object() const { return object_; }
-  Handle<Object> name() const { return name_; }
+  Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() const { return value_; }
 
  private:
   LOperand* object_;
-  Handle<Object> name_;
   LOperand* value_;
 };
 
 
 class LStoreNamedField: public LStoreNamed {
  public:
-  LStoreNamedField(LOperand* obj,
-                   Handle<Object> name,
-                   LOperand* val,
-                   bool in_object,
-                   int offset,
-                   bool needs_write_barrier,
-                   Handle<Map> transition)
-      : LStoreNamed(obj, name, val),
-        is_in_object_(in_object),
-        offset_(offset),
-        needs_write_barrier_(needs_write_barrier),
-        transition_(transition) { }
+  LStoreNamedField(LOperand* obj, LOperand* val)
+      : LStoreNamed(obj, val) { }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  bool is_in_object() { return is_in_object_; }
-  int offset() { return offset_; }
-  bool needs_write_barrier() { return needs_write_barrier_; }
-  Handle<Map> transition() const { return transition_; }
-  void set_transition(Handle<Map> map) { transition_ = map; }
-
- private:
-  bool is_in_object_;
-  int offset_;
-  bool needs_write_barrier_;
-  Handle<Map> transition_;
+  bool is_in_object() { return hydrogen()->is_in_object(); }
+  int offset() { return hydrogen()->offset(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+  Handle<Map> transition() { return hydrogen()->transition(); }
 };
 
 
 class LStoreNamedGeneric: public LStoreNamed {
  public:
-  LStoreNamedGeneric(LOperand* obj,
-                     Handle<Object> name,
-                     LOperand* val)
-      : LStoreNamed(obj, name, val) { }
+  LStoreNamedGeneric(LOperand* obj, LOperand* val)
+      : LStoreNamed(obj, val) { }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 };
 
 
@@ -1647,27 +1610,21 @@
 
 class LCheckPrototypeMaps: public LInstruction {
  public:
-  LCheckPrototypeMaps(LOperand* temp1,
-                      LOperand* temp2,
-                      Handle<JSObject> holder,
-                      Handle<Map> receiver_map)
-      : temp1_(temp1),
-        temp2_(temp2),
-        holder_(holder),
-        receiver_map_(receiver_map) { }
+  LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2)
+      : temp1_(temp1), temp2_(temp2) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+  Handle<JSObject> holder() const { return hydrogen()->holder(); }
 
   LOperand* temp1() const { return temp1_; }
   LOperand* temp2() const { return temp2_; }
-  Handle<JSObject> holder() const { return holder_; }
-  Handle<Map> receiver_map() const { return receiver_map_; }
 
  private:
   LOperand* temp1_;
   LOperand* temp2_;
-  Handle<JSObject> holder_;
-  Handle<Map> receiver_map_;
 };
 
 
@@ -1807,108 +1764,6 @@
 };
 
 
-class LPointerMap: public ZoneObject {
- public:
-  explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
-  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
-  int position() const { return position_; }
-  int lithium_position() const { return lithium_position_; }
-
-  void set_lithium_position(int pos) {
-    ASSERT(lithium_position_ == -1);
-    lithium_position_ = pos;
-  }
-
-  void RecordPointer(LOperand* op);
-  void PrintTo(StringStream* stream) const;
-
- private:
-  ZoneList<LOperand*> pointer_operands_;
-  int position_;
-  int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
-  LEnvironment(Handle<JSFunction> closure,
-               int ast_id,
-               int parameter_count,
-               int argument_count,
-               int value_count,
-               LEnvironment* outer)
-      : closure_(closure),
-        arguments_stack_height_(argument_count),
-        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
-        translation_index_(-1),
-        ast_id_(ast_id),
-        parameter_count_(parameter_count),
-        values_(value_count),
-        representations_(value_count),
-        spilled_registers_(NULL),
-        spilled_double_registers_(NULL),
-        outer_(outer) {
-  }
-
-  Handle<JSFunction> closure() const { return closure_; }
-  int arguments_stack_height() const { return arguments_stack_height_; }
-  int deoptimization_index() const { return deoptimization_index_; }
-  int translation_index() const { return translation_index_; }
-  int ast_id() const { return ast_id_; }
-  int parameter_count() const { return parameter_count_; }
-  const ZoneList<LOperand*>* values() const { return &values_; }
-  LEnvironment* outer() const { return outer_; }
-
-  void AddValue(LOperand* operand, Representation representation) {
-    values_.Add(operand);
-    representations_.Add(representation);
-  }
-
-  bool HasTaggedValueAt(int index) const {
-    return representations_[index].IsTagged();
-  }
-
-  void Register(int deoptimization_index, int translation_index) {
-    ASSERT(!HasBeenRegistered());
-    deoptimization_index_ = deoptimization_index;
-    translation_index_ = translation_index;
-  }
-  bool HasBeenRegistered() const {
-    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
-  }
-
-  void SetSpilledRegisters(LOperand** registers,
-                           LOperand** double_registers) {
-    spilled_registers_ = registers;
-    spilled_double_registers_ = double_registers;
-  }
-
-  // Emit frame translation commands for this environment.
-  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
-
-  void PrintTo(StringStream* stream) const;
-
- private:
-  Handle<JSFunction> closure_;
-  int arguments_stack_height_;
-  int deoptimization_index_;
-  int translation_index_;
-  int ast_id_;
-  int parameter_count_;
-  ZoneList<LOperand*> values_;
-  ZoneList<Representation> representations_;
-
-  // Allocation index indexed arrays of spill slot operands for registers
-  // that are also in spill slots at an OSR entry.  NULL for environments
-  // that do not correspond to an OSR entry.
-  LOperand** spilled_registers_;
-  LOperand** spilled_double_registers_;
-
-  LEnvironment* outer_;
-};
-
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2069,8 +1924,6 @@
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
 
-  // Temporary operand that may be a memory location.
-  LOperand* Temp();
   // Temporary operand that must be in a register.
   LUnallocated* TempRegister();
   LOperand* FixedTemp(Register reg);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index f53cebb..dca95f2 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,6 +54,157 @@
 };
 
 
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver()
+    : nodes_(32),
+      identified_cycles_(4),
+      result_(16),
+      next_visited_id_(0) {
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::Resolve(
+    const ZoneList<LMoveOperands>* moves,
+    LOperand* marker_operand) {
+  nodes_.Rewind(0);
+  identified_cycles_.Rewind(0);
+  result_.Rewind(0);
+  next_visited_id_ = 0;
+
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i], marker_operand);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
+  ZoneList<LOperand*> cycle_operands(8);
+  cycle_operands.Add(marker_operand);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    cycle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  cycle_operands.Add(marker_operand);
+
+  for (int i = cycle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = cycle_operands[i];
+    LOperand* to = cycle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This introduces a cycle. Save.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
@@ -324,6 +475,45 @@
 }
 
 
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (environment->spilled_registers() != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          environment->spilled_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(translation,
+                         environment->spilled_registers()[value->index()],
+                         environment->HasTaggedValueAt(i));
+      } else if (
+          value->IsDoubleRegister() &&
+          environment->spilled_double_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(
+            translation,
+            environment->spilled_double_registers()[value->index()],
+            false);
+      }
+    }
+
+    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+  }
+}
+
+
 void LCodeGen::AddToTranslation(Translation* translation,
                                 LOperand* op,
                                 bool is_tagged) {
@@ -439,7 +629,7 @@
       ++frame_count;
     }
     Translation translation(&translations_, frame_count);
-    environment->WriteTranslation(this, &translation);
+    WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     environment->Register(deoptimization_index, translation.index());
     deoptimizations_.Add(environment);
@@ -575,6 +765,27 @@
 }
 
 
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+    LPointerMap* pointers,
+    int arguments,
+    int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegistersAndDoubles(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register cp always contains a pointer to the context.
+  safepoint.DefinePointerRegister(cp);
+}
+
+
 void LCodeGen::RecordPosition(int position) {
   if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
@@ -601,8 +812,8 @@
   Register core_scratch = scratch0();
   bool destroys_core_scratch = false;
 
-  LGapResolver resolver(move->move_operands(), &marker_operand);
-  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+  const ZoneList<LMoveOperands>* moves =
+      resolver_.Resolve(move->move_operands(), &marker_operand);
   for (int i = moves->length() - 1; i >= 0; --i) {
     LMoveOperands move = moves->at(i);
     LOperand* from = move.from();
@@ -771,7 +982,9 @@
       break;
     }
     case CodeStub::TranscendentalCache: {
-      Abort("TranscendentalCache unimplemented.");
+      __ ldr(r0, MemOperand(sp, 0));
+      TranscendentalCacheStub stub(instr->transcendental_type());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     default:
@@ -786,12 +999,157 @@
 
 
 void LCodeGen::DoModI(LModI* instr) {
-  Abort("DoModI unimplemented.");
+  Abort("ModI not implemented");
+  class DeferredModI: public LDeferredCode {
+   public:
+    DeferredModI(LCodeGen* codegen, LModI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
+    }
+   private:
+    LModI* instr_;
+  };
+  // These registers hold untagged 32 bit values.
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  Label deoptimize, done;
+  // Check for x % 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ tst(right, Operand(right));
+    __ b(eq, &deoptimize);
+  }
+
+  // Check for (0 % -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label ok;
+    __ tst(left, Operand(left));
+    __ b(ne, &ok);
+    __ tst(right, Operand(right));
+    __ b(pl, &ok);
+    __ b(al, &deoptimize);
+    __ bind(&ok);
+  }
+
+  // Call the generic stub. The numbers in r0 and r1 have
+  // to be converted to Smis. If that is not possible, deoptimize.
+  DeferredModI* deferred = new DeferredModI(this, instr);
+  __ TrySmiTag(left, &deoptimize, scratch);
+  __ TrySmiTag(right, &deoptimize, scratch);
+
+  __ b(al, deferred->entry());
+  __ bind(deferred->exit());
+
+  // If the result in r0 is a Smi, untag it, else deoptimize.
+  __ BranchOnNotSmi(result, &deoptimize);
+  __ mov(result, Operand(result, ASR, 1));
+
+  __ b(al, &done);
+  __ bind(&deoptimize);
+  DeoptimizeIf(al, instr->environment());
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoDivI(LDivI* instr) {
-  Abort("DoDivI unimplemented.");
+  Abort("DivI not implemented");
+  class DeferredDivI: public LDeferredCode {
+   public:
+    DeferredDivI(LCodeGen* codegen, LDivI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
+    }
+   private:
+    LDivI* instr_;
+  };
+
+  const Register left = ToRegister(instr->left());
+  const Register right = ToRegister(instr->right());
+  const Register scratch = scratch0();
+  const Register result = ToRegister(instr->result());
+
+  // Check for x / 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ tst(right, right);
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ tst(left, Operand(left));
+    __ b(ne, &left_not_zero);
+    __ tst(right, Operand(right));
+    DeoptimizeIf(mi, instr->environment());
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (-kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    Label left_not_min_int;
+    __ cmp(left, Operand(kMinInt));
+    __ b(ne, &left_not_min_int);
+    __ cmp(right, Operand(-1));
+    DeoptimizeIf(eq, instr->environment());
+    __ bind(&left_not_min_int);
+  }
+
+  Label done, deoptimize;
+  // Test for a few common cases first.
+  __ cmp(right, Operand(1));
+  __ mov(result, left, LeaveCC, eq);
+  __ b(eq, &done);
+
+  __ cmp(right, Operand(2));
+  __ tst(left, Operand(1), eq);
+  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
+  __ b(eq, &done);
+
+  __ cmp(right, Operand(4));
+  __ tst(left, Operand(3), eq);
+  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
+  __ b(eq, &done);
+
+  // Call the generic stub. The numbers in r0 and r1 have
+  // to be converted to Smis. If that is not possible, deoptimize.
+  DeferredDivI* deferred = new DeferredDivI(this, instr);
+
+  __ TrySmiTag(left, &deoptimize, scratch);
+  __ TrySmiTag(right, &deoptimize, scratch);
+
+  __ b(al, deferred->entry());
+  __ bind(deferred->exit());
+
+  // If the result in r0 is a Smi, untag it, else deoptimize.
+  __ BranchOnNotSmi(result, &deoptimize);
+  __ SmiUntag(result);
+  __ b(&done);
+
+  __ bind(&deoptimize);
+  DeoptimizeIf(al, instr->environment());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredGenericBinaryStub(LBinaryOperation* instr,
+                                           Token::Value op) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  __ PushSafepointRegistersAndDoubles();
+  GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
+  __ CallStub(&stub);
+  RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
+                                         0,
+                                         Safepoint::kNoDeoptimizationIndex);
+  // Overwrite the stored value of r0 with the result of the stub.
+  __ str(r0, MemOperand(sp, DwVfpRegister::kNumAllocatableRegisters *
+                        kDoubleSize));
+  __ PopSafepointRegistersAndDoubles();
 }
 
 
@@ -956,12 +1314,26 @@
   Register result = ToRegister(instr->result());
   Register array = ToRegister(instr->input());
   __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
-  Abort("DoFixedArrayLength untested.");
 }
 
 
 void LCodeGen::DoValueOf(LValueOf* instr) {
-  Abort("DoValueOf unimplemented.");
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->temporary());
+  ASSERT(input.is(result));
+  Label done;
+
+  // If the object is a smi return the object.
+  __ tst(input, Operand(kSmiTagMask));
+  __ b(eq, &done);
+
+  // If the object is not a value type, return the object.
+  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
+  __ b(ne, &done);
+  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
 }
 
 
@@ -969,7 +1341,6 @@
   LOperand* input = instr->input();
   ASSERT(input->Equals(instr->result()));
   __ mvn(ToRegister(input), Operand(ToRegister(input)));
-  Abort("DoBitNotI untested.");
 }
 
 
@@ -1078,14 +1449,9 @@
     DoubleRegister reg = ToDoubleRegister(instr->input());
     Register scratch = scratch0();
 
-    // Test for the double value. Zero and NaN are false.
-    // Clear the Invalid cumulative exception flags.
-    __ ClearFPSCRBits(kVFPInvalidExceptionBit, scratch);
-    __ vcmp(reg, 0.0);
-      // Retrieve the exception and status flags and
-      // check for zero or an invalid exception.
-    __ vmrs(scratch);
-    __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPInvalidExceptionBit));
+    // Test the double value. Zero and NaN are false.
+    __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
+    __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
     EmitBranch(true_block, false_block, ne);
   } else {
     ASSERT(r.IsTagged());
@@ -1112,7 +1478,7 @@
       __ tst(reg, Operand(kSmiTagMask));
       __ b(eq, true_label);
 
-      // Test for double values. Zero and NaN are false.
+      // Test double values. Zero and NaN are false.
       Label call_stub;
       DoubleRegister dbl_scratch = d0;
       Register scratch = scratch0();
@@ -1122,13 +1488,8 @@
       __ b(ne, &call_stub);
       __ sub(ip, reg, Operand(kHeapObjectTag));
       __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
-      // Clear the Invalid cumulative exception flags.
-      __ ClearFPSCRBits(kVFPInvalidExceptionBit, scratch);
-      __ vcmp(dbl_scratch, 0.0);
-      // Retrieve the exception and status flags and
-      // check for zero or an invalid exception.
-      __ vmrs(scratch);
-      __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPInvalidExceptionBit));
+      __ VFPCompareAndLoadFlags(dbl_scratch, 0.0, scratch);
+      __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
       __ b(ne, false_label);
       __ b(true_label);
 
@@ -1148,24 +1509,47 @@
 
 
 void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
-  // TODO(srdjan): Perform stack overflow check if this goto needs it
-  // before jumping.
   block = chunk_->LookupDestination(block);
   int next_block = GetNextEmittedBlock(current_block_);
   if (block != next_block) {
-    __ jmp(chunk_->GetAssemblyLabel(block));
+    // Perform stack overflow check if this goto needs it before jumping.
+    if (deferred_stack_check != NULL) {
+      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+      __ cmp(sp, Operand(ip));
+      __ b(hs, chunk_->GetAssemblyLabel(block));
+      __ jmp(deferred_stack_check->entry());
+      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
+    } else {
+      __ jmp(chunk_->GetAssemblyLabel(block));
+    }
   }
 }
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  UNIMPLEMENTED();
+  __ PushSafepointRegisters();
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ PopSafepointRegisters();
 }
 
 
 void LCodeGen::DoGoto(LGoto* instr) {
-  // TODO(srdjan): Implement deferred stack check.
-  EmitGoto(instr->block_id(), NULL);
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LGoto* instr_;
+  };
+
+  DeferredStackCheck* deferred = NULL;
+  if (instr->include_stack_check()) {
+    deferred = new DeferredStackCheck(this, instr);
+  }
+  EmitGoto(instr->block_id(), deferred);
 }
 
 
@@ -1393,7 +1777,7 @@
 }
 
 
-// Branches to a label or falls through with the answer in the z flag.  Trashes
+// Branches to a label or falls through with the answer in flags.  Trashes
 // the temp registers, but not the input.  Only input and temp2 may alias.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
@@ -1401,17 +1785,91 @@
                                Register input,
                                Register temp,
                                Register temp2) {
-  Abort("EmitClassOfTest unimplemented.");
+  ASSERT(!input.is(temp));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  __ tst(input, Operand(kSmiTagMask));
+  __ b(eq, is_false);
+  __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, is_false);
+
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    __ b(eq, is_true);
+  } else {
+    __ b(eq, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ b(ne, is_true);
+  } else {
+    __ b(ne, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+  __ cmp(temp, Operand(class_name));
+  // End with the answer in flags.
 }
 
 
 void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
-  Abort("DoClassOfTest unimplemented.");
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  ASSERT(input.is(result));
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  Label done, is_true, is_false;
+
+  EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
+  __ b(ne, &is_false);
+
+  __ bind(&is_true);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&is_false);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
-  Abort("DoClassOfTestAndBranch unimplemented.");
+  Register input = ToRegister(instr->input());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temporary());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, eq);
 }
 
 
@@ -1482,10 +1940,12 @@
     condition = ReverseCondition(condition);
   }
   __ cmp(r0, Operand(0));
-  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex,
-      condition);
-  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex,
-      NegateCondition(condition));
+  __ LoadRoot(ToRegister(instr->result()),
+              Heap::kTrueValueRootIndex,
+              condition);
+  __ LoadRoot(ToRegister(instr->result()),
+              Heap::kFalseValueRootIndex,
+              NegateCondition(condition));
 }
 
 
@@ -1528,6 +1988,14 @@
 }
 
 
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  // TODO(antonm): load a context with a separate instruction.
+  Register result = ToRegister(instr->result());
+  __ LoadContext(result, instr->context_chain_length());
+  __ ldr(result, ContextOperand(result, instr->slot_index()));
+}
+
+
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   Register object = ToRegister(instr->input());
   Register result = ToRegister(instr->result());
@@ -1636,36 +2104,18 @@
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   Register elements = ToRegister(instr->elements());
   Register key = EmitLoadRegister(instr->key(), scratch0());
-  Register result;
+  Register result = ToRegister(instr->result());
   Register scratch = scratch0();
-
-  if (instr->load_result() != NULL) {
-    result = ToRegister(instr->load_result());
-  } else {
-    result = ToRegister(instr->result());
-    ASSERT(result.is(elements));
-  }
+  ASSERT(result.is(elements));
 
   // Load the result.
   __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
   __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
 
-  Representation r = instr->hydrogen()->representation();
-  if (r.IsInteger32()) {
-    // Untag and check for smi.
-    __ SmiUntag(result);
-    DeoptimizeIf(cs, instr->environment());
-  } else if (r.IsDouble()) {
-    EmitNumberUntagD(result,
-                     ToDoubleRegister(instr->result()),
-                     instr->environment());
-  } else {
-    // Check for the hole value.
-    ASSERT(r.IsTagged());
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-    __ cmp(result, scratch);
-    DeoptimizeIf(eq, instr->environment());
-  }
+  // Check for the hole value.
+  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+  __ cmp(result, scratch);
+  DeoptimizeIf(eq, instr->environment());
 }
 
 
@@ -1718,7 +2168,65 @@
 
 
 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Abort("DoApplyArguments unimplemented.");
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register scratch = scratch0();
+
+  ASSERT(receiver.is(r0));
+  ASSERT(function.is(r1));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  // If the receiver is null or undefined, we have to pass the
+  // global object as a receiver.
+  Label global_receiver, receiver_ok;
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ b(eq, &global_receiver);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ b(ne, &receiver_ok);
+  __ bind(&global_receiver);
+  __ ldr(receiver, GlobalObjectOperand());
+  __ bind(&receiver_ok);
+
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+
+  Label invoke;
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmp(length, Operand(kArgumentsLimit));
+  DeoptimizeIf(hi, instr->environment());
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ mov(receiver, length);
+  // The arguments are located at a one-pointer-size offset from elements.
+  __ add(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label loop;
+  // length is a small non-negative integer, due to the test above.
+  __ tst(length, Operand(length));
+  __ b(eq, &invoke);
+  __ bind(&loop);
+  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
+  __ push(scratch);
+  __ sub(length, length, Operand(1), SetCC);
+  __ b(ne, &loop);
+
+  __ bind(&invoke);
+  // Invoke the function. The number of arguments is stored in receiver
+  // which is r0, as expected by InvokeFunction.
+  v8::internal::ParameterCount actual(receiver);
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
 }
 
 
@@ -1797,12 +2305,44 @@
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  Abort("DoMathFloor unimplemented.");
+  DoubleRegister input = ToDoubleRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register prev_fpscr = ToRegister(instr->temp());
+  SwVfpRegister single_scratch = double_scratch0().low();
+  Register scratch = scratch0();
+
+  // Set custom FPSCR:
+  //  - Set rounding mode to "Round towards Minus Infinity".
+  //  - Clear the VFP cumulative exception flags.
+  //  - Make sure the Flush-to-zero mode control bit is unset.
+  __ vmrs(prev_fpscr);
+  __ bic(scratch, prev_fpscr,
+      Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
+  __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
+  __ vmsr(scratch);
+
+  // Convert the argument to an integer.
+  __ vcvt_s32_f64(single_scratch,
+                  input,
+                  Assembler::FPSCRRounding,
+                  al);
+
+  // Retrieve FPSCR and check for VFP exceptions.
+  __ vmrs(scratch);
+  // Restore FPSCR.
+  __ vmsr(prev_fpscr);
+  __ tst(scratch, Operand(kVFPExceptionMask));
+  DeoptimizeIf(ne, instr->environment());
+
+  // Move the result back to general purpose register r0.
+  __ vmov(result, single_scratch);
 }
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  Abort("DoMathSqrt unimplemented.");
+  DoubleRegister input = ToDoubleRegister(instr->input());
+  ASSERT(ToDoubleRegister(instr->result()).is(input));
+  __ vsqrt(input, input);
 }
 
 
@@ -1976,7 +2516,19 @@
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  Abort("DoInteger32ToDouble unimplemented.");
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  SwVfpRegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ ldr(scratch, ToMemOperand(input));
+    __ vmov(single_scratch, scratch);
+  } else {
+    __ vmov(single_scratch, ToRegister(input));
+  }
+  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
 }
 
 
@@ -2197,7 +2749,7 @@
     __ bind(&heap_number);
     __ sub(ip, input_reg, Operand(kHeapObjectTag));
     __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
-    __ vcmp(dbl_tmp, 0.0);  // Sets overflow bit if NaN.
+    __ vcmp(dbl_tmp, 0.0);  // Sets overflow bit in FPSCR flags if NaN.
     __ vcvt_s32_f64(flt_scratch, dbl_tmp);
     __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
     __ vmrs(pc);  // Move vector status bits to normal status bits.
@@ -2218,8 +2770,7 @@
     // back to check; note that using non-overlapping s and d regs would be
     // slightly faster.
     __ vcvt_f64_s32(dbl_scratch, flt_scratch);
-    __ vcmp(dbl_scratch, dbl_tmp);
-    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    __ VFPCompareAndSetFlags(dbl_scratch, dbl_tmp);
     DeoptimizeIf(ne, instr->environment());  // Not equal or unordered.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ tst(input_reg, Operand(input_reg));
@@ -2322,14 +2873,15 @@
 }
 
 
-void LCodeGen::LoadPrototype(Register result,
-                             Handle<JSObject> prototype) {
-  if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result,
+                              Handle<HeapObject> object) {
+  if (Heap::InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
-        Factory::NewJSGlobalPropertyCell(prototype);
+        Factory::NewJSGlobalPropertyCell(object);
     __ mov(result, Operand(cell));
+    __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(result, Operand(prototype));
+    __ mov(result, Operand(object));
   }
 }
 
@@ -2339,11 +2891,10 @@
   Register temp2 = ToRegister(instr->temp2());
 
   Handle<JSObject> holder = instr->holder();
-  Handle<Map> receiver_map = instr->receiver_map();
-  Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+  Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadPrototype(temp1, current_prototype);
+  LoadHeapObject(temp1, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
@@ -2353,7 +2904,7 @@
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadPrototype(temp1, current_prototype);
+    LoadHeapObject(temp1, current_prototype);
   }
 
   // Check the holder map.
@@ -2624,7 +3175,14 @@
 
 
 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
-  Abort("DoDeleteProperty unimplemented.");
+  Register object = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  __ Push(object, key);
+  RecordPosition(instr->pointer_map()->position());
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
 }
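
The fast paths emitted by DoDivI above are easier to follow without the assembler plumbing. The sketch below restates that shortcut logic in plain C++; FastDivide is an illustrative name only, and the false return stands in for the deferred GenericBinaryOpStub (Token::DIV) call.

    #include <cstdint>

    // Division by 1, 2 or 4 is handled inline when the dividend is exactly
    // divisible; anything else falls back to the generic stub. The right
    // shifts assume arithmetic shifting, matching ASR on ARM.
    bool FastDivide(int32_t left, int32_t right, int32_t* result) {
      if (right == 1) { *result = left; return true; }
      if (right == 2 && (left & 1) == 0) { *result = left >> 1; return true; }
      if (right == 4 && (left & 3) == 0) { *result = left >> 2; return true; }
      return false;  // DeferredDivI: tag the operands and call the stub.
    }
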
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 608efa9..9eed64b 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,8 +39,30 @@
 
 // Forward declarations.
 class LDeferredCode;
+class LGapNode;
 class SafepointGenerator;
 
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver();
+  const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
+                                         LOperand* marker_operand);
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start, LOperand* marker_operand);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  int next_visited_id_;
+};
+
 
 class LCodeGen BASE_EMBEDDED {
  public:
@@ -71,6 +93,7 @@
   void FinishCode(Handle<Code> code);
 
   // Deferred code support.
+  void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op);
   void DoDeferredNumberTagD(LNumberTagD* instr);
   void DoDeferredNumberTagI(LNumberTagI* instr);
   void DoDeferredTaggedToI(LTaggedToI* instr);
@@ -80,6 +103,9 @@
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
 
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
   // Declare methods that deal with the individual node types.
 #define DECLARE_DO(type) void Do##type(L##type* node);
   LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -104,6 +130,7 @@
   MacroAssembler* masm() const { return masm_; }
 
   Register scratch0() { return r9; }
+  DwVfpRegister double_scratch0() { return d0; }
 
   int GetNextEmittedBlock(int block);
   LInstruction* GetNextInstruction();
@@ -149,7 +176,7 @@
                          int arity,
                          LInstruction* instr);
 
-  void LoadPrototype(Register result, Handle<JSObject> prototype);
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
 
   void RegisterLazyDeoptimization(LInstruction* instr);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@@ -194,6 +221,9 @@
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
                                     int deoptimization_index);
+  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+                                              int arguments,
+                                              int deoptimization_index);
   void RecordPosition(int position);
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
@@ -239,6 +269,9 @@
   // itself is emitted at the end of the generated code.
   SafepointTableBuilder safepoints_;
 
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
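
The new LGapResolver turns a set of parallel moves into a sequential list, and marker_operand exists solely to break cycles among those moves. A minimal illustration of why a cycle needs the marker (SwapViaMarker is a made-up name for this sketch, not part of the patch):

    // A parallel move that forms a cycle, e.g. {r0 -> r1, r1 -> r0}, cannot
    // be emitted as two ordinary moves; ResolveCycle first parks one value
    // in the marker operand and completes the cycle from there.
    void SwapViaMarker(int* r0, int* r1, int* marker) {
      *marker = *r0;
      *r0 = *r1;
      *r1 = *marker;
    }
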
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 5cba955..1028b0e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -466,6 +466,25 @@
 }
 
 
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+  PushSafepointRegisters();
+  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+                      kDoubleSize));
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+  }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+  }
+  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+                      kDoubleSize));
+  PopSafepointRegisters();
+}
+
 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   // The registers are pushed starting with the highest encoding,
   // which means that lowest encodings are closest to the stack pointer.
@@ -519,10 +538,46 @@
 }
 
 
-void MacroAssembler::ClearFPSCRBits(uint32_t bits_to_clear, Register scratch) {
-  vmrs(scratch);
-  bic(scratch, scratch, Operand(bits_to_clear));
-  vmsr(scratch);
+void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
+                                    const Register scratch,
+                                    const Condition cond) {
+  vmrs(scratch, cond);
+  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
+  vmsr(scratch, cond);
+}
+
+
+void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+                                           const DwVfpRegister src2,
+                                           const Condition cond) {
+  // Compare and move FPSCR flags to the normal condition flags.
+  VFPCompareAndLoadFlags(src1, src2, pc, cond);
+}
+
+void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+                                           const double src2,
+                                           const Condition cond) {
+  // Compare and move FPSCR flags to the normal condition flags.
+  VFPCompareAndLoadFlags(src1, src2, pc, cond);
+}
+
+
+void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+                                            const DwVfpRegister src2,
+                                            const Register fpscr_flags,
+                                            const Condition cond) {
+  // Compare and load FPSCR.
+  vcmp(src1, src2, cond);
+  vmrs(fpscr_flags, cond);
+}
+
+void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+                                            const double src2,
+                                            const Register fpscr_flags,
+                                            const Condition cond) {
+  // Compare and load FPSCR.
+  vcmp(src1, src2, cond);
+  vmrs(fpscr_flags, cond);
 }
 
 
@@ -682,7 +737,8 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -738,6 +794,7 @@
         Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
     if (flag == CALL_FUNCTION) {
       Call(adaptor, RelocInfo::CODE_TARGET);
+      if (post_call_generator != NULL) post_call_generator->Generate();
       b(done);
     } else {
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -750,12 +807,15 @@
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
 
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+                 post_call_generator);
   if (flag == CALL_FUNCTION) {
     Call(code);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     Jump(code);
@@ -789,7 +849,8 @@
 
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -806,7 +867,7 @@
       FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag);
+  InvokeCode(code_reg, expected, actual, flag, post_call_generator);
 }
 
 
@@ -1676,10 +1737,12 @@
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags) {
+                                   InvokeJSFlags flags,
+                                   PostCallGenerator* post_call_generator) {
   GetBuiltinEntry(r2, id);
   if (flags == CALL_JS) {
     Call(r2);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flags == JUMP_JS);
     Jump(r2);
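
The VFPCompareAndSetFlags and VFPCompareAndLoadFlags helpers above pair a vcmp with a vmrs so callers can test the FPSCR condition flags directly. For the "zero and NaN are false" branches in lithium-codegen-arm.cc, the two flags of interest behave roughly as in this sketch (DoubleIsTrueValue is illustrative, not a V8 function):

    #include <cmath>

    // After comparing a double against 0.0, the Z flag is set when the value
    // is zero and the V flag when the compare is unordered (the value is
    // NaN); the branch tests Z | V and treats either case as false.
    bool DoubleIsTrueValue(double value) {
      bool z = (value == 0.0);       // kVFPZConditionFlagBit
      bool v = std::isnan(value);    // kVFPVConditionFlagBit
      return !(z || v);
    }
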
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 02bc384..324fbb2 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -33,6 +33,9 @@
 namespace v8 {
 namespace internal {
 
+// Forward declaration.
+class PostCallGenerator;
+
 // ----------------------------------------------------------------------------
 // Static helper functions
 
@@ -229,6 +232,9 @@
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
   void PopSafepointRegisters();
+  void PushSafepointRegistersAndDoubles();
+  void PopSafepointRegistersAndDoubles();
+
   static int SafepointRegisterStackIndex(int reg_code);
 
   // Load two consecutive registers with two consecutive memory locations.
@@ -243,8 +249,29 @@
             const MemOperand& dst,
             Condition cond = al);
 
-  // Clear FPSCR bits.
-  void ClearFPSCRBits(uint32_t bits_to_clear, Register scratch);
+  // Clear specified FPSCR bits.
+  void ClearFPSCRBits(const uint32_t bits_to_clear,
+                      const Register scratch,
+                      const Condition cond = al);
+
+  // Compare double values and move the result to the normal condition flags.
+  void VFPCompareAndSetFlags(const DwVfpRegister src1,
+                             const DwVfpRegister src2,
+                             const Condition cond = al);
+  void VFPCompareAndSetFlags(const DwVfpRegister src1,
+                             const double src2,
+                             const Condition cond = al);
+
+  // Compare double values and then load the FPSCR flags into a register.
+  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+                              const DwVfpRegister src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+                              const double src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+
 
   // ---------------------------------------------------------------------------
   // Activation frames
@@ -284,7 +311,8 @@
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
@@ -296,7 +324,8 @@
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
@@ -637,7 +666,9 @@
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
-  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeJSFlags flags,
+                     PostCallGenerator* post_call_generator = NULL);
 
   // Store the code object for the given builtin in the target register and
   // setup the function in r1.
@@ -688,6 +719,16 @@
     add(reg, reg, Operand(reg), s);
   }
 
+  // Try to convert int32 to smi. If the value is too large, preserve
+  // the original value and jump to not_a_smi. Destroys scratch and
+  // sets flags.
+  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+    mov(scratch, reg);
+    SmiTag(scratch, SetCC);
+    b(vs, not_a_smi);
+    mov(reg, scratch);
+  }
+
   void SmiUntag(Register reg) {
     mov(reg, Operand(reg, ASR, kSmiTagSize));
   }
@@ -745,7 +786,8 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
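
TrySmiTag above tags an int32 as a smi by doubling it with SetCC and branching on signed overflow. The equivalent check in ordinary C++ terms (TrySmiTagValue is a hypothetical helper used only to illustrate the overflow condition):

    #include <cstdint>

    // A value fits in a smi when shifting it left by the one-bit smi tag
    // does not overflow 32 bits; on overflow the original value is kept and
    // the caller takes the not_a_smi path.
    bool TrySmiTagValue(int32_t value, int32_t* tagged) {
      int64_t doubled = static_cast<int64_t>(value) * 2;  // add with SetCC
      if (doubled < INT32_MIN || doubled > INT32_MAX) return false;  // b(vs, ...)
      *tagged = static_cast<int32_t>(doubled);
      return true;
    }
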
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index fbcc9f7..94da042 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -417,8 +417,8 @@
 
 
 void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
-                                                      int reg2,
-                                                      Label* on_not_equal) {
+                                                     int reg2,
+                                                     Label* on_not_equal) {
   __ ldr(r0, register_location(reg1));
   __ ldr(r1, register_location(reg2));
   __ cmp(r0, r1);
@@ -426,7 +426,7 @@
 }
 
 
-void RegExpMacroAssemblerARM::CheckNotCharacter(uint32_t c,
+void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
                                                 Label* on_not_equal) {
   __ cmp(current_character(), Operand(c));
   BranchOrBacktrack(ne, on_not_equal);
@@ -442,8 +442,8 @@
 }
 
 
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(uint32_t c,
-                                                        uint32_t mask,
+void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
+                                                        unsigned mask,
                                                         Label* on_not_equal) {
   __ and_(r0, current_character(), Operand(mask));
   __ cmp(r0, Operand(c));
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 4e09f67..b487ba5 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -50,9 +50,9 @@
   virtual void Backtrack();
   virtual void Bind(Label* label);
   virtual void CheckAtStart(Label* on_at_start);
-  virtual void CheckCharacter(uint32_t c, Label* on_equal);
-  virtual void CheckCharacterAfterAnd(uint32_t c,
-                                      uint32_t mask,
+  virtual void CheckCharacter(unsigned c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(unsigned c,
+                                      unsigned mask,
                                       Label* on_equal);
   virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
   virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@@ -68,9 +68,9 @@
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
                                                Label* on_no_match);
   virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
-  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
-  virtual void CheckNotCharacterAfterAnd(uint32_t c,
-                                         uint32_t mask,
+  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(unsigned c,
+                                         unsigned mask,
                                          Label* on_not_equal);
   virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
                                               uc16 minus,
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index c2a9796..20e2801 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1952,7 +1952,7 @@
   __ cmp(r7, Operand(HeapNumber::kMantissaBits));
   // If greater or equal, the argument is already round and in r0.
   __ b(&restore_fpscr_and_return, ge);
-  __ b(&slow);
+  __ b(&wont_fit_smi);
 
   __ bind(&no_vfp_exception);
   // Move the result back to general purpose register r0.
@@ -1965,7 +1965,7 @@
   __ mov(r0, Operand(r0, LSL, kSmiTagSize));
 
   // Check for -0.
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(&restore_fpscr_and_return, ne);
   // r5 already holds the HeapNumber exponent.
   __ tst(r5, Operand(HeapNumber::kSignMask));
@@ -1980,10 +1980,10 @@
   __ Ret();
 
   __ bind(&wont_fit_smi);
-  __ bind(&slow);
   // Restore FPSCR and fall through to the slow case.
   __ vmsr(r3);
 
+  __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
diff --git a/src/array.js b/src/array.js
index 56f5254..0d7a7cb 100644
--- a/src/array.js
+++ b/src/array.js
@@ -117,19 +117,16 @@
     // Fast case for one-element arrays.
     if (length == 1) {
       var e = array[0];
-      if (!IS_UNDEFINED(e) || (0 in array)) {
-        if (IS_STRING(e)) return e;
-        return convert(e);
-      }
-      return '';
+      if (IS_STRING(e)) return e;
+      return convert(e);
     }
 
     // Construct an array for the elements.
     var elements = new $Array(length);
-    var elements_length = 0;
 
     // We pull the empty separator check outside the loop for speed!
     if (separator.length == 0) {
+      var elements_length = 0;
       for (var i = 0; i < length; i++) {
         var e = array[i];
         if (!IS_UNDEFINED(e)) {
@@ -142,16 +139,25 @@
       if (!IS_UNDEFINED(result)) return result;
       return %StringBuilderConcat(elements, elements_length, '');
     }
-    // Non-empty separator.
-    for (var i = 0; i < length; i++) {
-      var e = array[i];
-      if (!IS_UNDEFINED(e)) {
+    // Non-empty separator case.
+    // If the first element is a number, use the heuristic that the
+    // remaining elements are also likely to be numbers.
+    if (!IS_NUMBER(array[0])) {
+      for (var i = 0; i < length; i++) {
+        var e = array[i];
         if (!IS_STRING(e)) e = convert(e);
         elements[i] = e;
-      } else {
-        elements[i] = '';
       }
-    }
+    } else { 
+      for (var i = 0; i < length; i++) {
+        var e = array[i];
+        if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
+        else {
+          if (!IS_STRING(e)) e = convert(e);
+          elements[i] = e;
+        }
+      }
+    }   
     var result = %_FastAsciiArrayJoin(elements, separator);
     if (!IS_UNDEFINED(result)) return result;   
 
diff --git a/src/assembler.cc b/src/assembler.cc
index e8bcd91..cdcf481 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -66,6 +66,7 @@
 
 const double DoubleConstant::min_int = kMinInt;
 const double DoubleConstant::one_half = 0.5;
+const double DoubleConstant::minus_zero = -0.0;
 const double DoubleConstant::negative_infinity = -V8_INFINITY;
 
 
@@ -729,6 +730,12 @@
 }
 
 
+ExternalReference ExternalReference::address_of_minus_zero() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::minus_zero)));
+}
+
+
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(reinterpret_cast<void*>(
       const_cast<double*>(&DoubleConstant::negative_infinity)));
diff --git a/src/assembler.h b/src/assembler.h
index 0219de2..5817a15 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -50,6 +50,7 @@
  public:
   static const double min_int;
   static const double one_half;
+  static const double minus_zero;
   static const double negative_infinity;
 };
 
@@ -555,6 +556,7 @@
   // Static variables containing common double constants.
   static ExternalReference address_of_min_int();
   static ExternalReference address_of_one_half();
+  static ExternalReference address_of_minus_zero();
   static ExternalReference address_of_negative_infinity();
 
   Address address() const {return reinterpret_cast<Address>(address_);}
diff --git a/src/ast.cc b/src/ast.cc
index 1a6e768..4fe89be 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -166,12 +166,6 @@
 }
 
 
-bool FunctionLiteral::AllowOptimize() {
-  // We can't deal with heap-allocated locals.
-  return scope()->num_heap_slots() == 0;
-}
-
-
 ObjectLiteral::Property::Property(Literal* key, Expression* value) {
   emit_store_ = true;
   key_ = key;
@@ -215,12 +209,16 @@
 
 
 bool IsEqualString(void* first, void* second) {
+  ASSERT((*reinterpret_cast<String**>(first))->IsString());
+  ASSERT((*reinterpret_cast<String**>(second))->IsString());
   Handle<String> h1(reinterpret_cast<String**>(first));
   Handle<String> h2(reinterpret_cast<String**>(second));
   return (*h1)->Equals(*h2);
 }
 
 bool IsEqualSmi(void* first, void* second) {
+  ASSERT((*reinterpret_cast<Smi**>(first))->IsSmi());
+  ASSERT((*reinterpret_cast<Smi**>(second))->IsSmi());
   Handle<Smi> h1(reinterpret_cast<Smi**>(first));
   Handle<Smi> h2(reinterpret_cast<Smi**>(second));
   return (*h1)->value() == (*h2)->value();
@@ -266,12 +264,12 @@
     // If the key of a computed property is in the table, do not emit
     // a store for the property later.
     if (property->kind() == ObjectLiteral::Property::COMPUTED) {
-      if (table->Lookup(literal, hash, false) != NULL) {
+      if (table->Lookup(key, hash, false) != NULL) {
         property->set_emit_store(false);
       }
     }
     // Add key to the table.
-    table->Lookup(literal, hash, true);
+    table->Lookup(key, hash, true);
   }
 }
 
@@ -641,10 +639,19 @@
     }
   }
 #endif
-  if (receiver_types_ != NULL && receiver_types_->length() > 0) {
-    Handle<Map> type = receiver_types_->at(0);
-    is_monomorphic_ = oracle->CallIsMonomorphic(this);
-    if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name);
+  is_monomorphic_ = oracle->CallIsMonomorphic(this);
+  check_type_ = oracle->GetCallCheckType(this);
+  if (is_monomorphic_) {
+    Handle<Map> map;
+    if (receiver_types_ != NULL && receiver_types_->length() > 0) {
+      ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+      map = receiver_types_->at(0);
+    } else {
+      ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+      map = Handle<Map>(
+          oracle->GetPrototypeForPrimitiveCheck(check_type_)->map());
+    }
+    is_monomorphic_ = ComputeTarget(map, name);
   }
 }
 
diff --git a/src/ast.h b/src/ast.h
index ba422fd..f55ddcd 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1268,6 +1268,7 @@
         arguments_(arguments),
         pos_(pos),
         is_monomorphic_(false),
+        check_type_(RECEIVER_MAP_CHECK),
         receiver_types_(NULL),
         return_id_(GetNextId()) {
   }
@@ -1283,6 +1284,7 @@
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
   virtual bool IsMonomorphic() { return is_monomorphic_; }
+  CheckType check_type() const { return check_type_; }
   Handle<JSFunction> target() { return target_; }
   Handle<JSObject> holder() { return holder_; }
   Handle<JSGlobalPropertyCell> cell() { return cell_; }
@@ -1306,6 +1308,7 @@
   int pos_;
 
   bool is_monomorphic_;
+  CheckType check_type_;
   ZoneMapList* receiver_types_;
   Handle<JSFunction> target_;
   Handle<JSObject> holder_;
@@ -1714,7 +1717,6 @@
   int num_parameters() { return num_parameters_; }
 
   bool AllowsLazyCompilation();
-  bool AllowOptimize();
 
   Handle<String> debug_name() const {
     if (name_->length() > 0) return name_;
diff --git a/src/builtins.cc b/src/builtins.cc
index 0c76f69..a659c46 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -636,15 +636,20 @@
       return CallJsBuiltin("ArraySlice", args);
     }
     elms = FixedArray::cast(JSObject::cast(receiver)->elements());
-    len = elms->length();
-#ifdef DEBUG
-    // Arguments object by construction should have no holes, check it.
-    if (FLAG_enable_slow_asserts) {
-      for (int i = 0; i < len; i++) {
-        ASSERT(elms->get(i) != Heap::the_hole_value());
+    Object* len_obj = JSObject::cast(receiver)
+        ->InObjectPropertyAt(Heap::arguments_length_index);
+    if (!len_obj->IsSmi()) {
+      return CallJsBuiltin("ArraySlice", args);
+    }
+    len = Smi::cast(len_obj)->value();
+    if (len > elms->length()) {
+      return CallJsBuiltin("ArraySlice", args);
+    }
+    for (int i = 0; i < len; i++) {
+      if (elms->get(i) == Heap::the_hole_value()) {
+        return CallJsBuiltin("ArraySlice", args);
       }
     }
-#endif
   }
   ASSERT(len >= 0);
   int n_arguments = args.length() - 1;
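
The builtins.cc change above replaces a debug-only assertion with real runtime checks before taking the fast path for slicing an arguments object. The precondition it enforces is roughly the one below (CanUseFastSlice and its parameters are illustrative, not V8 API):

    // The fast path is taken only if the arguments object's length is a smi
    // no larger than the backing FixedArray and none of the elements is the
    // hole; any failure routes the call to the generic JS ArraySlice builtin.
    bool CanUseFastSlice(bool length_is_smi, int length, int backing_length,
                         const bool* element_is_hole) {
      if (!length_is_smi || length > backing_length) return false;
      for (int i = 0; i < length; i++) {
        if (element_is_hole[i]) return false;
      }
      return true;
    }
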
diff --git a/src/compiler.cc b/src/compiler.cc
index e4864e4..0bd9730 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -92,6 +92,25 @@
 }
 
 
+void CompilationInfo::DisableOptimization() {
+  if (FLAG_optimize_closures) {
+    // If closure optimizations are allowed and this is an optimizable
+    // closure, mark it accordingly.
+    bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
+    if (is_closure) {
+      bool is_optimizable_closure =
+          !scope_->outer_scope_calls_eval() && !scope_->inside_with();
+      if (is_optimizable_closure) {
+        SetMode(BASE);
+        return;
+      }
+    }
+  }
+
+  SetMode(NONOPT);
+}
+
+
 // Determine whether to use the full compiler for all code. If the flag
 // --always-full-compiler is specified this is the case. For the virtual frame
 // based compiler the full compiler is also used if a debugger is connected, as
@@ -262,7 +281,9 @@
     HTracer::Instance()->TraceCompilation(info->function());
   }
 
-  TypeFeedbackOracle oracle(Handle<Code>(info->shared_info()->code()));
+  TypeFeedbackOracle oracle(
+      Handle<Code>(info->shared_info()->code()),
+      Handle<Context>(info->closure()->context()->global_context()));
   HGraphBuilder builder(&oracle);
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph(info);
diff --git a/src/compiler.h b/src/compiler.h
index 1176c69..68066aa 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -114,7 +114,7 @@
     SetMode(OPTIMIZE);
     osr_ast_id_ = osr_ast_id;
   }
-  void DisableOptimization() { SetMode(NONOPT); }
+  void DisableOptimization();
 
   // Deoptimization support.
   bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
@@ -125,9 +125,7 @@
 
   // Determine whether or not we can adaptively optimize.
   bool AllowOptimize() {
-    return V8::UseCrankshaft() &&
-           !closure_.is_null() &&
-           function_->AllowOptimize();
+    return V8::UseCrankshaft() && !closure_.is_null();
   }
 
  private:
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index dcff07c..1adf73a 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -112,8 +112,8 @@
 
 
 // Create a new break point object and add it to the list of break points.
-function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
-  var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point);
+function MakeBreakPoint(source_position, opt_script_break_point) {
+  var break_point = new BreakPoint(source_position, opt_script_break_point);
   break_points.push(break_point);
   return break_point;
 }
@@ -123,10 +123,8 @@
 // NOTE: This object does not have a reference to the function having break
 // point as this would cause function not to be garbage collected when it is
 // not used any more. We do not want break points to keep functions alive.
-function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+function BreakPoint(source_position, opt_script_break_point) {
   this.source_position_ = source_position;
-  this.source_line_ = opt_line;
-  this.source_column_ = opt_column;
   if (opt_script_break_point) {
     this.script_break_point_ = opt_script_break_point;
   } else {
@@ -424,7 +422,7 @@
   if (position === null) return;
 
   // Create a break point object and set the break point.
-  break_point = MakeBreakPoint(position, this.line(), this.column(), this);
+  break_point = MakeBreakPoint(position, this);
   break_point.setIgnoreCount(this.ignoreCount());
   var actual_position = %SetScriptBreakPoint(script, position, break_point);
   if (IS_UNDEFINED(actual_position)) {
@@ -639,7 +637,7 @@
                                         opt_condition);
   } else {
     // Set a break point directly on the function.
-    var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
+    var break_point = MakeBreakPoint(source_position);
     var actual_position =
         %SetFunctionBreakPoint(func, source_position, break_point);
     actual_position += this.sourcePosition(func);
@@ -652,6 +650,25 @@
 };
 
 
+Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
+                                                    condition, enabled)
+{
+  break_point = MakeBreakPoint(position);
+  break_point.setCondition(condition);
+  if (!enabled)
+    break_point.disable();
+  var scripts = this.scripts();
+  for (var i = 0; i < scripts.length; i++) {
+    if (script_id == scripts[i].id) {
+      break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
+                                                         break_point);
+      break;
+    }
+  }
+  return break_point;
+};
+
+
 Debug.enableBreakPoint = function(break_point_number) {
   var break_point = this.findBreakPoint(break_point_number, false);
   // Only enable if the breakpoint hasn't been deleted:
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
new file mode 100644
index 0000000..73888fc
--- /dev/null
+++ b/src/extensions/experimental/experimental.gyp
@@ -0,0 +1,50 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    'icu_src_dir%': '',
+  },
+  'targets': [
+    {
+      'target_name': 'i18n_api',
+      'type': 'static_library',
+      'sources': [
+        'i18n-extension.cc',
+        'i18n-extension.h',
+      ],
+      'include_dirs': [
+        '<(icu_src_dir)/public/common',
+        '../..',
+      ],
+      'dependencies': [
+        '<(icu_src_dir)/icu.gyp:*',
+        '../../../tools/gyp/v8.gyp:v8',
+      ],
+    },
+  ],  # targets
+}
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 6e73258..daadef6 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -141,6 +141,7 @@
 #endif
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
+DEFINE_bool(optimize_closures, true, "optimize closures")
 
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
diff --git a/src/frames.cc b/src/frames.cc
index 3af7288..16ffbf5 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -329,21 +329,20 @@
 
 
 Code* StackFrame::GetSafepointData(Address pc,
-                                   uint8_t** safepoint_entry,
+                                   SafepointEntry* safepoint_entry,
                                    unsigned* stack_slots) {
   PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
-  uint8_t* cached_safepoint_entry = entry->safepoint_entry;
-  if (cached_safepoint_entry == NULL) {
-    cached_safepoint_entry = entry->code->GetSafepointEntry(pc);
-    ASSERT(cached_safepoint_entry != NULL);  // No safepoint found.
-    entry->safepoint_entry = cached_safepoint_entry;
+  SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
+  if (!entry->safepoint_entry.is_valid()) {
+    entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
+    ASSERT(entry->safepoint_entry.is_valid());
   } else {
-    ASSERT(cached_safepoint_entry == entry->code->GetSafepointEntry(pc));
+    ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
   }
 
   // Fill in the results and return the code.
   Code* code = entry->code;
-  *safepoint_entry = cached_safepoint_entry;
+  *safepoint_entry = entry->safepoint_entry;
   *stack_slots = code->stack_slots();
   return code;
 }
@@ -536,7 +535,7 @@
 
   // Compute the safepoint information.
   unsigned stack_slots = 0;
-  uint8_t* safepoint_entry = NULL;
+  SafepointEntry safepoint_entry;
   Code* code = StackFrame::GetSafepointData(
       pc(), &safepoint_entry, &stack_slots);
   unsigned slot_space = stack_slots * kPointerSize;
@@ -548,10 +547,22 @@
   Object** parameters_limit = &Memory::Object_at(
       fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
 
+  // Visit the parameters that may be on top of the saved registers.
+  if (safepoint_entry.argument_count() > 0) {
+    v->VisitPointers(parameters_base,
+                     parameters_base + safepoint_entry.argument_count());
+    parameters_base += safepoint_entry.argument_count();
+  }
+
+  if (safepoint_entry.has_doubles()) {
+    parameters_base += DoubleRegister::kNumAllocatableRegisters *
+        kDoubleSize / kPointerSize;
+  }
+
   // Visit the registers that contain pointers if any.
-  if (SafepointTable::HasRegisters(safepoint_entry)) {
+  if (safepoint_entry.HasRegisters()) {
     for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
-      if (SafepointTable::HasRegisterAt(safepoint_entry, i)) {
+      if (safepoint_entry.HasRegisterAt(i)) {
         int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
         v->VisitPointer(parameters_base + reg_stack_index);
       }
@@ -561,7 +572,8 @@
   }
 
   // We're done dealing with the register bits.
-  safepoint_entry += kNumSafepointRegisters >> kBitsPerByteLog2;
+  uint8_t* safepoint_bits = safepoint_entry.bits();
+  safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
 
   // Visit the rest of the parameters.
   v->VisitPointers(parameters_base, parameters_limit);
@@ -570,7 +582,7 @@
   for (unsigned index = 0; index < stack_slots; index++) {
     int byte_index = index >> kBitsPerByteLog2;
     int bit_index = index & (kBitsPerByte - 1);
-    if ((safepoint_entry[byte_index] & (1U << bit_index)) != 0) {
+    if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
       v->VisitPointer(parameters_limit + index);
     }
   }
@@ -778,14 +790,8 @@
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
 
-  SafepointTable table(code);
-  unsigned pc_offset = static_cast<unsigned>(pc() - code->instruction_start());
-  for (unsigned i = 0; i < table.length(); i++) {
-    if (table.GetPcOffset(i) == pc_offset) {
-      *deopt_index = table.GetDeoptimizationIndex(i);
-      break;
-    }
-  }
+  SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
+  *deopt_index = safepoint_entry.deoptimization_index();
   ASSERT(*deopt_index != AstNode::kNoNumber);
 
   return DeoptimizationInputData::cast(code->deoptimization_data());
@@ -1150,7 +1156,7 @@
     // been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
     entry->code = GcSafeFindCodeForPc(pc);
-    entry->safepoint_entry = NULL;
+    entry->safepoint_entry.Reset();
     entry->pc = pc;
   }
   return entry;
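
The frames.cc changes replace the raw uint8_t* safepoint bitmap with a SafepointEntry value object cached per pc, so validity, equality, the register/stack bitmap and the deoptimization index travel together. A minimal sketch of the shape this call site needs (illustrative only; the real declaration lives in src/safepoint-table.h and packs more state):

    #include <cstddef>
    #include <stdint.h>

    // Illustrative sketch, not the actual V8 class.
    class SafepointEntry {
     public:
      SafepointEntry() : info_(0), bits_(NULL) { }   // default-constructed == invalid
      bool is_valid() const { return bits_ != NULL; }
      void Reset() { info_ = 0; bits_ = NULL; }      // used when refilling a cache entry
      bool Equals(const SafepointEntry& other) const {
        return info_ == other.info_ && bits_ == other.bits_;
      }
      uint8_t* bits() const { return bits_; }        // register + stack-slot bitmap
      unsigned deoptimization_index() const { return info_; }  // real class decodes a packed field
     private:
      unsigned info_;
      uint8_t* bits_;
    };
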
diff --git a/src/frames.h b/src/frames.h
index 778f9d2..5378709 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -28,6 +28,8 @@
 #ifndef V8_FRAMES_H_
 #define V8_FRAMES_H_
 
+#include "safepoint-table.h"
+
 namespace v8 {
 namespace internal {
 
@@ -51,7 +53,7 @@
   struct PcToCodeCacheEntry {
     Address pc;
     Code* code;
-    uint8_t* safepoint_entry;
+    SafepointEntry safepoint_entry;
   };
 
   static PcToCodeCacheEntry* cache(int index) {
@@ -208,7 +210,7 @@
   // safepoint entry and the number of stack slots. The pc must be at
   // a safepoint.
   static Code* GetSafepointData(Address pc,
-                                uint8_t** safepoint_entry,
+                                SafepointEntry* safepoint_entry,
                                 unsigned* stack_slots);
 
   virtual void Iterate(ObjectVisitor* v) const = 0;
diff --git a/src/heap.cc b/src/heap.cc
index 5832ccb..32d751a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2724,6 +2724,9 @@
   code->set_instruction_size(desc.instr_size);
   code->set_relocation_info(ByteArray::cast(reloc_info));
   code->set_flags(flags);
+  if (code->is_call_stub() || code->is_keyed_call_stub()) {
+    code->set_check_type(RECEIVER_MAP_CHECK);
+  }
   code->set_deoptimization_data(empty_fixed_array());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
@@ -5029,7 +5032,7 @@
       obj->SetMark();
     }
     UnmarkingVisitor visitor;
-    Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
+    Heap::IterateRoots(&visitor, VISIT_ALL);
     while (visitor.can_process())
       visitor.ProcessNext();
   }
diff --git a/src/heap.h b/src/heap.h
index 25384d2..0d79081 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1865,7 +1865,7 @@
     }
 
     ~Scope() {
-      ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
+      ASSERT(scope_ < kNumberOfScopes);  // scope_ is unsigned.
       tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
     }
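
The dropped half of the assertion above was vacuous: scope_ is unsigned, so 0 <= scope_ can never be false, and compilers with -Wtype-limits typically flag exactly this comparison. A trivial stand-alone illustration (not V8 code):

    #include <cassert>

    int main() {
      const unsigned kNumberOfScopes = 8;  // stand-in for the real constant
      unsigned scope = 3;
      assert(0 <= scope);                  // always true for an unsigned value
      assert(scope < kNumberOfScopes);     // the only check that carries information
      return 0;
    }
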
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 3f39888..89478f5 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1190,6 +1190,11 @@
 }
 
 
+void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
+  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
 // Implementation of type inference and type conversions. Calculates
 // the inferred type of this instruction based on the input operands.
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index f7eb173..4a23f2a 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -92,6 +92,7 @@
 //       HCallNew
 //       HCallRuntime
 //     HCallStub
+//     HCheckPrototypeMaps
 //     HConstant
 //     HControlInstruction
 //       HDeoptimize
@@ -106,6 +107,7 @@
 //     HGlobalObject
 //     HGlobalReceiver
 //     HLeaveInlined
+//     HLoadContextSlot
 //     HLoadGlobal
 //     HMaterializedLiteral
 //       HArrayLiteral
@@ -125,7 +127,6 @@
 //       HCheckInstanceType
 //       HCheckMap
 //       HCheckNonSmi
-//       HCheckPrototypeMaps
 //       HCheckSmi
 //       HDeleteProperty
 //       HFixedArrayLength
@@ -220,6 +221,7 @@
   V(JSArrayLength)                             \
   V(ClassOfTest)                               \
   V(LeaveInlined)                              \
+  V(LoadContextSlot)                           \
   V(LoadElements)                              \
   V(LoadGlobal)                                \
   V(LoadKeyedFastElement)                      \
@@ -1622,42 +1624,40 @@
 };
 
 
-class HCheckPrototypeMaps: public HUnaryOperation {
+class HCheckPrototypeMaps: public HInstruction {
  public:
-  HCheckPrototypeMaps(HValue* value,
-                      Handle<JSObject> holder,
-                      Handle<Map> receiver_map)
-      : HUnaryOperation(value),
-        holder_(holder),
-        receiver_map_(receiver_map) {
-    set_representation(Representation::Tagged());
+  HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
+      : prototype_(prototype), holder_(holder) {
     SetFlag(kUseGVN);
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::Tagged();
-  }
-
 #ifdef DEBUG
   virtual void Verify() const;
 #endif
 
+  Handle<JSObject> prototype() const { return prototype_; }
   Handle<JSObject> holder() const { return holder_; }
-  Handle<Map> receiver_map() const { return receiver_map_; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
 
+  virtual intptr_t Hashcode() const {
+    ASSERT(!Heap::IsAllocationAllowed());
+    intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
+    hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
+    return hash;
+  }
+
  protected:
   virtual bool DataEquals(HValue* other) const {
     HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
-    return holder_.is_identical_to(b->holder()) &&
-        receiver_map_.is_identical_to(b->receiver_map());
+    return prototype_.is_identical_to(b->prototype()) &&
+        holder_.is_identical_to(b->holder());
   }
 
  private:
+  Handle<JSObject> prototype_;
   Handle<JSObject> holder_;
-  Handle<Map> receiver_map_;
 };
 
 
@@ -2601,6 +2601,39 @@
 };
 
 
+class HLoadContextSlot: public HInstruction {
+ public:
+  HLoadContextSlot(int context_chain_length, int slot_index)
+      : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
+  }
+
+  int context_chain_length() const { return context_chain_length_; }
+  int slot_index() const { return slot_index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual intptr_t Hashcode() const {
+    return context_chain_length() * 29 + slot_index();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadContextSlot* b = HLoadContextSlot::cast(other);
+    return (context_chain_length() == b->context_chain_length())
+        && (slot_index() == b->slot_index());
+  }
+
+ private:
+  int context_chain_length_;
+  int slot_index_;
+};
+
+
 class HLoadNamedField: public HUnaryOperation {
  public:
   HLoadNamedField(HValue* object, bool is_in_object, int offset)
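
Both HLoadContextSlot and the reworked HCheckPrototypeMaps participate in GVN: they set kUseGVN and supply Hashcode()/DataEquals(), while kDependsOnCalls and kDependsOnMaps say what invalidates a cached value. A toy model of how a value-numbering table uses those two hooks (illustrative only, not hydrogen's actual GVN pass):

    #include <cstddef>
    #include <vector>

    // Toy instruction using the same hash recipe as HLoadContextSlot above.
    struct ToyLoadContextSlot {
      int context_chain_length;
      int slot_index;
      long Hashcode() const { return context_chain_length * 29 + slot_index; }
      bool DataEquals(const ToyLoadContextSlot& other) const {
        return context_chain_length == other.context_chain_length &&
               slot_index == other.slot_index;
      }
    };

    // Reuse an earlier, equivalent instruction instead of keeping the new one.
    ToyLoadContextSlot* LookupOrAdd(ToyLoadContextSlot* candidate,
                                    std::vector<ToyLoadContextSlot*>* table) {
      for (size_t i = 0; i < table->size(); ++i) {
        ToyLoadContextSlot* existing = (*table)[i];
        if (existing->Hashcode() == candidate->Hashcode() &&
            existing->DataEquals(*candidate)) {
          return existing;  // a second load of the same slot folds away
        }
      }
      table->push_back(candidate);
      return candidate;
    }
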
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 0d92b2e..7aa66fd 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -2940,6 +2940,21 @@
       BAILOUT("unsupported context for arguments object");
     }
     ast_context()->ReturnValue(environment()->Lookup(variable));
+  } else if (variable->IsContextSlot()) {
+    if (variable->mode() == Variable::CONST) {
+      BAILOUT("reference to const context slot");
+    }
+    Slot* slot = variable->AsSlot();
+    CompilationInfo* info = graph()->info();
+    int context_chain_length = info->function()->scope()->
+        ContextChainLength(slot->var()->scope());
+    ASSERT(context_chain_length >= 0);
+    // TODO(antonm): if slot's value is not modified by closures, instead
+    // of reading it out of context, we could just embed the value as
+    // a constant.
+    HLoadContextSlot* instr =
+        new HLoadContextSlot(context_chain_length, slot->index());
+    ast_context()->ReturnInstruction(instr, expr->id());
   } else if (variable->is_global()) {
     LookupResult lookup;
     LookupGlobalPropertyCell(variable, &lookup, false);
@@ -2956,7 +2971,7 @@
     HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
     ast_context()->ReturnInstruction(instr, expr->id());
   } else {
-    BAILOUT("reference to non-stack-allocated/non-global variable");
+    BAILOUT("reference to a variable which requires dynamic lookup");
   }
 }
 
@@ -3482,7 +3497,7 @@
                                      Top(),
                                      expr->position(),
                                      expr->AssignmentId());
-    } else {
+    } else if (var->IsStackAllocated()) {
       // We allow reference to the arguments object only in assignments
       // to local variables to make sure that the arguments object does
       // not escape and is not modified.
@@ -3495,6 +3510,8 @@
         VISIT_FOR_VALUE(expr->value());
       }
       Bind(proxy->var(), Top());
+    } else {
+      BAILOUT("Assigning to a non-stack-allocated/non-global variable");
     }
     // Return the value.
     ast_context()->ReturnValue(Pop());
@@ -3795,9 +3812,9 @@
     AddInstruction(new HCheckMap(receiver, receiver_map));
   }
   if (!expr->holder().is_null()) {
-    AddInstruction(new HCheckPrototypeMaps(receiver,
-                                           expr->holder(),
-                                           receiver_map));
+    AddInstruction(new HCheckPrototypeMaps(
+        Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
+        expr->holder()));
   }
 }
 
@@ -4009,7 +4026,9 @@
     function_return_->MarkAsInlineReturnTarget();
   }
   call_context_ = ast_context();
-  TypeFeedbackOracle new_oracle(Handle<Code>(shared->code()));
+  TypeFeedbackOracle new_oracle(
+      Handle<Code>(shared->code()),
+      Handle<Context>(target->context()->global_context()));
   oracle_ = &new_oracle;
   graph()->info()->SetOsrAstId(AstNode::kNoNumber);
 
@@ -4211,7 +4230,8 @@
   HValue* arg_two_value = environment()->Lookup(arg_two->var());
   if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
 
-  if (!expr->IsMonomorphic()) return false;
+  if (!expr->IsMonomorphic() ||
+      expr->check_type() != RECEIVER_MAP_CHECK) return false;
 
   // Found pattern f.apply(receiver, arguments).
   VisitForValue(prop->obj());
@@ -4280,7 +4300,7 @@
     expr->RecordTypeFeedback(oracle());
     ZoneMapList* types = expr->GetReceiverTypes();
 
-    if (expr->IsMonomorphic()) {
+    if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) {
       AddCheckConstantFunction(expr, receiver, types->first(), true);
 
       if (TryMathFunctionInline(expr)) {
@@ -4305,6 +4325,7 @@
       }
 
     } else if (types != NULL && types->length() > 1) {
+      ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
       HandlePolymorphicCallNamed(expr, receiver, types, name);
       return;
 
@@ -5713,31 +5734,40 @@
     PrintF("%30s", names_[i]);
     double ms = static_cast<double>(timing_[i]) / 1000;
     double percent = static_cast<double>(timing_[i]) * 100 / sum;
-    PrintF(" - %0.3f ms / %0.3f %% \n", ms, percent);
+    PrintF(" - %7.3f ms / %4.1f %% ", ms, percent);
+
+    unsigned size = sizes_[i];
+    double size_percent = static_cast<double>(size) * 100 / total_size_;
+    PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
   }
-  PrintF("%30s - %0.3f ms \n", "Sum", static_cast<double>(sum) / 1000);
+  PrintF("%30s - %7.3f ms           %8u bytes\n", "Sum",
+         static_cast<double>(sum) / 1000,
+         total_size_);
   PrintF("---------------------------------------------------------------\n");
-  PrintF("%30s - %0.3f ms (%0.1f times slower than full code gen)\n",
+  PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
          "Total",
          static_cast<double>(total_) / 1000,
          static_cast<double>(total_) / full_code_gen_);
 }
 
 
-void HStatistics::SaveTiming(const char* name, int64_t ticks) {
+void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
   if (name == HPhase::kFullCodeGen) {
     full_code_gen_ += ticks;
   } else if (name == HPhase::kTotal) {
     total_ += ticks;
   } else {
+    total_size_ += size;
     for (int i = 0; i < names_.length(); ++i) {
       if (names_[i] == name) {
         timing_[i] += ticks;
+        sizes_[i] += size;
         return;
       }
     }
     names_.Add(name);
     timing_.Add(ticks);
+    sizes_.Add(size);
   }
 }
 
@@ -5758,13 +5788,15 @@
     chunk_ = allocator->chunk();
   }
   if (FLAG_time_hydrogen) start_ = OS::Ticks();
+  start_allocation_size_ = Zone::allocation_size_;
 }
 
 
 void HPhase::End() const {
   if (FLAG_time_hydrogen) {
     int64_t end = OS::Ticks();
-    HStatistics::Instance()->SaveTiming(name_, end - start_);
+    unsigned size = Zone::allocation_size_ - start_allocation_size_;
+    HStatistics::Instance()->SaveTiming(name_, end - start_, size);
   }
 
   if (FLAG_trace_hydrogen) {
@@ -5777,7 +5809,6 @@
 
 #ifdef DEBUG
   if (graph_ != NULL) graph_->Verify();
-  if (chunk_ != NULL) chunk_->Verify();
   if (allocator_ != NULL) allocator_->Verify();
 #endif
 }
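
When FLAG_time_hydrogen is on, the report now attributes zone memory as well as time to each compilation phase: HPhase::Begin snapshots Zone::allocation_size_ and End passes the delta to HStatistics::SaveTiming alongside the elapsed ticks. The pattern, reduced to a stand-alone sketch (illustrative; the real code reports into HStatistics instead of printing):

    #include <cstdio>

    // RAII scope that reports how far a monotonically growing allocation
    // counter advanced while the scope was alive -- the same shape as HPhase,
    // which reads Zone::allocation_size_ in Begin() and again in End().
    struct PhaseScope {
      PhaseScope(const char* name, const unsigned* counter)
          : name_(name), counter_(counter), start_(*counter) { }
      ~PhaseScope() {
        std::printf("%s allocated %u bytes\n", name_, *counter_ - start_);
      }
      const char* name_;
      const unsigned* counter_;
      unsigned start_;
    };
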
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 872ae98..35165ae 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -906,7 +906,7 @@
 class HStatistics: public Malloced {
  public:
   void Print();
-  void SaveTiming(const char* name, int64_t ticks);
+  void SaveTiming(const char* name, int64_t ticks, unsigned size);
   static HStatistics* Instance() {
     static SetOncePointer<HStatistics> instance;
     if (!instance.is_set()) {
@@ -917,11 +917,19 @@
 
  private:
 
-  HStatistics() : timing_(5), names_(5), total_(0), full_code_gen_(0) { }
+  HStatistics()
+      : timing_(5),
+        names_(5),
+        sizes_(5),
+        total_(0),
+        total_size_(0),
+        full_code_gen_(0) { }
 
   List<int64_t> timing_;
   List<const char*> names_;
+  List<unsigned> sizes_;
   int64_t total_;
+  unsigned total_size_;
   int64_t full_code_gen_;
 };
 
@@ -958,6 +966,7 @@
   HGraph* graph_;
   LChunk* chunk_;
   LAllocator* allocator_;
+  unsigned start_allocation_size_;
 };
 
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index c173a3d..552d7b5 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2465,6 +2465,17 @@
 }
 
 
+void Assembler::por(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xEB);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
@@ -2489,6 +2500,40 @@
 }
 
 
+void Assembler::psllq(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xF3);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrlq(XMMRegister reg, int8_t shift) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x73);
+  emit_sse_operand(edx, reg);  // edx == 2
+  EMIT(shift);
+}
+
+
+void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xD3);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
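
The new assembler entry points emit standard SSE2 encodings: por is 66 0F EB /r, the register-register psllq and psrlq are 66 0F F3 /r and 66 0F D3 /r, and psrlq-by-immediate shares the 66 0F 73 group with the existing psllq-by-immediate, where the ModR/M reg field is an opcode extension (/2 selects psrlq, /6 selects psllq). That is why the immediate form passes edx, whose register code is 2, as the reg operand, and why the disassembler change further down distinguishes the two shifts by comparing regop against edx and esi.
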
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 11acb56..20446b0 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -919,9 +919,13 @@
 
   void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
+  void por(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
   void psllq(XMMRegister reg, int8_t shift);
+  void psllq(XMMRegister dst, XMMRegister src);
+  void psrlq(XMMRegister reg, int8_t shift);
+  void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 91fb050..72213dc 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2015,8 +2015,7 @@
 
 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label call_runtime;
-  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
-         operands_type_ == TRBinaryOpIC::INT32);
+  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
 
   // Floating point case.
   switch (op_) {
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index e3b0dfc..1ecfd39 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -6649,38 +6649,41 @@
 
 
 void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop, loop_condition,
+      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+
   ASSERT(args->length() == 2);
+  // We will leave the separator on the stack until the end of the function.
   Load(args->at(1));
+  // Load this to eax (= array)
   Load(args->at(0));
   Result array_result = frame_->Pop();
   array_result.ToRegister(eax);
   frame_->SpillAll();
 
-  Label bailout;
-  Label done;
   // All aliases of the same register have disjoint lifetimes.
   Register array = eax;
-  Register result_pos = no_reg;
+  Register elements = no_reg;  // Will be eax.
 
-  Register index = edi;
+  Register index = edx;
 
-  Register current_string_length = ecx;  // Will be ecx when live.
+  Register string_length = ecx;
 
-  Register current_string = edx;
+  Register string = esi;
 
   Register scratch = ebx;
 
-  Register scratch_2 = esi;
-  Register new_padding_chars = scratch_2;
+  Register array_length = edi;
+  Register result_pos = no_reg;  // Will be edi.
 
-  Operand separator = Operand(esp, 4 * kPointerSize);  // Already pushed.
-  Operand elements = Operand(esp, 3 * kPointerSize);
-  Operand result = Operand(esp, 2 * kPointerSize);
-  Operand padding_chars = Operand(esp, 1 * kPointerSize);
-  Operand array_length = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(4 * kPointerSize));
-
-  // Check that eax is a JSArray
+  // Separator operand is already pushed.
+  Operand separator_operand = Operand(esp, 2 * kPointerSize);
+  Operand result_operand = Operand(esp, 1 * kPointerSize);
+  Operand array_length_operand = Operand(esp, 0);
+  __ sub(Operand(esp), Immediate(2 * kPointerSize));
+  __ cld();
+  // Check that the array is a JSArray
   __ test(array, Immediate(kSmiTagMask));
   __ j(zero, &bailout);
   __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@@ -6691,140 +6694,226 @@
             1 << Map::kHasFastElements);
   __ j(zero, &bailout);
 
-  // If the array is empty, return the empty string.
-  __ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
-  __ sar(scratch, 1);
-  Label non_trivial;
-  __ j(not_zero, &non_trivial);
-  __ mov(result, Factory::empty_string());
+  // If the array has length zero, return the empty string.
+  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+  __ sar(array_length, 1);
+  __ j(not_zero, &non_trivial_array);
+  __ mov(result_operand, Factory::empty_string());
   __ jmp(&done);
 
-  __ bind(&non_trivial);
-  __ mov(array_length, scratch);
+  // Save the array length.
+  __ bind(&non_trivial_array);
+  __ mov(array_length_operand, array_length);
 
-  __ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
-  __ mov(elements, scratch);
-
+  // Save the FixedArray containing array's elements.
   // End of array's live range.
-  result_pos = array;
+  elements = array;
+  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
   array = no_reg;
 
 
-  // Check that the separator is a flat ascii string.
-  __ mov(current_string, separator);
-  __ test(current_string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
-  __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  // If the separator is the empty string, replace it with NULL.
-  // The test for NULL is quicker than the empty string test, in a loop.
-  __ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
-         Immediate(0));
-  Label separator_checked;
-  __ j(not_zero, &separator_checked);
-  __ mov(separator, Immediate(0));
-  __ bind(&separator_checked);
-
-  // Check that elements[0] is a flat ascii string, and copy it in new space.
-  __ mov(scratch, elements);
-  __ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
-  __ test(current_string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
-  __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Allocate space to copy it.  Round up the size to the alignment granularity.
-  __ mov(current_string_length,
-         FieldOperand(current_string, String::kLengthOffset));
-  __ shr(current_string_length, 1);
-
-  // Live registers and stack values:
-  //   current_string_length: length of elements[0].
-
-  // New string result in new space = elements[0]
-  __ AllocateAsciiString(result_pos, current_string_length, scratch_2,
-                         index, no_reg, &bailout);
-  __ mov(result, result_pos);
-
-  // Adjust current_string_length to include padding bytes at end of string.
-  // Keep track of the number of padding bytes.
-  __ mov(new_padding_chars, current_string_length);
-  __ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
-  __ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
-  __ sub(new_padding_chars, Operand(current_string_length));
-  __ neg(new_padding_chars);
-  __ mov(padding_chars, new_padding_chars);
-
-  Label copy_loop_1_done;
-  Label copy_loop_1;
-  __ test(current_string_length, Operand(current_string_length));
-  __ j(zero, &copy_loop_1_done);
-  __ bind(&copy_loop_1);
-  __ sub(Operand(current_string_length), Immediate(kPointerSize));
-  __ mov(scratch, FieldOperand(current_string, current_string_length,
-                               times_1, SeqAsciiString::kHeaderSize));
-  __ mov(FieldOperand(result_pos, current_string_length,
-                      times_1, SeqAsciiString::kHeaderSize),
-         scratch);
-  __ j(not_zero, &copy_loop_1);
-  __ bind(&copy_loop_1_done);
-
-  __ mov(index, Immediate(1));
+  // Check that all array elements are sequential ASCII strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ Set(index, Immediate(0));
+  __ Set(string_length, Immediate(0));
   // Loop condition: while (index < length).
-  Label loop;
+  // Live loop registers: index, array_length, string,
+  //                      scratch, string_length, elements.
+  __ jmp(&loop_condition);
   __ bind(&loop);
-  __ cmp(index, array_length);
+  __ cmp(index, Operand(array_length));
   __ j(greater_equal, &done);
 
-  // If the separator is the empty string, signalled by NULL, skip it.
-  Label separator_done;
-  __ mov(current_string, separator);
-  __ test(current_string, Operand(current_string));
-  __ j(zero, &separator_done);
-
-  // Append separator to result.  It is known to be a flat ascii string.
-  __ AppendStringToTopOfNewSpace(current_string, current_string_length,
-                                 result_pos, scratch, scratch_2, result,
-                                 padding_chars, &bailout);
-  __ bind(&separator_done);
-
-  // Add next element of array to the end of the result.
-  // Get current_string = array[index].
-  __ mov(scratch, elements);
-  __ mov(current_string, FieldOperand(scratch, index,
+  __ mov(string, FieldOperand(elements, index,
                                       times_pointer_size,
                                       FixedArray::kHeaderSize));
-  // If current != flat ascii string drop result, return undefined.
-  __ test(current_string, Immediate(kSmiTagMask));
+  __ test(string, Immediate(kSmiTagMask));
   __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
-  __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, Immediate(
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
+  __ j(not_equal, &bailout);
+  __ add(string_length,
+         FieldOperand(string, SeqAsciiString::kLengthOffset));
+  __ j(overflow, &bailout);
+  __ add(Operand(index), Immediate(1));
+  __ bind(&loop_condition);
+  __ cmp(index, Operand(array_length));
+  __ j(less, &loop);
+
+  // If array_length is 1, return elements[0], a string.
+  __ cmp(array_length, 1);
+  __ j(not_equal, &not_size_one_array);
+  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+  __ mov(result_operand, scratch);
+  __ jmp(&done);
+
+  __ bind(&not_size_one_array);
+
+  // End of array_length live range.
+  result_pos = array_length;
+  array_length = no_reg;
+
+  // Live registers:
+  // string_length: Sum of string lengths, as a smi.
+  // elements: FixedArray of strings.
+
+  // Check that the separator is a flat ASCII string.
+  __ mov(string, separator_operand);
+  __ test(string, Immediate(kSmiTagMask));
+  __ j(zero, &bailout);
+  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   __ and_(scratch, Immediate(
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
   __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
   __ j(not_equal, &bailout);
 
-  // Append current to the result.
-  __ AppendStringToTopOfNewSpace(current_string, current_string_length,
-                                 result_pos, scratch, scratch_2, result,
-                                 padding_chars, &bailout);
+  // Add (separator length times array_length) - separator length
+  // to string_length.
+  __ mov(scratch, separator_operand);
+  __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
+  __ imul(scratch, array_length_operand);
+  __ j(overflow, &bailout);
+  __ add(string_length, Operand(scratch));
+  __ j(overflow, &bailout);
+
+  __ shr(string_length, 1);
+  // Live registers and stack values:
+  //   string_length
+  //   elements
+  __ AllocateAsciiString(result_pos, string_length, scratch,
+                         index, string, &bailout);
+  __ mov(result_operand, result_pos);
+  __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+
+  __ mov(string, separator_operand);
+  __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+         Immediate(Smi::FromInt(1)));
+  __ j(equal, &one_char_separator);
+  __ j(greater, &long_separator);
+
+
+  // Empty separator case
+  __ mov(index, Immediate(0));
+  __ jmp(&loop_1_condition);
+  // Loop condition: while (index < length).
+  __ bind(&loop_1);
+  // Each iteration of the loop concatenates one string to the result.
+  // Live values in registers:
+  //   index: which element of the elements array we are adding to the result.
+  //   result_pos: the position to which we are currently copying characters.
+  //   elements: the FixedArray of strings we are joining.
+
+  // Get string = array[index].
+  __ mov(string, FieldOperand(elements, index,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
   __ add(Operand(index), Immediate(1));
-  __ jmp(&loop);  // End while (index < length).
+  __ bind(&loop_1_condition);
+  __ cmp(index, array_length_operand);
+  __ j(less, &loop_1);  // End while (index < length).
+  __ jmp(&done);
+
+
+
+  // One-character separator case
+  __ bind(&one_char_separator);
+  // Replace separator with its ascii character value.
+  __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ mov_b(separator_operand, scratch);
+
+  __ Set(index, Immediate(0));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator
+  __ jmp(&loop_2_entry);
+  // Loop condition: while (index < length).
+  __ bind(&loop_2);
+  // Each iteration of the loop concatenates one string to the result.
+  // Live values in registers:
+  //   index: which element of the elements array we are adding to the result.
+  //   result_pos: the position to which we are currently copying characters.
+
+  // Copy the separator character to the result.
+  __ mov_b(scratch, separator_operand);
+  __ mov_b(Operand(result_pos, 0), scratch);
+  __ inc(result_pos);
+
+  __ bind(&loop_2_entry);
+  // Get string = array[index].
+  __ mov(string, FieldOperand(elements, index,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
+  __ add(Operand(index), Immediate(1));
+
+  __ cmp(index, array_length_operand);
+  __ j(less, &loop_2);  // End while (index < length).
+  __ jmp(&done);
+
+
+  // Long separator case (separator is more than one character).
+  __ bind(&long_separator);
+
+  __ Set(index, Immediate(0));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator
+  __ jmp(&loop_3_entry);
+  // Loop condition: while (index < length).
+  __ bind(&loop_3);
+  // Each iteration of the loop concatenates one string to the result.
+  // Live values in registers:
+  //   index: which element of the elements array we are adding to the result.
+  //   result_pos: the position to which we are currently copying characters.
+
+  // Copy the separator to the result.
+  __ mov(string, separator_operand);
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
+
+  __ bind(&loop_3_entry);
+  // Get string = array[index].
+  __ mov(string, FieldOperand(elements, index,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
+  __ add(Operand(index), Immediate(1));
+
+  __ cmp(index, array_length_operand);
+  __ j(less, &loop_3);  // End while (index < length).
+  __ jmp(&done);
+
 
   __ bind(&bailout);
-  __ mov(result, Factory::undefined_value());
+  __ mov(result_operand, Factory::undefined_value());
   __ bind(&done);
-  __ mov(eax, result);
+  __ mov(eax, result_operand);
   // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(4 * kPointerSize));
+  __ add(Operand(esp), Immediate(2 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   frame_->Drop(1);
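
The rewritten GenerateFastAsciiArrayJoin replaces the old incremental scheme (allocate a string for elements[0], then append each separator and element at the top of new space) with two passes: the first loop verifies that every element is a flat sequential ASCII string and accumulates the total length including one separator per gap, then the result is allocated once and three specialized copy loops handle the empty, one-character and longer separator cases. A high-level C++ analogue of the sizing-then-copying strategy (not the generated code):

    #include <string>
    #include <vector>

    std::string JoinTwoPass(const std::vector<std::string>& parts,
                            const std::string& separator) {
      // Pass 1: compute the exact result size.
      size_t total = 0;
      for (size_t i = 0; i < parts.size(); ++i) total += parts[i].size();
      if (!parts.empty()) total += separator.size() * (parts.size() - 1);
      // Pass 2: allocate once, then copy elements and separators.
      std::string result;
      result.reserve(total);
      for (size_t i = 0; i < parts.size(); ++i) {
        if (i > 0) result += separator;  // separator precedes every element but the first
        result += parts[i];
      }
      return result;
    }
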
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index ceba249..3050c56 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
@@ -56,8 +58,9 @@
   SafepointTable table(function->code());
   for (unsigned i = 0; i < table.length(); i++) {
     unsigned pc_offset = table.GetPcOffset(i);
-    int deoptimization_index = table.GetDeoptimizationIndex(i);
-    int gap_code_size = table.GetGapCodeSize(i);
+    SafepointEntry safepoint_entry = table.GetEntry(i);
+    int deoptimization_index = safepoint_entry.deoptimization_index();
+    int gap_code_size = safepoint_entry.gap_code_size();
 #ifdef DEBUG
     // Destroy the code which is not supposed to run again.
     unsigned instructions = pc_offset - last_pc_offset;
@@ -617,3 +620,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index dfbcbb7..4028a93 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1182,15 +1182,33 @@
                            NameOfXMMRegister(rm),
                            static_cast<int>(imm8));
             data += 2;
+          } else if (*data == 0xF3) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("psllq %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0x73) {
             data++;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             int8_t imm8 = static_cast<int8_t>(data[1]);
-            AppendToBuffer("psllq %s,%d",
+            ASSERT(regop == esi || regop == edx);
+            AppendToBuffer("%s %s,%d",
+                           (regop == esi) ? "psllq" : "psrlq",
                            NameOfXMMRegister(rm),
                            static_cast<int>(imm8));
             data += 2;
+          } else if (*data == 0xD3) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("psrlq %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0x7F) {
             AppendToBuffer("movdqa ");
             data++;
@@ -1228,6 +1246,14 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (*data == 0xEB) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("por %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else {
             UnimplementedInstruction();
           }
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5f30858..2622b5e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -3351,39 +3351,37 @@
 
 
 void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  Label bailout;
-  Label done;
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop, loop_condition,
+      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
   // Load this to eax (= array)
   VisitForAccumulatorValue(args->at(0));
-
   // All aliases of the same register have disjoint lifetimes.
   Register array = eax;
-  Register result_pos = no_reg;
+  Register elements = no_reg;  // Will be eax.
 
-  Register index = edi;
+  Register index = edx;
 
-  Register current_string_length = ecx;  // Will be ecx when live.
+  Register string_length = ecx;
 
-  Register current_string = edx;
+  Register string = esi;
 
   Register scratch = ebx;
 
-  Register scratch_2 = esi;
-  Register new_padding_chars = scratch_2;
+  Register array_length = edi;
+  Register result_pos = no_reg;  // Will be edi.
 
-  Operand separator = Operand(esp, 4 * kPointerSize);  // Already pushed.
-  Operand elements = Operand(esp, 3 * kPointerSize);
-  Operand result = Operand(esp, 2 * kPointerSize);
-  Operand padding_chars = Operand(esp, 1 * kPointerSize);
-  Operand array_length = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(4 * kPointerSize));
-
-
-  // Check that eax is a JSArray
+  // Separator operand is already pushed.
+  Operand separator_operand = Operand(esp, 2 * kPointerSize);
+  Operand result_operand = Operand(esp, 1 * kPointerSize);
+  Operand array_length_operand = Operand(esp, 0);
+  __ sub(Operand(esp), Immediate(2 * kPointerSize));
+  __ cld();
+  // Check that the array is a JSArray
   __ test(array, Immediate(kSmiTagMask));
   __ j(zero, &bailout);
   __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@@ -3394,140 +3392,226 @@
             1 << Map::kHasFastElements);
   __ j(zero, &bailout);
 
-  // If the array is empty, return the empty string.
-  __ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
-  __ sar(scratch, 1);
-  Label non_trivial;
-  __ j(not_zero, &non_trivial);
-  __ mov(result, Factory::empty_string());
+  // If the array has length zero, return the empty string.
+  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+  __ sar(array_length, 1);
+  __ j(not_zero, &non_trivial_array);
+  __ mov(result_operand, Factory::empty_string());
   __ jmp(&done);
 
-  __ bind(&non_trivial);
-  __ mov(array_length, scratch);
+  // Save the array length.
+  __ bind(&non_trivial_array);
+  __ mov(array_length_operand, array_length);
 
-  __ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
-  __ mov(elements, scratch);
-
+  // Save the FixedArray containing array's elements.
   // End of array's live range.
-  result_pos = array;
+  elements = array;
+  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
   array = no_reg;
 
 
-  // Check that the separator is a flat ascii string.
-  __ mov(current_string, separator);
-  __ test(current_string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
-  __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  // If the separator is the empty string, replace it with NULL.
-  // The test for NULL is quicker than the empty string test, in a loop.
-  __ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
-         Immediate(0));
-  Label separator_checked;
-  __ j(not_zero, &separator_checked);
-  __ mov(separator, Immediate(0));
-  __ bind(&separator_checked);
-
-  // Check that elements[0] is a flat ascii string, and copy it in new space.
-  __ mov(scratch, elements);
-  __ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
-  __ test(current_string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
-  __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Allocate space to copy it.  Round up the size to the alignment granularity.
-  __ mov(current_string_length,
-         FieldOperand(current_string, String::kLengthOffset));
-  __ shr(current_string_length, 1);
-
-  // Live registers and stack values:
-  //   current_string_length: length of elements[0].
-
-  // New string result in new space = elements[0]
-  __ AllocateAsciiString(result_pos, current_string_length, scratch_2,
-                         index, no_reg, &bailout);
-  __ mov(result, result_pos);
-
-  // Adjust current_string_length to include padding bytes at end of string.
-  // Keep track of the number of padding bytes.
-  __ mov(new_padding_chars, current_string_length);
-  __ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
-  __ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
-  __ sub(new_padding_chars, Operand(current_string_length));
-  __ neg(new_padding_chars);
-  __ mov(padding_chars, new_padding_chars);
-
-  Label copy_loop_1_done;
-  Label copy_loop_1;
-  __ test(current_string_length, Operand(current_string_length));
-  __ j(zero, &copy_loop_1_done);
-  __ bind(&copy_loop_1);
-  __ sub(Operand(current_string_length), Immediate(kPointerSize));
-  __ mov(scratch, FieldOperand(current_string, current_string_length,
-                               times_1, SeqAsciiString::kHeaderSize));
-  __ mov(FieldOperand(result_pos, current_string_length,
-                      times_1, SeqAsciiString::kHeaderSize),
-         scratch);
-  __ j(not_zero, &copy_loop_1);
-  __ bind(&copy_loop_1_done);
-
-  __ mov(index, Immediate(1));
+  // Check that all array elements are sequential ASCII strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ Set(index, Immediate(0));
+  __ Set(string_length, Immediate(0));
   // Loop condition: while (index < length).
-  Label loop;
+  // Live loop registers: index, array_length, string,
+  //                      scratch, string_length, elements.
+  __ jmp(&loop_condition);
   __ bind(&loop);
-  __ cmp(index, array_length);
+  __ cmp(index, Operand(array_length));
   __ j(greater_equal, &done);
 
-  // If the separator is the empty string, signalled by NULL, skip it.
-  Label separator_done;
-  __ mov(current_string, separator);
-  __ test(current_string, Operand(current_string));
-  __ j(zero, &separator_done);
-
-  // Append separator to result.  It is known to be a flat ascii string.
-  __ AppendStringToTopOfNewSpace(current_string, current_string_length,
-                                 result_pos, scratch, scratch_2, result,
-                                 padding_chars, &bailout);
-  __ bind(&separator_done);
-
-  // Add next element of array to the end of the result.
-  // Get current_string = array[index].
-  __ mov(scratch, elements);
-  __ mov(current_string, FieldOperand(scratch, index,
+  __ mov(string, FieldOperand(elements, index,
                                       times_pointer_size,
                                       FixedArray::kHeaderSize));
-  // If current != flat ascii string drop result, return undefined.
-  __ test(current_string, Immediate(kSmiTagMask));
+  __ test(string, Immediate(kSmiTagMask));
   __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
-  __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, Immediate(
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
+  __ j(not_equal, &bailout);
+  __ add(string_length,
+         FieldOperand(string, SeqAsciiString::kLengthOffset));
+  __ j(overflow, &bailout);
+  __ add(Operand(index), Immediate(1));
+  __ bind(&loop_condition);
+  __ cmp(index, Operand(array_length));
+  __ j(less, &loop);
+
+  // If array_length is 1, return elements[0], a string.
+  __ cmp(array_length, 1);
+  __ j(not_equal, &not_size_one_array);
+  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+  __ mov(result_operand, scratch);
+  __ jmp(&done);
+
+  __ bind(&not_size_one_array);
+
+  // End of array_length live range.
+  result_pos = array_length;
+  array_length = no_reg;
+
+  // Live registers:
+  // string_length: Sum of string lengths, as a smi.
+  // elements: FixedArray of strings.
+
+  // Check that the separator is a flat ASCII string.
+  __ mov(string, separator_operand);
+  __ test(string, Immediate(kSmiTagMask));
+  __ j(zero, &bailout);
+  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   __ and_(scratch, Immediate(
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
   __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
   __ j(not_equal, &bailout);
 
-  // Append current to the result.
-  __ AppendStringToTopOfNewSpace(current_string, current_string_length,
-                                 result_pos, scratch, scratch_2, result,
-                                 padding_chars, &bailout);
+  // Add (separator length times array_length) - separator length
+  // to string_length.
+  __ mov(scratch, separator_operand);
+  __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
+  __ imul(scratch, array_length_operand);
+  __ j(overflow, &bailout);
+  __ add(string_length, Operand(scratch));
+  __ j(overflow, &bailout);
+
+  __ shr(string_length, 1);
+  // Live registers and stack values:
+  //   string_length
+  //   elements
+  __ AllocateAsciiString(result_pos, string_length, scratch,
+                         index, string, &bailout);
+  __ mov(result_operand, result_pos);
+  __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+
+  __ mov(string, separator_operand);
+  __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+         Immediate(Smi::FromInt(1)));
+  __ j(equal, &one_char_separator);
+  __ j(greater, &long_separator);
+
+
+  // Empty separator case
+  __ mov(index, Immediate(0));
+  __ jmp(&loop_1_condition);
+  // Loop condition: while (index < length).
+  __ bind(&loop_1);
+  // Each iteration of the loop concatenates one string to the result.
+  // Live values in registers:
+  //   index: which element of the elements array we are adding to the result.
+  //   result_pos: the position to which we are currently copying characters.
+  //   elements: the FixedArray of strings we are joining.
+
+  // Get string = array[index].
+  __ mov(string, FieldOperand(elements, index,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
   __ add(Operand(index), Immediate(1));
-  __ jmp(&loop);  // End while (index < length).
+  __ bind(&loop_1_condition);
+  __ cmp(index, array_length_operand);
+  __ j(less, &loop_1);  // End while (index < length).
+  __ jmp(&done);
+
+
+
+  // One-character separator case
+  __ bind(&one_char_separator);
+  // Replace separator with its ascii character value.
+  __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ mov_b(separator_operand, scratch);
+
+  __ Set(index, Immediate(0));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator
+  __ jmp(&loop_2_entry);
+  // Loop condition: while (index < length).
+  __ bind(&loop_2);
+  // Each iteration of the loop concatenates one string to the result.
+  // Live values in registers:
+  //   index: which element of the elements array we are adding to the result.
+  //   result_pos: the position to which we are currently copying characters.
+
+  // Copy the separator character to the result.
+  __ mov_b(scratch, separator_operand);
+  __ mov_b(Operand(result_pos, 0), scratch);
+  __ inc(result_pos);
+
+  __ bind(&loop_2_entry);
+  // Get string = array[index].
+  __ mov(string, FieldOperand(elements, index,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
+  __ add(Operand(index), Immediate(1));
+
+  __ cmp(index, array_length_operand);
+  __ j(less, &loop_2);  // End while (index < length).
+  __ jmp(&done);
+
+
+  // Long separator case (separator is more than one character).
+  __ bind(&long_separator);
+
+  __ Set(index, Immediate(0));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator
+  __ jmp(&loop_3_entry);
+  // Loop condition: while (index < length).
+  __ bind(&loop_3);
+  // Each iteration of the loop concatenates one string to the result.
+  // Live values in registers:
+  //   index: which element of the elements array we are adding to the result.
+  //   result_pos: the position to which we are currently copying characters.
+
+  // Copy the separator to the result.
+  __ mov(string, separator_operand);
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
+
+  __ bind(&loop_3_entry);
+  // Get string = array[index].
+  __ mov(string, FieldOperand(elements, index,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ mov(string_length,
+         FieldOperand(string, String::kLengthOffset));
+  __ shr(string_length, 1);
+  __ lea(string,
+         FieldOperand(string, SeqAsciiString::kHeaderSize));
+  __ CopyBytes(string, result_pos, string_length, scratch);
+  __ add(Operand(index), Immediate(1));
+
+  __ cmp(index, array_length_operand);
+  __ j(less, &loop_3);  // End while (index < length).
+  __ jmp(&done);
+
 
   __ bind(&bailout);
-  __ mov(result, Factory::undefined_value());
+  __ mov(result_operand, Factory::undefined_value());
   __ bind(&done);
-  __ mov(eax, result);
+  __ mov(eax, result_operand);
   // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(5 * kPointerSize));
+  __ add(Operand(esp), Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->Plug(eax);
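
The full code generator's EmitFastAsciiArrayJoin receives essentially the same two-pass rewrite as the classic backend above; only the stack bookkeeping differs (3 * kPointerSize dropped here versus 2 * kPointerSize plus frame_->Drop(1) there, because this backend leaves the separator on the stack itself). See the sketch after the codegen-ia32.cc diff for the overall strategy.
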
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d32f95d..24ee1fe 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,6 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "ia32/lithium-codegen-ia32.h"
 #include "code-stubs.h"
 #include "stub-cache.h"
@@ -54,6 +58,157 @@
 };
 
 
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver()
+    : nodes_(32),
+      identified_cycles_(4),
+      result_(16),
+      next_visited_id_(0) {
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::Resolve(
+    const ZoneList<LMoveOperands>* moves,
+    LOperand* marker_operand) {
+  nodes_.Rewind(0);
+  identified_cycles_.Rewind(0);
+  result_.Rewind(0);
+  next_visited_id_ = 0;
+
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i], marker_operand);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
+  ZoneList<LOperand*> cycle_operands(8);
+  cycle_operands.Add(marker_operand);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    cycle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  cycle_operands.Add(marker_operand);
+
+  for (int i = cycle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = cycle_operands[i];
+    LOperand* to = cycle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This introduces a cycle. Save.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
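LGapResolver turns a parallel move into a sequential list: each destination node records the node it is assigned from, RegisterMove uses CanReach to detect when a new edge closes a cycle, ResolveCycle rotates each saved cycle through the marker operand, and the worklist in Resolve emits the remaining moves once their source nodes are resolved. A rough standalone illustration of the same idea with integer slot ids and a scratch slot instead of LOperands (hypothetical names, not the V8 classes):

#include <cstdio>
#include <vector>

struct Move { int from; int to; };

// Sequentialize a parallel move; `scratch` plays the role of the marker operand.
std::vector<Move> Sequentialize(std::vector<Move> moves, int scratch) {
  std::vector<Move> out;
  std::vector<bool> done(moves.size(), false);
  while (true) {
    bool progress = false;
    bool pending = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      if (done[i]) continue;
      pending = true;
      // A move is safe once no other pending move still reads its destination.
      bool blocked = false;
      for (size_t j = 0; j < moves.size(); ++j) {
        if (!done[j] && j != i && moves[j].from == moves[i].to) blocked = true;
      }
      if (!blocked) {
        out.push_back(moves[i]);
        done[i] = true;
        progress = true;
      }
    }
    if (!pending) break;
    if (!progress) {
      // All pending moves block each other: a cycle. Break it by spilling one
      // source into the scratch slot and retargeting its readers.
      for (size_t i = 0; i < moves.size(); ++i) {
        if (done[i]) continue;
        int spilled = moves[i].from;
        out.push_back({spilled, scratch});
        for (size_t j = 0; j < moves.size(); ++j) {
          if (!done[j] && moves[j].from == spilled) moves[j].from = scratch;
        }
        break;
      }
    }
  }
  return out;
}

int main() {
  // A swap (r0 <-> r1) is a two-node cycle; r7 acts as the scratch slot.
  std::vector<Move> seq = Sequentialize({{1, 0}, {0, 1}}, 7);
  for (const Move& m : seq) std::printf("r%d <- r%d\n", m.to, m.from);
  return 0;
}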
+
+
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
@@ -135,6 +290,17 @@
       __ j(not_zero, &loop);
     } else {
       __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+      // On windows, you may not access the stack more than one page below
+      // the most recently mapped page. To make the allocated area randomly
+      // accessible, we write to each page in turn (the value is irrelevant).
+      const int kPageSize = 4 * KB;
+      for (int offset = slots * kPointerSize - kPageSize;
+           offset > 0;
+           offset -= kPageSize) {
+        __ mov(Operand(esp, offset), eax);
+      }
+#endif
     }
   }
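The _MSC_VER loop exists because, as its comment notes, Windows does not allow touching the stack more than one page below the most recently mapped page, so after one large subtraction from esp the code writes one word per 4 KiB page of the new frame. A minimal standalone illustration (hypothetical frame size, not V8 code) of the esp-relative offsets such a probe loop visits:

#include <cstdio>

int main() {
  const int kPointerSize = 4;     // IA32
  const int kPageSize = 4 * 1024;
  const int slots = 5000;         // a frame large enough to span several pages
  for (int offset = slots * kPointerSize - kPageSize;
       offset > 0;
       offset -= kPageSize) {
    std::printf("touch [esp + %d]\n", offset);  // one write per page
  }
  return 0;
}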
 
@@ -261,6 +427,45 @@
 }
 
 
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (environment->spilled_registers() != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          environment->spilled_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(translation,
+                         environment->spilled_registers()[value->index()],
+                         environment->HasTaggedValueAt(i));
+      } else if (
+          value->IsDoubleRegister() &&
+          environment->spilled_double_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(
+            translation,
+            environment->spilled_double_registers()[value->index()],
+            false);
+      }
+    }
+
+    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+  }
+}
+
+
 void LCodeGen::AddToTranslation(Translation* translation,
                                 LOperand* op,
                                 bool is_tagged) {
@@ -385,7 +590,7 @@
       ++frame_count;
     }
     Translation translation(&translations_, frame_count);
-    environment->WriteTranslation(this, &translation);
+    WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     environment->Register(deoptimization_index, translation.index());
     deoptimizations_.Add(environment);
@@ -564,8 +769,8 @@
   Register cpu_scratch = esi;
   bool destroys_cpu_scratch = false;
 
-  LGapResolver resolver(move->move_operands(), &marker_operand);
-  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+  const ZoneList<LMoveOperands>* moves =
+      resolver_.Resolve(move->move_operands(), &marker_operand);
   for (int i = moves->length() - 1; i >= 0; --i) {
     LMoveOperands move = moves->at(i);
     LOperand* from = move.from();
@@ -1910,6 +2115,14 @@
 }
 
 
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  // TODO(antonm): load a context with a separate instruction.
+  Register result = ToRegister(instr->result());
+  __ LoadContext(result, instr->context_chain_length());
+  __ mov(result, ContextOperand(result, instr->slot_index()));
+}
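DoLoadContextSlot follows context_chain_length() links up the context chain and then reads the slot at slot_index(). A minimal standalone sketch of those two steps with a toy context structure (hypothetical, not V8's Context layout):

#include <cstdio>
#include <vector>

struct Ctx {
  Ctx* previous;            // enclosing context
  std::vector<int> slots;   // this context's slots
};

int LoadContextSlot(Ctx* current, int chain_length, int slot_index) {
  Ctx* ctx = current;
  for (int i = 0; i < chain_length; i++) ctx = ctx->previous;  // LoadContext
  return ctx->slots[slot_index];                               // ContextOperand
}

int main() {
  Ctx outer{nullptr, {7, 8, 9}};
  Ctx inner{&outer, {1, 2, 3}};
  std::printf("%d\n", LoadContextSlot(&inner, 1, 2));  // prints 9
  return 0;
}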
+
+
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   Register object = ToRegister(instr->input());
   Register result = ToRegister(instr->result());
@@ -2009,32 +2222,15 @@
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   Register elements = ToRegister(instr->elements());
   Register key = ToRegister(instr->key());
-  Register result;
-  if (instr->load_result() != NULL) {
-    result = ToRegister(instr->load_result());
-  } else {
-    result = ToRegister(instr->result());
-    ASSERT(result.is(elements));
-  }
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(elements));
 
   // Load the result.
   __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
 
-  Representation r = instr->hydrogen()->representation();
-  if (r.IsInteger32()) {
-    // Untag and check for smi.
-    __ SmiUntag(result);
-    DeoptimizeIf(carry, instr->environment());
-  } else if (r.IsDouble()) {
-    EmitNumberUntagD(result,
-                     ToDoubleRegister(instr->result()),
-                     instr->environment());
-  } else {
-    // Check for the hole value.
-    ASSERT(r.IsTagged());
-    __ cmp(result, Factory::the_hole_value());
-    DeoptimizeIf(equal, instr->environment());
-  }
+  // Check for the hole value.
+  __ cmp(result, Factory::the_hole_value());
+  DeoptimizeIf(equal, instr->environment());
 }
 
 
@@ -2988,9 +3184,60 @@
       __ add(Operand(esp), Immediate(kDoubleSize));
       __ bind(&done);
     } else {
-      // This will bail out if the input was not in the int32 range (or,
-      // unfortunately, if the input was 0x80000000).
-      DeoptimizeIf(equal, instr->environment());
+      NearLabel done;
+      Register temp_reg = ToRegister(instr->temporary());
+      XMMRegister xmm_scratch = xmm0;
+
+      // If cvttsd2si succeeded, we're done. Otherwise, we attempt
+      // manual conversion.
+      __ j(not_equal, &done);
+
+      // Get high 32 bits of the input in result_reg and temp_reg.
+      __ pshufd(xmm_scratch, input_reg, 1);
+      __ movd(Operand(temp_reg), xmm_scratch);
+      __ mov(result_reg, temp_reg);
+
+      // Prepare negation mask in temp_reg.
+      __ sar(temp_reg, kBitsPerInt - 1);
+
+      // Extract the exponent from result_reg and subtract adjusted
+      // bias from it. The adjustment is selected in a way such that
+      // when the difference is zero, the answer is in the low 32 bits
+      // of the input, otherwise a shift has to be performed.
+      __ shr(result_reg, HeapNumber::kExponentShift);
+      __ and_(result_reg,
+              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
+      __ sub(Operand(result_reg),
+             Immediate(HeapNumber::kExponentBias +
+                       HeapNumber::kExponentBits +
+                       HeapNumber::kMantissaBits));
+      // Don't handle big (> kMantissaBits + kExponentBits == 63) or
+      // special exponents.
+      DeoptimizeIf(greater, instr->environment());
+
+      // Zero out the sign and the exponent in the input (by shifting
+      // it to the left) and restore the implicit mantissa bit,
+      // i.e. convert the input to unsigned int64 shifted left by
+      // kExponentBits.
+      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
+      // Minus zero has the most significant bit set and the other
+      // bits cleared.
+      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
+      __ psllq(input_reg, HeapNumber::kExponentBits);
+      __ por(input_reg, xmm_scratch);
+
+      // Get the amount to shift the input right in xmm_scratch.
+      __ neg(result_reg);
+      __ movd(xmm_scratch, Operand(result_reg));
+
+      // Shift the input right and extract low 32 bits.
+      __ psrlq(input_reg, xmm_scratch);
+      __ movd(Operand(result_reg), input_reg);
+
+      // Use the prepared mask in temp_reg to negate the result if necessary.
+      __ xor_(result_reg, Operand(temp_reg));
+      __ sub(result_reg, Operand(temp_reg));
+      __ bind(&done);
     }
   } else {
     NearLabel done;
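When cvttsd2si fails, the sequence above recovers the int32 by hand from the IEEE-754 bit pattern: build a sign mask from the top bit, compute a shift amount from the biased exponent, left-align the mantissa with its implicit leading bit restored (via the minus-zero constant), shift it down, and re-apply the sign. A rough C++ sketch of the same trick, assuming the value fits in int32 and ignoring NaN/infinity handling (illustrative only, not the V8 code path):

#include <cstdint>
#include <cstdio>
#include <cstring>

int32_t DoubleToInt32(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));

  const int kExponentBits = 11;
  const int kMantissaBits = 52;
  const int kExponentBias = 1023;

  int sign = (bits >> 63) ? -1 : 1;
  int exponent = static_cast<int>((bits >> kMantissaBits) & 0x7FF);
  if (exponent == 0) return 0;  // zeros and denormals truncate to 0

  // Mantissa left-aligned in 64 bits with the implicit leading 1 restored.
  uint64_t mantissa = (bits << kExponentBits) | (uint64_t{1} << 63);

  // Distance the leading bit must move right so bit 0 becomes the ones place.
  int shift = 63 - (exponent - kExponentBias);
  if (shift >= 64) return 0;  // |d| < 1 truncates to 0
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> shift);
  return sign * static_cast<int32_t>(magnitude);
}

int main() {
  std::printf("%d %d %d\n",
              DoubleToInt32(12345.9), DoubleToInt32(-7.0), DoubleToInt32(0.5));
  return 0;
}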
@@ -3067,13 +3314,13 @@
 }
 
 
-void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
-  if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+  if (Heap::InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
-        Factory::NewJSGlobalPropertyCell(prototype);
+        Factory::NewJSGlobalPropertyCell(object);
     __ mov(result, Operand::Cell(cell));
   } else {
-    __ mov(result, prototype);
+    __ mov(result, object);
   }
 }
 
@@ -3082,11 +3329,10 @@
   Register reg = ToRegister(instr->temp());
 
   Handle<JSObject> holder = instr->holder();
-  Handle<Map> receiver_map = instr->receiver_map();
-  Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+  Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadPrototype(reg, current_prototype);
+  LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
@@ -3096,7 +3342,7 @@
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadPrototype(reg, current_prototype);
+    LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
@@ -3410,3 +3656,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 41ac39a..ef8fb5c 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,8 +40,30 @@
 
 // Forward declarations.
 class LDeferredCode;
+class LGapNode;
 class SafepointGenerator;
 
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver();
+  const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
+                                         LOperand* marker_operand);
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start, LOperand* marker_operand);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  int next_visited_id_;
+};
+
 
 class LCodeGen BASE_EMBEDDED {
  public:
@@ -83,6 +105,9 @@
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
 
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
   // Declare methods that deal with the individual node types.
 #define DECLARE_DO(type) void Do##type(L##type* node);
   LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -150,7 +175,7 @@
                          int arity,
                          LInstruction* instr);
 
-  void LoadPrototype(Register result, Handle<JSObject> prototype);
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
 
   void RegisterLazyDeoptimization(LInstruction* instr);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@@ -230,6 +255,9 @@
   // itself is emitted at the end of the generated code.
   SafepointTableBuilder safepoints_;
 
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 6355f16..254a47a 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -25,6 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "ia32/lithium-ia32.h"
 #include "ia32/lithium-codegen-ia32.h"
 
@@ -64,12 +68,12 @@
 }
 
 
-void LInstruction::PrintTo(StringStream* stream) const {
+void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
   if (HasResult()) {
-    result()->PrintTo(stream);
-    stream->Add(" ");
+    PrintOutputOperandTo(stream);
   }
+
   PrintDataTo(stream);
 
   if (HasEnvironment()) {
@@ -84,7 +88,25 @@
 }
 
 
-void LLabel::PrintDataTo(StringStream* stream) const {
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < I; i++) {
+    stream->Add(i == 0 ? "= " : " ");
+    inputs_.at(i)->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  if (this->HasResult()) {
+    this->result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
   LGap::PrintDataTo(stream);
   LLabel* rep = replacement();
   if (rep != NULL) {
@@ -93,32 +115,6 @@
 }
 
 
-bool LParallelMove::IsRedundant() const {
-  for (int i = 0; i < move_operands_.length(); ++i) {
-    if (!move_operands_[i].IsRedundant()) return false;
-  }
-  return true;
-}
-
-
-void LParallelMove::PrintDataTo(StringStream* stream) const {
-  for (int i = move_operands_.length() - 1; i >= 0; --i) {
-    if (!move_operands_[i].IsEliminated()) {
-      LOperand* from = move_operands_[i].from();
-      LOperand* to = move_operands_[i].to();
-      if (from->Equals(to)) {
-        to->PrintTo(stream);
-      } else {
-        to->PrintTo(stream);
-        stream->Add(" = ");
-        from->PrintTo(stream);
-      }
-      stream->Add("; ");
-    }
-  }
-}
-
-
 bool LGap::IsRedundant() const {
   for (int i = 0; i < 4; i++) {
     if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
@@ -130,7 +126,7 @@
 }
 
 
-void LGap::PrintDataTo(StringStream* stream) const {
+void LGap::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < 4; i++) {
     stream->Add("(");
     if (parallel_moves_[i] != NULL) {
@@ -169,27 +165,18 @@
 }
 
 
-
-void LBinaryOperation::PrintDataTo(StringStream* stream) const {
-  stream->Add("= ");
-  left()->PrintTo(stream);
-  stream->Add(" ");
-  right()->PrintTo(stream);
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) const {
+void LGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", block_id());
 }
 
 
-void LBranch::PrintDataTo(StringStream* stream) const {
+void LBranch::PrintDataTo(StringStream* stream) {
   stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
   input()->PrintTo(stream);
 }
 
 
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   left()->PrintTo(stream);
   stream->Add(" %s ", Token::String(op()));
@@ -198,7 +185,7 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   input()->PrintTo(stream);
   stream->Add(is_strict() ? " === null" : " == null");
@@ -206,35 +193,35 @@
 }
 
 
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_object(");
   input()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   input()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   input()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_cached_array_index(");
   input()->PrintTo(stream);
   stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
 }
 
 
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if class_of_test(");
   input()->PrintTo(stream);
   stream->Add(", \"%o\") then B%d else B%d",
@@ -244,13 +231,13 @@
 }
 
 
-void LTypeofIs::PrintDataTo(StringStream* stream) const {
+void LTypeofIs::PrintDataTo(StringStream* stream) {
   input()->PrintTo(stream);
   stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
 }
 
 
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if typeof ");
   input()->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
@@ -259,59 +246,59 @@
 }
 
 
-void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
   stream->Add("#%d / ", arity());
 }
 
 
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
   stream->Add("/%s ", hydrogen()->OpName());
   input()->PrintTo(stream);
 }
 
 
-void LCallKeyed::PrintDataTo(StringStream* stream) const {
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[ecx] #%d / ", arity());
 }
 
 
-void LCallNamed::PrintDataTo(StringStream* stream) const {
+void LCallNamed::PrintDataTo(StringStream* stream) {
   SmartPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
 
-void LCallGlobal::PrintDataTo(StringStream* stream) const {
+void LCallGlobal::PrintDataTo(StringStream* stream) {
   SmartPointer<char> name_string = name()->ToCString();
   stream->Add("%s #%d / ", *name_string, arity());
 }
 
 
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
   stream->Add("#%d / ", arity());
 }
 
 
-void LCallNew::PrintDataTo(StringStream* stream) const {
-  LUnaryOperation::PrintDataTo(stream);
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  input()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
 }
 
 
-void LClassOfTest::PrintDataTo(StringStream* stream) const {
+void LClassOfTest::PrintDataTo(StringStream* stream) {
   stream->Add("= class_of_test(");
   input()->PrintTo(stream);
   stream->Add(", \"%o\")", *hydrogen()->class_name());
 }
 
 
-void LUnaryOperation::PrintDataTo(StringStream* stream) const {
-  stream->Add("= ");
-  input()->PrintTo(stream);
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
 
   stream->Add(" length ");
@@ -322,11 +309,6 @@
 }
 
 
-void LChunk::Verify() const {
-  // TODO(twuerthinger): Implement verification for chunk.
-}
-
-
 int LChunk::GetNextSpillIndex(bool is_double) {
   // Skip a slot when allocating a double-width slot.
   if (is_double) spill_slot_count_++;
@@ -381,7 +363,7 @@
 }
 
 
-void LStoreNamed::PrintDataTo(StringStream* stream) const {
+void LStoreNamed::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(".");
   stream->Add(*String::cast(*name())->ToCString());
@@ -390,7 +372,7 @@
 }
 
 
-void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add("[");
   key()->PrintTo(stream);
@@ -598,33 +580,54 @@
 }
 
 
-LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+                                    LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
   return Define(instr, new LUnallocated(LUnallocated::NONE));
 }
 
 
-LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateInstruction<1, I, T>* instr) {
   return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
-LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateInstruction<1, I, T>* instr,
+    int index) {
   return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
-LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateInstruction<1, I, T>* instr) {
   return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
-LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                         Register reg) {
   return Define(instr, ToUnallocated(reg));
 }
 
 
-LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
-                                               XMMRegister reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateInstruction<1, I, T>* instr,
+    XMMRegister reg) {
   return Define(instr, ToUnallocated(reg));
 }
 
@@ -692,20 +695,6 @@
 }
 
 
-LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
-  allocator_->RecordDefinition(current_instruction_, result);
-  instr->set_result(result);
-  return instr;
-}
-
-
-LOperand* LChunkBuilder::Temp() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
-  allocator_->RecordTemporary(operand);
-  return operand;
-}
-
-
 LUnallocated* LChunkBuilder::TempRegister() {
   LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
   allocator_->RecordTemporary(operand);
@@ -781,10 +770,10 @@
     can_deopt = !can_truncate;
   }
 
-  LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
-  if (can_deopt) AssignEnvironment(result);
-  return result;
+  LShiftI* result = new LShiftI(op, left, right, can_deopt);
+  return can_deopt
+      ? AssignEnvironment(DefineSameAsFirst(result))
+      : DefineSameAsFirst(result);
 }
 
 
@@ -813,7 +802,7 @@
   ASSERT(right->representation().IsTagged());
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
-  LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -910,59 +899,6 @@
 }
 
 
-void LEnvironment::WriteTranslation(LCodeGen* cgen,
-                                    Translation* translation) const {
-  if (this == NULL) return;
-
-  // The translation includes one command per value in the environment.
-  int translation_size = values()->length();
-  // The output frame height does not include the parameters.
-  int height = translation_size - parameter_count();
-
-  outer()->WriteTranslation(cgen, translation);
-  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
-  translation->BeginFrame(ast_id(), closure_id, height);
-  for (int i = 0; i < translation_size; ++i) {
-    LOperand* value = values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (spilled_registers_ != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          spilled_registers_[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        cgen->AddToTranslation(translation,
-                               spilled_registers_[value->index()],
-                               HasTaggedValueAt(i));
-      } else if (value->IsDoubleRegister() &&
-                 spilled_double_registers_[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        cgen->AddToTranslation(translation,
-                               spilled_double_registers_[value->index()],
-                               false);
-      }
-    }
-
-    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
-  }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) const {
-  stream->Add("[id=%d|", ast_id());
-  stream->Add("[parameters=%d|", parameter_count());
-  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
-  for (int i = 0; i < values_.length(); ++i) {
-    if (i != 0) stream->Add(";");
-    if (values_[i] == NULL) {
-      stream->Add("[hole]");
-    } else {
-      values_[i]->PrintTo(stream);
-    }
-  }
-  stream->Add("]");
-}
-
-
 LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
   if (hydrogen_env == NULL) return NULL;
 
@@ -999,10 +935,11 @@
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
-                                   instr->include_stack_check());
-  if (instr->include_stack_check()) result = AssignPointerMap(result);
-  return result;
+  LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                            instr->include_stack_check());
+  return (instr->include_stack_check())
+      ? AssignPointerMap(result)
+      : result;
 }
 
 
@@ -1029,32 +966,33 @@
       Token::Value op = compare->token();
       HValue* left = compare->left();
       HValue* right = compare->right();
-      if (left->representation().IsInteger32()) {
+      Representation r = compare->GetInputRepresentation();
+      if (r.IsInteger32()) {
+        ASSERT(left->representation().IsInteger32());
         ASSERT(right->representation().IsInteger32());
-        return new LCmpIDAndBranch(op,
-                                   UseRegisterAtStart(left),
+
+        return new LCmpIDAndBranch(UseRegisterAtStart(left),
                                    UseOrConstantAtStart(right),
                                    first_id,
-                                   second_id,
-                                   false);
-      } else if (left->representation().IsDouble()) {
+                                   second_id);
+      } else if (r.IsDouble()) {
+        ASSERT(left->representation().IsDouble());
         ASSERT(right->representation().IsDouble());
-        return new LCmpIDAndBranch(op,
-                                   UseRegisterAtStart(left),
+
+        return new LCmpIDAndBranch(UseRegisterAtStart(left),
                                    UseRegisterAtStart(right),
                                    first_id,
-                                   second_id,
-                                   true);
+                                   second_id);
       } else {
         ASSERT(left->representation().IsTagged());
         ASSERT(right->representation().IsTagged());
         bool reversed = op == Token::GT || op == Token::LTE;
         LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
         LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
-        LInstruction* result = new LCmpTAndBranch(left_operand,
-                                                  right_operand,
-                                                  first_id,
-                                                  second_id);
+        LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+                                                    right_operand,
+                                                    first_id,
+                                                    second_id);
         return MarkAsCall(result, instr);
       }
     } else if (v->IsIsSmi()) {
@@ -1085,7 +1023,6 @@
       // We only need a temp register for non-strict compare.
       LOperand* temp = compare->is_strict() ? NULL : TempRegister();
       return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  compare->is_strict(),
                                   temp,
                                   first_id,
                                   second_id);
@@ -1108,7 +1045,7 @@
                                          second_id);
     } else if (v->IsInstanceOf()) {
       HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LInstruction* result =
+      LInstanceOfAndBranch* result =
           new LInstanceOfAndBranch(
               UseFixed(instance_of->left(), InstanceofStub::left()),
               UseFixed(instance_of->right(), InstanceofStub::right()),
@@ -1155,7 +1092,7 @@
 
 
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LInstruction* result =
+  LInstanceOf* result =
       new LInstanceOf(UseFixed(instr->left(), InstanceofStub::left()),
                       UseFixed(instr->right(), InstanceofStub::right()));
   return MarkAsCall(DefineFixed(result, eax), instr);
@@ -1164,7 +1101,7 @@
 
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
-  LInstruction* result =
+  LInstanceOfKnownGlobal* result =
       new LInstanceOfKnownGlobal(
           UseFixed(instr->value(), InstanceofStub::left()),
           FixedTemp(edi));
@@ -1178,10 +1115,10 @@
   LOperand* receiver = UseFixed(instr->receiver(), eax);
   LOperand* length = UseRegisterAtStart(instr->length());
   LOperand* elements = UseRegisterAtStart(instr->elements());
-  LInstruction* result = new LApplyArguments(function,
-                                             receiver,
-                                             length,
-                                             elements);
+  LApplyArguments* result = new LApplyArguments(function,
+                                                receiver,
+                                                length,
+                                                elements);
   return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1214,11 +1151,11 @@
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LInstruction* result = new LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
-    LInstruction* result = new LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1267,7 +1204,7 @@
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
   LOperand* constructor = UseFixed(instr->constructor(), edi);
   argument_count_ -= instr->argument_count();
-  LInstruction* result = new LCallNew(constructor);
+  LCallNew* result = new LCallNew(constructor);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1307,7 +1244,9 @@
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
-  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LBitNotI* result = new LBitNotI(input);
+  return DefineSameAsFirst(result);
 }
 
 
@@ -1347,12 +1286,12 @@
     FixedTemp(edx);
     LOperand* value = UseFixed(instr->left(), eax);
     LOperand* divisor = UseRegister(instr->right());
-    LInstruction* result = DefineFixed(new LModI(value, divisor), edx);
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
-        instr->CheckFlag(HValue::kCanBeDivByZero)) {
-      result = AssignEnvironment(result);
-    }
-    return result;
+    LModI* mod = new LModI(value, divisor);
+    LInstruction* result = DefineFixed(mod, edx);
+    return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+            instr->CheckFlag(HValue::kCanBeDivByZero))
+        ? AssignEnvironment(result)
+        : result;
   } else if (instr->representation().IsTagged()) {
     return DoArithmeticT(Token::MOD, instr);
   } else {
@@ -1449,21 +1388,26 @@
 
 LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
   Token::Value op = instr->token();
-  if (instr->left()->representation().IsInteger32()) {
+  Representation r = instr->GetInputRepresentation();
+  if (r.IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(op, left, right, false));
-  } else if (instr->left()->representation().IsDouble()) {
+    return DefineAsRegister(new LCmpID(left, right));
+  } else if (r.IsDouble()) {
+    ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return DefineAsRegister(new LCmpID(op, left, right, true));
+    return DefineAsRegister(new LCmpID(left, right));
   } else {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
     bool reversed = (op == Token::GT || op == Token::LTE);
     LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
     LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
-    LInstruction* result = new LCmpT(left, right);
+    LCmpT* result = new LCmpT(left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 }
@@ -1473,7 +1417,7 @@
     HCompareJSObjectEq* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LInstruction* result = new LCmpJSObjectEq(left, right);
+  LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
   return DefineAsRegister(result);
 }
 
@@ -1482,8 +1426,7 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LIsNull(value,
-                                      instr->is_strict()));
+  return DefineAsRegister(new LIsNull(value));
 }
 
 
@@ -1542,7 +1485,7 @@
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LInstruction* result = new LValueOf(object, TempRegister());
+  LValueOf* result = new LValueOf(object, TempRegister());
   return AssignEnvironment(DefineSameAsFirst(result));
 }
 
@@ -1565,7 +1508,7 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LInstruction* res = new LNumberUntagD(value);
+      LNumberUntagD* res = new LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1576,7 +1519,7 @@
             (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
-        LInstruction* res = new LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
         return DefineSameAsFirst(new LSmiUntag(value, needs_check));
@@ -1589,12 +1532,16 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LInstruction* result = new LNumberTagD(value, temp);
+      LNumberTagD* result = new LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = UseRegister(instr->value());
-      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+      bool needs_temp = instr->CanTruncateToInt32() &&
+          !CpuFeatures::IsSupported(SSE3);
+      LOperand* value = needs_temp ?
+          UseTempRegister(instr->value()) : UseRegister(instr->value());
+      LOperand* temp = needs_temp ? TempRegister() : NULL;
+      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
@@ -1603,7 +1550,7 @@
       if (val->HasRange() && val->range()->IsInSmiRange()) {
         return DefineSameAsFirst(new LSmiTag(value));
       } else {
-        LInstruction* result = new LNumberTagI(value);
+        LNumberTagI* result = new LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
@@ -1625,17 +1572,14 @@
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  LInstruction* result = new LCheckInstanceType(value, temp);
+  LCheckInstanceType* result = new LCheckInstanceType(value, temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LInstruction* result =
-      new LCheckPrototypeMaps(temp,
-                              instr->holder(),
-                              instr->receiver_map());
+  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
@@ -1654,7 +1598,7 @@
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new LCheckMap(value);
+  LCheckMap* result = new LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1675,14 +1619,14 @@
   } else if (r.IsTagged()) {
     return DefineAsRegister(new LConstantT(instr->handle()));
   } else {
-    Abort("unsupported constant of type double");
+    UNREACHABLE();
     return NULL;
   }
 }
 
 
 LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LInstruction* result = new LLoadGlobal;
+  LLoadGlobal* result = new LLoadGlobal;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
@@ -1694,16 +1638,22 @@
 }
 
 
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  return DefineAsRegister(new LLoadContextSlot);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  return DefineAsRegister(
-      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+  ASSERT(instr->representation().IsTagged());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new LLoadNamedField(obj));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object = UseFixed(instr->object(), eax);
-  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), eax);
-  return MarkAsCall(result, instr);
+  LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
@@ -1723,23 +1673,12 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
     HLoadKeyedFastElement* instr) {
-  Representation r = instr->representation();
-  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterAtStart(instr->key());
-  LOperand* load_result = NULL;
-  // Double needs an extra temp, because the result is converted from heap
-  // number to a double register.
-  if (r.IsDouble()) load_result = TempRegister();
-  LInstruction* result = new LLoadKeyedFastElement(obj,
-                                                   key,
-                                                   load_result);
-  if (r.IsDouble()) {
-    result = DefineAsRegister(result);
-  } else {
-    result = DefineSameAsFirst(result);
-  }
-  return AssignEnvironment(result);
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
@@ -1747,9 +1686,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* key = UseFixed(instr->key(), eax);
 
-  LInstruction* result =
-      DefineFixed(new LLoadKeyedGeneric(object, key), eax);
-  return MarkAsCall(result, instr);
+  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+  return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
@@ -1801,14 +1739,7 @@
   LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
       ? TempRegister() : NULL;
 
-  return new LStoreNamedField(obj,
-                              instr->name(),
-                              val,
-                              instr->is_in_object(),
-                              instr->offset(),
-                              temp,
-                              needs_write_barrier,
-                              instr->transition());
+  return new LStoreNamedField(obj, val, temp);
 }
 
 
@@ -1816,7 +1747,7 @@
   LOperand* obj = UseFixed(instr->object(), edx);
   LOperand* val = UseFixed(instr->value(), eax);
 
-  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  LStoreNamedGeneric* result = new LStoreNamedGeneric(obj, val);
   return MarkAsCall(result, instr);
 }
 
@@ -1842,8 +1773,8 @@
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LInstruction* result = new LDeleteProperty(Use(instr->object()),
-                                             UseOrConstant(instr->key()));
+  LDeleteProperty* result = new LDeleteProperty(Use(instr->object()),
+                                                UseOrConstant(instr->key()));
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1884,13 +1815,13 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
-  return DefineAsRegister(AssignEnvironment(result));
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LInstruction* result = new LTypeof(UseAtStart(instr->value()));
+  LTypeof* result = new LTypeof(UseAtStart(instr->value()));
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1919,8 +1850,8 @@
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
-    LInstruction* result = new LLazyBailout;
-    result = AssignEnvironment(result);
+    LLazyBailout* lazy_bailout = new LLazyBailout;
+    LInstruction* result = AssignEnvironment(lazy_bailout);
     instructions_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
     ClearInstructionPendingDeoptimizationEnvironment();
@@ -1956,21 +1887,6 @@
 }
 
 
-void LPointerMap::RecordPointer(LOperand* op) {
-  // Do not record arguments as pointers.
-  if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
-  pointer_operands_.Add(op);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) const {
-  stream->Add("{");
-  for (int i = 0; i < pointer_operands_.length(); ++i) {
-    if (i != 0) stream->Add(";");
-    pointer_operands_[i]->PrintTo(stream);
-  }
-  stream->Add("} @%d", position());
-}
-
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 4b0db40..07f0a8d 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -38,8 +38,6 @@
 
 // Forward declarations.
 class LCodeGen;
-class LEnvironment;
-class Translation;
 
 
 // Type hierarchy:
@@ -79,16 +77,20 @@
 //   LCallNamed
 //   LCallRuntime
 //   LCallStub
+//   LCheckPrototypeMaps
 //   LConstant
 //     LConstantD
 //     LConstantI
 //     LConstantT
 //   LDeoptimize
 //   LFunctionLiteral
+//   LGap
+//     LLabel
 //   LGlobalObject
 //   LGlobalReceiver
-//   LLabel
-//   LLayzBailout
+//   LGoto
+//   LLazyBailout
+//   LLoadContextSlot
 //   LLoadGlobal
 //   LMaterializedLiteral
 //     LArrayLiteral
@@ -111,7 +113,6 @@
 //     LCheckFunction
 //     LCheckInstanceType
 //     LCheckMap
-//     LCheckPrototypeMaps
 //     LCheckSmi
 //     LClassOfTest
 //     LClassOfTestAndBranch
@@ -225,6 +226,7 @@
   V(ClassOfTestAndBranch)                       \
   V(Label)                                      \
   V(LazyBailout)                                \
+  V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadGlobal)                                 \
   V(LoadKeyedFastElement)                       \
@@ -292,8 +294,9 @@
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
-  virtual void PrintTo(StringStream* stream) const;
-  virtual void PrintDataTo(StringStream* stream) const { }
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
   // Declare virtual type testers.
 #define DECLARE_DO(type) virtual bool Is##type() const { return false; }
@@ -309,9 +312,7 @@
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }
 
-  void set_result(LOperand* operand) { result_.set(operand); }
-  LOperand* result() const { return result_.get(); }
-  bool HasResult() const { return result_.is_set(); }
+  virtual bool HasResult() const = 0;
 
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -329,34 +330,66 @@
  private:
   SetOncePointer<LEnvironment> environment_;
   SetOncePointer<LPointerMap> pointer_map_;
-  SetOncePointer<LOperand> result_;
   HValue* hydrogen_value_;
   SetOncePointer<LEnvironment> deoptimization_environment_;
 };
 
 
-class LParallelMove : public ZoneObject {
+template<typename T, int N>
+class OperandContainer {
  public:
-  LParallelMove() : move_operands_(4) { }
-
-  void AddMove(LOperand* from, LOperand* to) {
-    move_operands_.Add(LMoveOperands(from, to));
+  OperandContainer() {
+    for (int i = 0; i < N; i++) elems_[i] = NULL;
   }
-
-  bool IsRedundant() const;
-
-  const ZoneList<LMoveOperands>* move_operands() const {
-    return &move_operands_;
-  }
-
-  void PrintDataTo(StringStream* stream) const;
-
+  int length() const { return N; }
+  T at(int i) const { return elems_[i]; }
+  void set_at(int i, T value) { elems_[i] = value; }
  private:
-  ZoneList<LMoveOperands> move_operands_;
+  T elems_[N];
 };
 
 
-class LGap: public LInstruction {
+template<typename T>
+class OperandContainer<T, 0> {
+ public:
+  int length() const { return 0; }
+  T at(int i) const {
+    UNREACHABLE();
+    return NULL;
+  }
+  void set_at(int i, T value) {
+    UNREACHABLE();
+  }
+};
+
+
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const { return R != 0; }
+  void set_result(LOperand* operand) { outputs_.set_at(0, operand); }
+  LOperand* result() const { return outputs_.at(0); }
+
+  int InputCount() const { return inputs_.length(); }
+  LOperand* InputAt(int i) const { return inputs_.at(i); }
+  void SetInputAt(int i, LOperand* operand) { inputs_.set_at(i, operand); }
+
+  int TempCount() const { return temps_.length(); }
+  LOperand* TempAt(int i) const { return temps_.at(i); }
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+ private:
+  OperandContainer<LOperand*, R> outputs_;
+  OperandContainer<LOperand*, I> inputs_;
+  OperandContainer<LOperand*, T> temps_;
+};
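LTemplateInstruction fixes the number of results, inputs and temps in its template parameters; the OperandContainer<T, 0> specialization avoids declaring a zero-length array and turns any access into a hard failure. A small standalone sketch of the same pattern (hypothetical names, plain int operands instead of LOperand*):

#include <cassert>
#include <cstdio>

template <typename T, int N>
class FixedContainer {
 public:
  FixedContainer() { for (int i = 0; i < N; i++) elems_[i] = T(); }
  int length() const { return N; }
  T at(int i) const { return elems_[i]; }
  void set_at(int i, T value) { elems_[i] = value; }
 private:
  T elems_[N];
};

// Zero-length specialization: no element array, and any access is an error.
template <typename T>
class FixedContainer<T, 0> {
 public:
  int length() const { return 0; }
  T at(int) const { assert(false); return T(); }
  void set_at(int, T) { assert(false); }
};

template <int R, int I>
struct Instr {
  bool HasResult() const { return R != 0; }
  FixedContainer<int, R> outputs;
  FixedContainer<int, I> inputs;
};

int main() {
  Instr<1, 2> add;  // one result, two inputs
  add.inputs.set_at(0, 40);
  add.inputs.set_at(1, 2);
  add.outputs.set_at(0, add.inputs.at(0) + add.inputs.at(1));
  std::printf("result=%d has_result=%d\n",
              add.outputs.at(0), add.HasResult() ? 1 : 0);

  Instr<0, 0> nop;  // zero-operand instruction: both containers are empty
  std::printf("nop has_result=%d\n", nop.HasResult() ? 1 : 0);
  return 0;
}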
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGap(HBasicBlock* block)
       : block_(block) {
@@ -367,7 +400,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
   bool IsRedundant() const;
 
@@ -397,13 +430,13 @@
 };
 
 
-class LGoto: public LInstruction {
+class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   LGoto(int block_id, bool include_stack_check = false)
     : block_id_(block_id), include_stack_check_(include_stack_check) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int block_id() const { return block_id_; }
@@ -415,7 +448,7 @@
 };
 
 
-class LLazyBailout: public LInstruction {
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
  public:
   LLazyBailout() : gap_instructions_size_(0) { }
 
@@ -431,7 +464,7 @@
 };
 
 
-class LDeoptimize: public LInstruction {
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
 };
@@ -444,7 +477,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -459,13 +492,13 @@
 };
 
 
-class LParameter: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LCallStub: public LInstruction {
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
   DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -476,96 +509,89 @@
 };
 
 
-class LUnknownOSRValue: public LInstruction {
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
 
-class LUnaryOperation: public LInstruction {
+template<int R>
+class LUnaryOperation: public LTemplateInstruction<R, 1, 0> {
  public:
-  explicit LUnaryOperation(LOperand* input) : input_(input) { }
+  explicit LUnaryOperation<R>(LOperand* input) {
+    this->SetInputAt(0, input);
+  }
+
+  LOperand* input() const { return this->InputAt(0); }
 
   DECLARE_INSTRUCTION(UnaryOperation)
-
-  LOperand* input() const { return input_; }
-
-  virtual void PrintDataTo(StringStream* stream) const;
-
- private:
-  LOperand* input_;
 };
 
 
-class LBinaryOperation: public LInstruction {
+template<int R>
+class LBinaryOperation: public LTemplateInstruction<R, 2, 0> {
  public:
-  LBinaryOperation(LOperand* left, LOperand* right)
-      : left_(left), right_(right) { }
+  LBinaryOperation(LOperand* left, LOperand* right) {
+    this->SetInputAt(0, left);
+    this->SetInputAt(1, right);
+  }
 
   DECLARE_INSTRUCTION(BinaryOperation)
 
-  LOperand* left() const { return left_; }
-  LOperand* right() const { return right_; }
-  virtual void PrintDataTo(StringStream* stream) const;
-
- private:
-  LOperand* left_;
-  LOperand* right_;
+  LOperand* left() const { return this->InputAt(0); }
+  LOperand* right() const { return this->InputAt(1); }
 };
 
 
-class LApplyArguments: public LBinaryOperation {
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
                   LOperand* length,
-                  LOperand* elements)
-      : LBinaryOperation(function, receiver),
-        length_(length),
-        elements_(elements) { }
+                  LOperand* elements) {
+    this->SetInputAt(0, function);
+    this->SetInputAt(1, receiver);
+    this->SetInputAt(2, length);
+    this->SetInputAt(3, elements);
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
 
-  LOperand* function() const { return left(); }
-  LOperand* receiver() const { return right(); }
-  LOperand* length() const { return length_; }
-  LOperand* elements() const { return elements_; }
-
- private:
-  LOperand* length_;
-  LOperand* elements_;
+  LOperand* function() const { return InputAt(0); }
+  LOperand* receiver() const { return InputAt(1); }
+  LOperand* length() const { return InputAt(2); }
+  LOperand* elements() const { return InputAt(3); }
 };
 
 
-class LAccessArgumentsAt: public LInstruction {
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
  public:
-  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
-      : arguments_(arguments), length_(length), index_(index) { }
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    this->SetInputAt(0, arguments);
+    this->SetInputAt(1, length);
+    this->SetInputAt(2, index);
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  LOperand* arguments() const { return arguments_; }
-  LOperand* length() const { return length_; }
-  LOperand* index() const { return index_; }
+  LOperand* arguments() const { return this->InputAt(0); }
+  LOperand* length() const { return this->InputAt(1); }
+  LOperand* index() const { return this->InputAt(2); }
 
-  virtual void PrintDataTo(StringStream* stream) const;
-
- private:
-  LOperand* arguments_;
-  LOperand* length_;
-  LOperand* index_;
+  virtual void PrintDataTo(StringStream* stream);
 };
 
 
-class LArgumentsLength: public LUnaryOperation {
+class LArgumentsLength: public LUnaryOperation<1> {
  public:
-  explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+  explicit LArgumentsLength(LOperand* elements)
+      : LUnaryOperation<1>(elements) {}
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
 };
 
 
-class LArgumentsElements: public LInstruction {
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
   LArgumentsElements() { }
 
@@ -573,29 +599,29 @@
 };
 
 
-class LModI: public LBinaryOperation {
+class LModI: public LBinaryOperation<1> {
  public:
-  LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation<1>(left, right) { }
 
   DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
   DECLARE_HYDROGEN_ACCESSOR(Mod)
 };
 
 
-class LDivI: public LBinaryOperation {
+class LDivI: public LBinaryOperation<1> {
  public:
   LDivI(LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right) { }
+      : LBinaryOperation<1>(left, right) { }
 
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
   DECLARE_HYDROGEN_ACCESSOR(Div)
 };
 
 
-class LMulI: public LBinaryOperation {
+class LMulI: public LBinaryOperation<1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp)
-      : LBinaryOperation(left, right), temp_(temp) { }
+      : LBinaryOperation<1>(left, right), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
   DECLARE_HYDROGEN_ACCESSOR(Mul)
@@ -607,36 +633,33 @@
 };
 
 
-class LCmpID: public LBinaryOperation {
+class LCmpID: public LBinaryOperation<1> {
  public:
-  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
-      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+  LCmpID(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
 
-  Token::Value op() const { return op_; }
-  bool is_double() const { return is_double_; }
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->GetInputRepresentation().IsDouble();
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-
- private:
-  Token::Value op_;
-  bool is_double_;
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
 };
 
 
 class LCmpIDAndBranch: public LCmpID {
  public:
-  LCmpIDAndBranch(Token::Value op,
-                  LOperand* left,
+  LCmpIDAndBranch(LOperand* left,
                   LOperand* right,
                   int true_block_id,
-                  int false_block_id,
-                  bool is_double)
-      : LCmpID(op, left, right, is_double),
+                  int false_block_id)
+      : LCmpID(left, right),
         true_block_id_(true_block_id),
         false_block_id_(false_block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -648,23 +671,23 @@
 };
 
 
-class LUnaryMathOperation: public LUnaryOperation {
+class LUnaryMathOperation: public LUnaryOperation<1> {
  public:
   explicit LUnaryMathOperation(LOperand* value)
-      : LUnaryOperation(value) { }
+      : LUnaryOperation<1>(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   BuiltinFunctionId op() const { return hydrogen()->op(); }
 };
 
 
-class LCmpJSObjectEq: public LBinaryOperation {
+class LCmpJSObjectEq: public LBinaryOperation<1> {
  public:
   LCmpJSObjectEq(LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right) {}
+      : LBinaryOperation<1>(left, right) {}
 
   DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
 };
@@ -692,34 +715,30 @@
 };
 
 
-class LIsNull: public LUnaryOperation {
+class LIsNull: public LUnaryOperation<1> {
  public:
-  LIsNull(LOperand* value, bool is_strict)
-      : LUnaryOperation(value), is_strict_(is_strict) {}
+  explicit LIsNull(LOperand* value) : LUnaryOperation<1>(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+  DECLARE_HYDROGEN_ACCESSOR(IsNull)
 
-  bool is_strict() const { return is_strict_; }
-
- private:
-  bool is_strict_;
+  bool is_strict() const { return hydrogen()->is_strict(); }
 };
 
 
 class LIsNullAndBranch: public LIsNull {
  public:
   LIsNullAndBranch(LOperand* value,
-                   bool is_strict,
                    LOperand* temp,
                    int true_block_id,
                    int false_block_id)
-      : LIsNull(value, is_strict),
+      : LIsNull(value),
         temp_(temp),
         true_block_id_(true_block_id),
         false_block_id_(false_block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -734,10 +753,10 @@
 };
 
 
-class LIsObject: public LUnaryOperation {
+class LIsObject: public LUnaryOperation<1> {
  public:
   LIsObject(LOperand* value, LOperand* temp)
-      : LUnaryOperation(value), temp_(temp) {}
+      : LUnaryOperation<1>(value), temp_(temp) {}
 
   DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
 
@@ -761,7 +780,7 @@
         false_block_id_(false_block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -776,9 +795,9 @@
 };
 
 
-class LIsSmi: public LUnaryOperation {
+class LIsSmi: public LUnaryOperation<1> {
  public:
-  explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+  explicit LIsSmi(LOperand* value) : LUnaryOperation<1>(value) {}
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
   DECLARE_HYDROGEN_ACCESSOR(IsSmi)
@@ -795,7 +814,7 @@
         false_block_id_(false_block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -807,10 +826,10 @@
 };
 
 
-class LHasInstanceType: public LUnaryOperation {
+class LHasInstanceType: public LUnaryOperation<1> {
  public:
   explicit LHasInstanceType(LOperand* value)
-      : LUnaryOperation(value) { }
+      : LUnaryOperation<1>(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
@@ -833,7 +852,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
                                "has-instance-type-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -848,9 +867,9 @@
 };
 
 
-class LHasCachedArrayIndex: public LUnaryOperation {
+class LHasCachedArrayIndex: public LUnaryOperation<1> {
  public:
-  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation<1>(value) {}
 
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
@@ -868,7 +887,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -880,20 +899,20 @@
 };
 
 
-class LClassOfTest: public LUnaryOperation {
+class LClassOfTest: public LUnaryOperation<1> {
  public:
   LClassOfTest(LOperand* value, LOperand* temp)
-      : LUnaryOperation(value), temporary_(temp) {}
+      : LUnaryOperation<1>(value), temporary_(temp) {}
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
   LOperand* temporary() { return temporary_; }
 
  private:
-  LOperand *temporary_;
+  LOperand* temporary_;
 };
 
 
@@ -911,7 +930,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
                                "class-of-test-and-branch")
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -925,9 +944,9 @@
 };
 
 
-class LCmpT: public LBinaryOperation {
+class LCmpT: public LBinaryOperation<1> {
  public:
-  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation<1>(left, right) {}
 
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(Compare)
@@ -957,10 +976,10 @@
 };
 
 
-class LInstanceOf: public LBinaryOperation {
+class LInstanceOf: public LBinaryOperation<1> {
  public:
   LInstanceOf(LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right) { }
+      : LBinaryOperation<1>(left, right) { }
 
   DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
 };
@@ -987,10 +1006,10 @@
 };
 
 
-class LInstanceOfKnownGlobal: public LUnaryOperation {
+class LInstanceOfKnownGlobal: public LUnaryOperation<1> {
  public:
   LInstanceOfKnownGlobal(LOperand* left, LOperand* temp)
-      : LUnaryOperation(left), temp_(temp) { }
+      : LUnaryOperation<1>(left), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
                                "instance-of-known-global")
@@ -1004,10 +1023,10 @@
 };
 
 
-class LBoundsCheck: public LBinaryOperation {
+class LBoundsCheck: public LBinaryOperation<0> {
  public:
   LBoundsCheck(LOperand* index, LOperand* length)
-      : LBinaryOperation(index, length) { }
+      : LBinaryOperation<0>(index, length) { }
 
   LOperand* index() const { return left(); }
   LOperand* length() const { return right(); }
@@ -1016,10 +1035,10 @@
 };
 
 
-class LBitI: public LBinaryOperation {
+class LBitI: public LBinaryOperation<1> {
  public:
   LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right), op_(op) { }
+      : LBinaryOperation<1>(left, right), op_(op) { }
 
   Token::Value op() const { return op_; }
 
@@ -1030,10 +1049,10 @@
 };
 
 
-class LShiftI: public LBinaryOperation {
+class LShiftI: public LBinaryOperation<1> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
-      : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+      : LBinaryOperation<1>(left, right), op_(op), can_deopt_(can_deopt) { }
 
   Token::Value op() const { return op_; }
 
@@ -1047,17 +1066,17 @@
 };
 
 
-class LSubI: public LBinaryOperation {
+class LSubI: public LBinaryOperation<1> {
  public:
   LSubI(LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right) { }
+      : LBinaryOperation<1>(left, right) { }
 
   DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
   DECLARE_HYDROGEN_ACCESSOR(Sub)
 };
 
 
-class LConstant: public LInstruction {
+class LConstant: public LTemplateInstruction<1, 0, 0> {
   DECLARE_INSTRUCTION(Constant)
 };
 
@@ -1098,17 +1117,17 @@
 };
 
 
-class LBranch: public LUnaryOperation {
+class LBranch: public LUnaryOperation<0> {
  public:
   LBranch(LOperand* input, int true_block_id, int false_block_id)
-      : LUnaryOperation(input),
+      : LUnaryOperation<0>(input),
         true_block_id_(true_block_id),
         false_block_id_(false_block_id) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Value)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -1120,9 +1139,9 @@
 };
 
 
-class LCmpMapAndBranch: public LUnaryOperation {
+class LCmpMapAndBranch: public LUnaryOperation<0> {
  public:
-  explicit LCmpMapAndBranch(LOperand* value) : LUnaryOperation(value) { }
+  explicit LCmpMapAndBranch(LOperand* value) : LUnaryOperation<0>(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
@@ -1139,28 +1158,28 @@
 };
 
 
-class LJSArrayLength: public LUnaryOperation {
+class LJSArrayLength: public LUnaryOperation<1> {
  public:
-  explicit LJSArrayLength(LOperand* input) : LUnaryOperation(input) { }
+  explicit LJSArrayLength(LOperand* input) : LUnaryOperation<1>(input) { }
 
   DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
   DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
 };
 
 
-class LFixedArrayLength: public LUnaryOperation {
+class LFixedArrayLength: public LUnaryOperation<1> {
  public:
-  explicit LFixedArrayLength(LOperand* input) : LUnaryOperation(input) { }
+  explicit LFixedArrayLength(LOperand* input) : LUnaryOperation<1>(input) { }
 
   DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
   DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
 };
 
 
-class LValueOf: public LUnaryOperation {
+class LValueOf: public LUnaryOperation<1> {
  public:
   LValueOf(LOperand* input, LOperand* temporary)
-      : LUnaryOperation(input), temporary_(temporary) { }
+      : LUnaryOperation<1>(input), temporary_(temporary) { }
 
   LOperand* temporary() const { return temporary_; }
 
@@ -1172,46 +1191,46 @@
 };
 
 
-class LThrow: public LUnaryOperation {
+class LThrow: public LUnaryOperation<0> {
  public:
-  explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+  explicit LThrow(LOperand* value) : LUnaryOperation<0>(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
 };
 
 
-class LBitNotI: public LUnaryOperation {
+class LBitNotI: public LUnaryOperation<1> {
  public:
-  explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+  explicit LBitNotI(LOperand* input) : LUnaryOperation<1>(input) { }
 
   DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
 };
 
 
-class LAddI: public LBinaryOperation {
+class LAddI: public LBinaryOperation<1> {
  public:
   LAddI(LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right) { }
+      : LBinaryOperation<1>(left, right) { }
 
   DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
   DECLARE_HYDROGEN_ACCESSOR(Add)
 };
 
 
-class LPower: public LBinaryOperation {
+class LPower: public LBinaryOperation<1> {
  public:
   LPower(LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right) { }
+      : LBinaryOperation<1>(left, right) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Power, "power")
   DECLARE_HYDROGEN_ACCESSOR(Power)
 };
 
 
-class LArithmeticD: public LBinaryOperation {
+class LArithmeticD: public LBinaryOperation<1> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right), op_(op) { }
+      : LBinaryOperation<1>(left, right), op_(op) { }
 
   Token::Value op() const { return op_; }
 
@@ -1223,10 +1242,10 @@
 };
 
 
-class LArithmeticT: public LBinaryOperation {
+class LArithmeticT: public LBinaryOperation<1> {
  public:
   LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
-      : LBinaryOperation(left, right), op_(op) { }
+      : LBinaryOperation<1>(left, right), op_(op) { }
 
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
@@ -1238,26 +1257,26 @@
 };
 
 
-class LReturn: public LUnaryOperation {
+class LReturn: public LUnaryOperation<0> {
  public:
-  explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+  explicit LReturn(LOperand* use) : LUnaryOperation<0>(use) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Return, "return")
 };
 
 
-class LLoadNamedField: public LUnaryOperation {
+class LLoadNamedField: public LUnaryOperation<1> {
  public:
-  explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation<1>(object) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
 };
 
 
-class LLoadNamedGeneric: public LUnaryOperation {
+class LLoadNamedGeneric: public LUnaryOperation<1> {
  public:
-  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation<1>(object) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1267,10 +1286,10 @@
 };
 
 
-class LLoadFunctionPrototype: public LUnaryOperation {
+class LLoadFunctionPrototype: public LUnaryOperation<1> {
  public:
   LLoadFunctionPrototype(LOperand* function, LOperand* temporary)
-      : LUnaryOperation(function), temporary_(temporary) { }
+      : LUnaryOperation<1>(function), temporary_(temporary) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
   DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
@@ -1283,38 +1302,31 @@
 };
 
 
-class LLoadElements: public LUnaryOperation {
+class LLoadElements: public LUnaryOperation<1> {
  public:
-  explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation<1>(obj) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
 };
 
 
-class LLoadKeyedFastElement: public LBinaryOperation {
+class LLoadKeyedFastElement: public LBinaryOperation<1> {
  public:
-  LLoadKeyedFastElement(LOperand* elements,
-                        LOperand* key,
-                        LOperand* load_result)
-      : LBinaryOperation(elements, key),
-        load_result_(load_result) { }
+  LLoadKeyedFastElement(LOperand* elements, LOperand* key)
+      : LBinaryOperation<1>(elements, key) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
 
   LOperand* elements() const { return left(); }
   LOperand* key() const { return right(); }
-  LOperand* load_result() const { return load_result_; }
-
- private:
-  LOperand* load_result_;
 };
 
 
-class LLoadKeyedGeneric: public LBinaryOperation {
+class LLoadKeyedGeneric: public LBinaryOperation<1> {
  public:
   LLoadKeyedGeneric(LOperand* obj, LOperand* key)
-      : LBinaryOperation(obj, key) { }
+      : LBinaryOperation<1>(obj, key) { }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
 
@@ -1323,78 +1335,92 @@
 };
 
 
-class LLoadGlobal: public LInstruction {
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
 };
 
 
-class LStoreGlobal: public LUnaryOperation {
+class LStoreGlobal: public LUnaryOperation<0> {
  public:
-  explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation<0>(value) {}
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
 };
 
 
-class LPushArgument: public LUnaryOperation {
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int context_chain_length() const {
+    return hydrogen()->context_chain_length();
+  }
+  int slot_index() const { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LUnaryOperation<0> {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation<0>(argument) {}
 
   DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
 };
 
 
-class LGlobalObject: public LInstruction {
+class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
 };
 
 
-class LGlobalReceiver: public LInstruction {
+class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
 };
 
 
-class LCallConstantFunction: public LInstruction {
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
   DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
-  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  Handle<JSFunction> function() { return hydrogen()->function(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallKeyed: public LInstruction {
+class LCallKeyed: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
   DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNamed: public LInstruction {
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
   DECLARE_HYDROGEN_ACCESSOR(CallNamed)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
   Handle<String> name() const { return hydrogen()->name(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction: public LInstruction {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1403,44 +1429,44 @@
 };
 
 
-class LCallGlobal: public LInstruction {
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
   DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
+  Handle<String> name() const { return hydrogen()->name(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallKnownGlobal: public LInstruction {
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
   DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNew: public LUnaryOperation {
+class LCallNew: public LUnaryOperation<1> {
  public:
-  explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation<1>(constructor) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime: public LInstruction {
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1450,26 +1476,26 @@
 };
 
 
-class LInteger32ToDouble: public LUnaryOperation {
+class LInteger32ToDouble: public LUnaryOperation<1> {
  public:
-  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation<1>(use) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
 };
 
 
-class LNumberTagI: public LUnaryOperation {
+class LNumberTagI: public LUnaryOperation<1> {
  public:
-  explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation<1>(use) { }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
 };
 
 
-class LNumberTagD: public LUnaryOperation {
+class LNumberTagD: public LUnaryOperation<1> {
  public:
   explicit LNumberTagD(LOperand* value, LOperand* temp)
-      : LUnaryOperation(value), temp_(temp) { }
+      : LUnaryOperation<1>(value), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
 
@@ -1481,22 +1507,27 @@
 
 
 // Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LUnaryOperation {
+class LDoubleToI: public LUnaryOperation<1> {
  public:
-  explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+  LDoubleToI(LOperand* value, LOperand* temporary)
+      : LUnaryOperation<1>(value), temporary_(temporary) { }
 
   DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
   DECLARE_HYDROGEN_ACCESSOR(Change)
 
   bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temporary() const { return temporary_; }
+
+ private:
+  LOperand* temporary_;
 };
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LUnaryOperation {
+class LTaggedToI: public LUnaryOperation<1> {
  public:
   LTaggedToI(LOperand* value, LOperand* temp)
-      : LUnaryOperation(value), temp_(temp) { }
+      : LUnaryOperation<1>(value), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
   DECLARE_HYDROGEN_ACCESSOR(Change)
@@ -1509,26 +1540,26 @@
 };
 
 
-class LSmiTag: public LUnaryOperation {
+class LSmiTag: public LUnaryOperation<1> {
  public:
-  explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+  explicit LSmiTag(LOperand* use) : LUnaryOperation<1>(use) { }
 
   DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
 };
 
 
-class LNumberUntagD: public LUnaryOperation {
+class LNumberUntagD: public LUnaryOperation<1> {
  public:
-  explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation<1>(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
 };
 
 
-class LSmiUntag: public LUnaryOperation {
+class LSmiUntag: public LUnaryOperation<1> {
  public:
   LSmiUntag(LOperand* use, bool needs_check)
-      : LUnaryOperation(use), needs_check_(needs_check) { }
+      : LUnaryOperation<1>(use), needs_check_(needs_check) { }
 
   DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
 
@@ -1539,89 +1570,69 @@
 };
 
 
-class LStoreNamed: public LInstruction {
+class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
  public:
-  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
-      : object_(obj), name_(name), value_(val) { }
+  LStoreNamed(LOperand* obj, LOperand* val) {
+    this->SetInputAt(0, obj);
+    this->SetInputAt(1, val);
+  }
 
   DECLARE_INSTRUCTION(StoreNamed)
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
-  LOperand* object() const { return object_; }
-  Handle<Object> name() const { return name_; }
-  LOperand* value() const { return value_; }
-
- private:
-  LOperand* object_;
-  Handle<Object> name_;
-  LOperand* value_;
+  LOperand* object() const { return this->InputAt(0); }
+  LOperand* value() const { return this->InputAt(1); }
+  Handle<Object> name() const { return hydrogen()->name(); }
 };
 
 
 class LStoreNamedField: public LStoreNamed {
  public:
-  LStoreNamedField(LOperand* obj,
-                   Handle<Object> name,
-                   LOperand* val,
-                   bool in_object,
-                   int offset,
-                   LOperand* temp,
-                   bool needs_write_barrier,
-                   Handle<Map> transition)
-      : LStoreNamed(obj, name, val),
-        is_in_object_(in_object),
-        offset_(offset),
-        temp_(temp),
-        needs_write_barrier_(needs_write_barrier),
-        transition_(transition) { }
+  LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
+      : LStoreNamed(obj, val), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  bool is_in_object() { return is_in_object_; }
-  int offset() { return offset_; }
+  bool is_in_object() { return hydrogen()->is_in_object(); }
+  int offset() { return hydrogen()->offset(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+  Handle<Map> transition() const { return hydrogen()->transition(); }
+
   LOperand* temp() { return temp_; }
-  bool needs_write_barrier() { return needs_write_barrier_; }
-  Handle<Map> transition() const { return transition_; }
-  void set_transition(Handle<Map> map) { transition_ = map; }
 
  private:
-  bool is_in_object_;
-  int offset_;
   LOperand* temp_;
-  bool needs_write_barrier_;
-  Handle<Map> transition_;
 };
 
 
 class LStoreNamedGeneric: public LStoreNamed {
  public:
-  LStoreNamedGeneric(LOperand* obj,
-                     Handle<Object> name,
-                     LOperand* val)
-      : LStoreNamed(obj, name, val) { }
+  LStoreNamedGeneric(LOperand* obj, LOperand* val)
+      : LStoreNamed(obj, val) { }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 };
 
 
-class LStoreKeyed: public LInstruction {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
  public:
-  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
-      : object_(obj), key_(key), value_(val) { }
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+    this->SetInputAt(0, obj);
+    this->SetInputAt(1, key);
+    this->SetInputAt(2, val);
+  }
 
   DECLARE_INSTRUCTION(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
 
-  LOperand* object() const { return object_; }
-  LOperand* key() const { return key_; }
-  LOperand* value() const { return value_; }
-
- private:
-  LOperand* object_;
-  LOperand* key_;
-  LOperand* value_;
+  LOperand* object() const { return this->InputAt(0); }
+  LOperand* key() const { return this->InputAt(1); }
+  LOperand* value() const { return this->InputAt(2); }
 };
 
 
@@ -1645,19 +1656,19 @@
 };
 
 
-class LCheckFunction: public LUnaryOperation {
+class LCheckFunction: public LUnaryOperation<0> {
  public:
-  explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation<0>(use) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
 
 
-class LCheckInstanceType: public LUnaryOperation {
+class LCheckInstanceType: public LUnaryOperation<0> {
  public:
   LCheckInstanceType(LOperand* use, LOperand* temp)
-      : LUnaryOperation(use), temp_(temp) { }
+      : LUnaryOperation<0>(use), temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
   DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
@@ -1669,41 +1680,36 @@
 };
 
 
-class LCheckMap: public LUnaryOperation {
+class LCheckMap: public LUnaryOperation<0> {
  public:
-  explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+  explicit LCheckMap(LOperand* use) : LUnaryOperation<0>(use) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
   DECLARE_HYDROGEN_ACCESSOR(CheckMap)
 };
 
 
-class LCheckPrototypeMaps: public LInstruction {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 0> {
  public:
-  LCheckPrototypeMaps(LOperand* temp,
-                      Handle<JSObject> holder,
-                      Handle<Map> receiver_map)
-      : temp_(temp),
-        holder_(holder),
-        receiver_map_(receiver_map) { }
+  explicit LCheckPrototypeMaps(LOperand* temp) : temp_(temp) { }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+  Handle<JSObject> holder() const { return hydrogen()->holder(); }
 
   LOperand* temp() const { return temp_; }
-  Handle<JSObject> holder() const { return holder_; }
-  Handle<Map> receiver_map() const { return receiver_map_; }
 
  private:
   LOperand* temp_;
-  Handle<JSObject> holder_;
-  Handle<Map> receiver_map_;
 };
 
 
-class LCheckSmi: public LUnaryOperation {
+class LCheckSmi: public LUnaryOperation<0> {
  public:
   LCheckSmi(LOperand* use, Condition condition)
-      : LUnaryOperation(use), condition_(condition) { }
+      : LUnaryOperation<0>(use), condition_(condition) { }
 
   Condition condition() const { return condition_; }
 
@@ -1717,7 +1723,7 @@
 };
 
 
-class LMaterializedLiteral: public LInstruction {
+class LMaterializedLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_INSTRUCTION(MaterializedLiteral)
 };
@@ -1744,7 +1750,7 @@
 };
 
 
-class LFunctionLiteral: public LInstruction {
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
   DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
@@ -1753,18 +1759,18 @@
 };
 
 
-class LTypeof: public LUnaryOperation {
+class LTypeof: public LUnaryOperation<1> {
  public:
-  explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+  explicit LTypeof(LOperand* input) : LUnaryOperation<1>(input) { }
 
   DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
 };
 
 
-class LTypeofIs: public LUnaryOperation {
+class LTypeofIs: public LUnaryOperation<1> {
  public:
-  explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
-  virtual void PrintDataTo(StringStream* stream) const;
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation<1>(input) { }
+  virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
   DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
@@ -1784,7 +1790,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream);
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -1796,9 +1802,10 @@
 };
 
 
-class LDeleteProperty: public LBinaryOperation {
+class LDeleteProperty: public LBinaryOperation<1> {
  public:
-  LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+  LDeleteProperty(LOperand* obj, LOperand* key)
+      : LBinaryOperation<1>(obj, key) { }
 
   DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
 
@@ -1807,7 +1814,7 @@
 };
 
 
-class LOsrEntry: public LInstruction {
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry();
 
@@ -1830,114 +1837,12 @@
 };
 
 
-class LStackCheck: public LInstruction {
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
 };
 
 
-class LPointerMap: public ZoneObject {
- public:
-  explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
-  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
-  int position() const { return position_; }
-  int lithium_position() const { return lithium_position_; }
-
-  void set_lithium_position(int pos) {
-    ASSERT(lithium_position_ == -1);
-    lithium_position_ = pos;
-  }
-
-  void RecordPointer(LOperand* op);
-  void PrintTo(StringStream* stream) const;
-
- private:
-  ZoneList<LOperand*> pointer_operands_;
-  int position_;
-  int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
-  LEnvironment(Handle<JSFunction> closure,
-               int ast_id,
-               int parameter_count,
-               int argument_count,
-               int value_count,
-               LEnvironment* outer)
-      : closure_(closure),
-        arguments_stack_height_(argument_count),
-        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
-        translation_index_(-1),
-        ast_id_(ast_id),
-        parameter_count_(parameter_count),
-        values_(value_count),
-        representations_(value_count),
-        spilled_registers_(NULL),
-        spilled_double_registers_(NULL),
-        outer_(outer) {
-  }
-
-  Handle<JSFunction> closure() const { return closure_; }
-  int arguments_stack_height() const { return arguments_stack_height_; }
-  int deoptimization_index() const { return deoptimization_index_; }
-  int translation_index() const { return translation_index_; }
-  int ast_id() const { return ast_id_; }
-  int parameter_count() const { return parameter_count_; }
-  const ZoneList<LOperand*>* values() const { return &values_; }
-  LEnvironment* outer() const { return outer_; }
-
-  void AddValue(LOperand* operand, Representation representation) {
-    values_.Add(operand);
-    representations_.Add(representation);
-  }
-
-  bool HasTaggedValueAt(int index) const {
-    return representations_[index].IsTagged();
-  }
-
-  void Register(int deoptimization_index, int translation_index) {
-    ASSERT(!HasBeenRegistered());
-    deoptimization_index_ = deoptimization_index;
-    translation_index_ = translation_index;
-  }
-  bool HasBeenRegistered() const {
-    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
-  }
-
-  void SetSpilledRegisters(LOperand** registers,
-                           LOperand** double_registers) {
-    spilled_registers_ = registers;
-    spilled_double_registers_ = double_registers;
-  }
-
-  // Emit frame translation commands for this environment.
-  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
-
-  void PrintTo(StringStream* stream) const;
-
- private:
-  Handle<JSFunction> closure_;
-  int arguments_stack_height_;
-  int deoptimization_index_;
-  int translation_index_;
-  int ast_id_;
-  int parameter_count_;
-  ZoneList<LOperand*> values_;
-  ZoneList<Representation> representations_;
-
-  // Allocation index indexed arrays of spill slot operands for registers
-  // that are also in spill slots at an OSR entry.  NULL for environments
-  // that do not correspond to an OSR entry.
-  LOperand** spilled_registers_;
-  LOperand** spilled_double_registers_;
-
-  LEnvironment* outer_;
-};
-
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -1993,8 +1898,6 @@
     inlined_closures_.Add(closure);
   }
 
-  void Verify() const;
-
  private:
   int spill_slot_count_;
   HGraph* const graph_;
@@ -2077,13 +1980,24 @@
 
   // Methods for setting up define-use relationships.
   // Return the same instruction that they are passed.
-  LInstruction* Define(LInstruction* instr, LUnallocated* result);
-  LInstruction* Define(LInstruction* instr);
-  LInstruction* DefineAsRegister(LInstruction* instr);
-  LInstruction* DefineAsSpilled(LInstruction* instr, int index);
-  LInstruction* DefineSameAsFirst(LInstruction* instr);
-  LInstruction* DefineFixed(LInstruction* instr, Register reg);
-  LInstruction* DefineFixedDouble(LInstruction* instr, XMMRegister reg);
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+                           LUnallocated* result);
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+                                    int index);
+  template<int I, int T>
+      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                Register reg);
+  template<int I, int T>
+      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+                                      XMMRegister reg);
   LInstruction* AssignEnvironment(LInstruction* instr);
   LInstruction* AssignPointerMap(LInstruction* instr);
 
@@ -2104,8 +2018,6 @@
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
 
-  // Temporary operand that may be a memory location.
-  LOperand* Temp();
   // Temporary operand that must be in a register.
   LUnallocated* TempRegister();
   LOperand* FixedTemp(Register reg);
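
The lithium-ia32.h changes above convert the instruction classes from hand-written LOperand* members to a fixed-arity LTemplateInstruction<R, I, T> base, with operands stored through SetInputAt and read back through InputAt, and with immutable data (names, offsets, comparison tokens) fetched from the matching hydrogen instruction via DECLARE_HYDROGEN_ACCESSOR instead of being duplicated in lithium fields. Below is a minimal, self-contained C++ sketch of the operand-array half of that pattern; Operand, TemplateInstruction and BinaryOperation are illustrative stand-ins, not the actual V8 declarations.

    #include <array>
    #include <cassert>

    // Stand-in operand type; the real LOperand carries far more state.
    struct Operand { int id; };

    // Sketch of a fixed-arity instruction template: R results, I inputs and
    // T temps live in embedded arrays, so subclasses need no pointer fields.
    template <int R, int I, int T>
    class TemplateInstruction {
     public:
      void SetInputAt(int index, Operand* value) { inputs_[index] = value; }
      Operand* InputAt(int index) const { return inputs_[index]; }

     protected:
      std::array<Operand*, R> results_{};
      std::array<Operand*, I> inputs_{};
      std::array<Operand*, T> temps_{};
    };

    // A binary operation only forwards its operands into the input array.
    template <int R>
    class BinaryOperation : public TemplateInstruction<R, 2, 0> {
     public:
      BinaryOperation(Operand* left, Operand* right) {
        this->SetInputAt(0, left);
        this->SetInputAt(1, right);
      }
      Operand* left() const { return this->InputAt(0); }
      Operand* right() const { return this->InputAt(1); }
    };

    int main() {
      Operand a{1}, b{2};
      BinaryOperation<1> op(&a, &b);
      assert(op.left() == &a && op.right() == &b);
      return 0;
    }

The payoff is that generic code can walk any instruction's inputs and temps without knowing the concrete class, which is presumably what the templated Define* helpers near the end of the header rely on.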
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a6f4679..10c942a 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -877,55 +877,53 @@
       Immediate(Factory::cons_ascii_string_map()));
 }
 
-// All registers must be distinct.  Only current_string needs valid contents
-// on entry.  All registers may be invalid on exit.  result_operand is
-// unchanged, padding_chars is updated correctly.
-void MacroAssembler::AppendStringToTopOfNewSpace(
-    Register current_string,  // Tagged pointer to string to copy.
-    Register current_string_length,
-    Register result_pos,
-    Register scratch,
-    Register new_padding_chars,
-    Operand operand_result,
-    Operand operand_padding_chars,
-    Label* bailout) {
-  mov(current_string_length,
-      FieldOperand(current_string, String::kLengthOffset));
-  shr(current_string_length, 1);
-  sub(current_string_length, operand_padding_chars);
-  mov(new_padding_chars, current_string_length);
-  add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
-  and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
-  sub(new_padding_chars, Operand(current_string_length));
-  neg(new_padding_chars);
-  // We need an allocation even if current_string_length is 0, to fetch
-  // result_pos.  Consider using a faster fetch of result_pos in that case.
-  AllocateInNewSpace(current_string_length, result_pos, scratch, no_reg,
-                     bailout, NO_ALLOCATION_FLAGS);
-  sub(result_pos, operand_padding_chars);
-  mov(operand_padding_chars, new_padding_chars);
 
-  Register scratch_2 = new_padding_chars;  // Used to compute total length.
-  // Copy string to the end of result.
-  mov(current_string_length,
-      FieldOperand(current_string, String::kLengthOffset));
-  mov(scratch, operand_result);
-  mov(scratch_2, current_string_length);
-  add(scratch_2, FieldOperand(scratch, String::kLengthOffset));
-  mov(FieldOperand(scratch, String::kLengthOffset), scratch_2);
-  shr(current_string_length, 1);
-  lea(current_string,
-      FieldOperand(current_string, SeqAsciiString::kHeaderSize));
-  // Loop condition: while (--current_string_length >= 0).
-  Label copy_loop;
-  Label copy_loop_entry;
-  jmp(&copy_loop_entry);
-  bind(&copy_loop);
-  mov_b(scratch, Operand(current_string, current_string_length, times_1, 0));
-  mov_b(Operand(result_pos, current_string_length, times_1, 0), scratch);
-  bind(&copy_loop_entry);
-  sub(Operand(current_string_length), Immediate(1));
-  j(greater_equal, &copy_loop);
+// Copy memory, byte-by-byte, from source to destination.  Not optimized for
+// long or aligned copies.  The contents of scratch and length are destroyed.
+// Source and destination are incremented by length.
+// Many variants of movsb, loop unrolling, word moves, and indexed operands
+// have been tried here already, and this is fastest.
+// A simpler loop is faster on small copies, but 30% slower on large ones.
+// The cld() instruction must have been emitted, to clear the direction flag,
+// before calling this function.
+void MacroAssembler::CopyBytes(Register source,
+                               Register destination,
+                               Register length,
+                               Register scratch) {
+  Label loop, done, short_string, short_loop;
+  // Experimentation shows that the short string loop is faster if length < 10.
+  cmp(Operand(length), Immediate(10));
+  j(less_equal, &short_string);
+
+  ASSERT(source.is(esi));
+  ASSERT(destination.is(edi));
+  ASSERT(length.is(ecx));
+
+  // Because source is 4-byte aligned in our uses of this function,
+  // we keep source aligned for the rep_movs call by copying the odd bytes
+  // at the end of the ranges.
+  mov(scratch, Operand(source, length, times_1, -4));
+  mov(Operand(destination, length, times_1, -4), scratch);
+  mov(scratch, ecx);
+  shr(ecx, 2);
+  rep_movs();
+  and_(Operand(scratch), Immediate(0x3));
+  add(destination, Operand(scratch));
+  jmp(&done);
+
+  bind(&short_string);
+  test(length, Operand(length));
+  j(zero, &done);
+
+  bind(&short_loop);
+  mov_b(scratch, Operand(source, 0));
+  mov_b(Operand(destination, 0), scratch);
+  inc(source);
+  inc(destination);
+  dec(length);
+  j(not_zero, &short_loop);
+
+  bind(&done);
 }
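
The replacement CopyBytes follows the strategy described in its comment: lengths of ten or less take a plain byte loop, while longer copies write the ragged tail word first so the bulk of the data can move in whole 4-byte units. A portable C++ sketch of that idea, purely illustrative rather than the emitted ia32 code:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrates the CopyBytes strategy: copy the last (possibly unaligned)
    // 4 bytes first, then move floor(length / 4) whole words from the start;
    // the overlap covers the remainder without a separate tail loop.
    void CopyBytesSketch(const uint8_t* source, uint8_t* destination,
                         std::size_t length) {
      if (length <= 10) {  // Threshold mirrors the short-string check above.
        for (std::size_t i = 0; i < length; ++i) destination[i] = source[i];
        return;
      }
      std::memcpy(destination + length - 4, source + length - 4, 4);
      std::memcpy(destination, source, (length / 4) * 4);
    }

    int main() {
      const char text[] = "lithium register allocator";
      char copy[sizeof(text)] = {};
      CopyBytesSketch(reinterpret_cast<const uint8_t*>(text),
                      reinterpret_cast<uint8_t*>(copy), sizeof(text));
      std::printf("%s\n", copy);
      return 0;
    }

The main simplification versus the assembly is that rep_movs and the direction flag do not appear; only the tail-first trick and the short-copy threshold are carried over.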
 
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 6f5fa87..6f180c6 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -386,22 +386,13 @@
                                Register scratch2,
                                Label* gc_required);
 
-  // All registers must be distinct.  Only current_string needs valid contents
-  // on entry.  All registers may be invalid on exit.  result_operand is
-  // unchanged, padding_chars is updated correctly.
-  // The top of new space must contain a sequential ascii string with
-  // padding_chars bytes free in its top word.  The sequential ascii string
-  // current_string is concatenated to it, allocating the necessary amount
-  // of new memory.
-  void AppendStringToTopOfNewSpace(
-      Register current_string,  // Tagged pointer to string to copy.
-      Register current_string_length,
-      Register result_pos,
-      Register scratch,
-      Register new_padding_chars,
-      Operand operand_result,
-      Operand operand_padding_chars,
-      Label* bailout);
+  // Copy memory, byte-by-byte, from source to destination.  Not optimized for
+  // long or aligned copies.
+  // The contents of scratch and length are destroyed.
+  void CopyBytes(Register source,
+                 Register destination,
+                 Register length,
+                 Register scratch);
 
   // ---------------------------------------------------------------------------
   // Support functions.
diff --git a/src/json.js b/src/json.js
index 0034176..e90d5d1 100644
--- a/src/json.js
+++ b/src/json.js
@@ -179,24 +179,60 @@
 
 
 function BasicSerializeArray(value, stack, builder) {
+  var len = value.length;
+  if (len == 0) {
+    builder.push("[]");
+    return;
+  }
   if (!%PushIfAbsent(stack, value)) {
     throw MakeTypeError('circular_structure', []);
   }
   builder.push("[");
-  var len = value.length;
-  for (var i = 0; i < len; i++) {
+  var val = value[0];
+  if (IS_STRING(val)) {
+    // First entry is a string. Remaining entries are likely to be strings too.
+    builder.push(%QuoteJSONString(val));
+    for (var i = 1; i < len; i++) {
+      val = value[i];
+      if (IS_STRING(val)) {
+        builder.push(%QuoteJSONStringComma(val));
+      } else {
+        builder.push(",");
+        var before = builder.length;
+        BasicJSONSerialize(i, value[i], stack, builder);
+        if (before == builder.length) builder[before - 1] = ",null";
+      }
+    }
+  } else if (IS_NUMBER(val)) {
+    // First entry is a number. Remaining entries are likely to be numbers too.
+    builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
+    for (var i = 1; i < len; i++) {
+      builder.push(",");
+      val = value[i];
+      if (IS_NUMBER(val)) {
+        builder.push(NUMBER_IS_FINITE(val)
+                     ? %_NumberToString(val)
+                     : "null");
+      } else {
+        var before = builder.length;
+        BasicJSONSerialize(i, value[i], stack, builder);
+        if (before == builder.length) builder[before - 1] = ",null";
+      }
+    }
+  } else {
     var before = builder.length;
-    BasicJSONSerialize(i, value, stack, builder);
+    BasicJSONSerialize(0, val, stack, builder);
     if (before == builder.length) builder.push("null");
-    builder.push(",");
+    for (var i = 1; i < len; i++) {
+      builder.push(",");
+      before = builder.length;
+      val = value[i];
+      BasicJSONSerialize(i, val, stack, builder);
+      if (before == builder.length) builder[before - 1] = ",null";
+    }
   }
   stack.pop();
-  if (builder.pop() != ",") {
-    builder.push("[]");  // Zero length array. Push "[" back on.
-  } else {
-    builder.push("]");
-  }
-
+  builder.push("]");
 }
 
 
@@ -205,31 +241,31 @@
     throw MakeTypeError('circular_structure', []);
   }
   builder.push("{");
+  var first = true;
   for (var p in value) {
     if (%HasLocalProperty(value, p)) {
-      builder.push(%QuoteJSONString(p));
+      if (!first) {
+        builder.push(%QuoteJSONStringComma(p));
+      } else {
+        builder.push(%QuoteJSONString(p));
+      }
       builder.push(":");
       var before = builder.length;
-      BasicJSONSerialize(p, value, stack, builder);
+      BasicJSONSerialize(p, value[p], stack, builder);
       if (before == builder.length) {
         builder.pop();
         builder.pop();
       } else {
-        builder.push(",");
+        first = false;
       }
     }
   }
   stack.pop();
-  if (builder.pop() != ",") {
-    builder.push("{}");  // Object has no own properties. Push "{" back on.
-  } else {
-    builder.push("}");
-  }
+  builder.push("}");
 }
 
 
-function BasicJSONSerialize(key, holder, stack, builder) {
-  var value = holder[key];
+function BasicJSONSerialize(key, value, stack, builder) {
   if (IS_SPEC_OBJECT(value)) {
     var toJSON = value.toJSON;
     if (IS_FUNCTION(toJSON)) {
@@ -266,7 +302,7 @@
 function JSONStringify(value, replacer, space) {
   if (%_ArgumentsLength() == 1) {
     var builder = [];
-    BasicJSONSerialize('', {'': value}, [], builder);
+    BasicJSONSerialize('', value, [], builder);
     if (builder.length == 0) return;
     var result = %_FastAsciiArrayJoin(builder, "");
     if (!IS_UNDEFINED(result)) return result;
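
Both serializers above drop the old approach of pushing a trailing comma and repairing it afterwards; instead the separator is emitted before every element after the first, so the builder never ends with a stray comma. A standalone C++ sketch of that builder pattern, independent of the %QuoteJSONString runtime calls used in json.js:

    #include <iostream>
    #include <string>
    #include <vector>

    // Separator-before-element pattern: the first entry is pushed bare and
    // every later entry prepends the comma itself, so no cleanup is needed.
    std::string JoinAsJsonArray(const std::vector<std::string>& encoded) {
      if (encoded.empty()) return "[]";  // Fast path, as in the JS version.
      std::string builder = "[";
      bool first = true;
      for (const std::string& value : encoded) {
        if (!first) builder += ",";
        builder += value;
        first = false;
      }
      builder += "]";
      return builder;
    }

    int main() {
      // Prints ["a","b",3]
      std::cout << JoinAsJsonArray({"\"a\"", "\"b\"", "3"}) << "\n";
      return 0;
    }

The same flag-based approach drives BasicSerializeObject, where %QuoteJSONStringComma appears to fuse the separator and the quoted key into a single builder entry.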
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index e0f2e62..8e7c35f 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -425,7 +425,7 @@
     Handle<JSRegExp> regexp,
     Handle<String> subject,
     int index,
-    Vector<int32_t> output) {
+    Vector<int> output) {
   Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
 
   ASSERT(index >= 0);
@@ -521,8 +521,8 @@
   OffsetsVector registers(required_registers);
 
   IrregexpResult res = RegExpImpl::IrregexpExecOnce(
-      jsregexp, subject, previous_index, Vector<int32_t>(registers.vector(),
-                                                         registers.length()));
+      jsregexp, subject, previous_index, Vector<int>(registers.vector(),
+                                                     registers.length()));
   if (res == RE_SUCCESS) {
     int capture_register_count =
         (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 6f04be3..af28a87 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -114,7 +114,7 @@
   static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
                                          Handle<String> subject,
                                          int index,
-                                         Vector<int32_t> registers);
+                                         Vector<int> registers);
 
   // Execute an Irregexp bytecode pattern.
   // On a successful match, the result is a JSArray containing
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index abdef09..29662c9 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -828,6 +828,10 @@
         AllocateFixed(cur_input, gap_index + 1, is_tagged);
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
       } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+        // The live range of writable input registers always goes until the end
+        // of the instruction.
+        ASSERT(!cur_input->IsUsedAtStart());
+
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
         cur_input->set_virtual_register(next_virtual_register_++);
 
@@ -837,7 +841,6 @@
               cur_input->virtual_register() - first_artificial_register_);
         }
 
-        second->AddTemp(cur_input);
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
       }
     }
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 454e302..dfe1953 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -50,7 +50,6 @@
 class LChunk;
 class LConstantOperand;
 class LGap;
-class LInstruction;
 class LParallelMove;
 class LPointerMap;
 class LStackSlot;
@@ -706,6 +705,7 @@
   bool HasAllocatedSpillOperand() const {
     return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
   }
+
   LOperand* GetSpillOperand() const { return spill_operand_; }
   void SetSpillOperand(LOperand* operand) {
     ASSERT(!operand->IsUnallocated());
@@ -723,7 +723,6 @@
   bool Covers(LifetimePosition position);
   LifetimePosition FirstIntersection(LiveRange* other);
 
-
   // Add a new interval or a new use position to this live range.
   void EnsureInterval(LifetimePosition start, LifetimePosition end);
   void AddUseInterval(LifetimePosition start, LifetimePosition end);
diff --git a/src/lithium.cc b/src/lithium.cc
index 92e81d3..e066e7d 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -30,149 +30,63 @@
 namespace v8 {
 namespace internal {
 
-
-class LGapNode: public ZoneObject {
- public:
-  explicit LGapNode(LOperand* operand)
-      : operand_(operand), resolved_(false), visited_id_(-1) { }
-
-  LOperand* operand() const { return operand_; }
-  bool IsResolved() const { return !IsAssigned() || resolved_; }
-  void MarkResolved() {
-    ASSERT(!IsResolved());
-    resolved_ = true;
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
   }
-  int visited_id() const { return visited_id_; }
-  void set_visited_id(int id) {
-    ASSERT(id > visited_id_);
-    visited_id_ = id;
-  }
-
-  bool IsAssigned() const { return assigned_from_.is_set(); }
-  LGapNode* assigned_from() const { return assigned_from_.get(); }
-  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
-  LOperand* operand_;
-  SetOncePointer<LGapNode> assigned_from_;
-  bool resolved_;
-  int visited_id_;
-};
-
-
-LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
-                           LOperand* marker_operand)
-    : nodes_(4),
-      identified_cycles_(4),
-      result_(4),
-      marker_operand_(marker_operand),
-      next_visited_id_(0) {
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) RegisterMove(move);
-  }
+  return true;
 }
 
 
-const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
-  for (int i = 0; i < identified_cycles_.length(); ++i) {
-    ResolveCycle(identified_cycles_[i]);
-  }
-
-  int unresolved_nodes;
-  do {
-    unresolved_nodes = 0;
-    for (int j = 0; j < nodes_.length(); j++) {
-      LGapNode* node = nodes_[j];
-      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
-        AddResultMove(node->assigned_from(), node);
-        node->MarkResolved();
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  for (int i = move_operands_.length() - 1; i >= 0; --i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* from = move_operands_[i].from();
+      LOperand* to = move_operands_[i].to();
+      if (from->Equals(to)) {
+        to->PrintTo(stream);
+      } else {
+        to->PrintTo(stream);
+        stream->Add(" = ");
+        from->PrintTo(stream);
       }
-      if (!node->IsResolved()) ++unresolved_nodes;
+      stream->Add("; ");
     }
-  } while (unresolved_nodes > 0);
-  return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
-  AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
-  result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start) {
-  ZoneList<LOperand*> circle_operands(8);
-  circle_operands.Add(marker_operand_);
-  LGapNode* cur = start;
-  do {
-    cur->MarkResolved();
-    circle_operands.Add(cur->operand());
-    cur = cur->assigned_from();
-  } while (cur != start);
-  circle_operands.Add(marker_operand_);
-
-  for (int i = circle_operands.length() - 1; i > 0; --i) {
-    LOperand* from = circle_operands[i];
-    LOperand* to = circle_operands[i - 1];
-    AddResultMove(from, to);
   }
 }
 
 
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
-  ASSERT(a != b);
-  LGapNode* cur = a;
-  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
-    cur->set_visited_id(visited_id);
-    cur = cur->assigned_from();
-  }
-
-  return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
-  ASSERT(a != b);
-  return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
-  if (move.from()->IsConstantOperand()) {
-    // Constant moves should be last in the machine code. Therefore add them
-    // first to the result set.
-    AddResultMove(move.from(), move.to());
-  } else {
-    LGapNode* from = LookupNode(move.from());
-    LGapNode* to = LookupNode(move.to());
-    if (to->IsAssigned() && to->assigned_from() == from) {
-      move.Eliminate();
-      return;
+void LEnvironment::PrintTo(StringStream* stream) {
+  stream->Add("[id=%d|", ast_id());
+  stream->Add("[parameters=%d|", parameter_count());
+  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
     }
-    ASSERT(!to->IsAssigned());
-    if (CanReach(from, to)) {
-      // This introduces a circle. Save.
-      identified_cycles_.Add(from);
-    }
-    to->set_assigned_from(from);
   }
+  stream->Add("]");
 }
 
 
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
-  for (int i = 0; i < nodes_.length(); ++i) {
-    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
-  }
+void LPointerMap::RecordPointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op);
+}
 
-  // No node found => create a new one.
-  LGapNode* result = new LGapNode(operand);
-  nodes_.Add(result);
-  return result;
+
+void LPointerMap::PrintTo(StringStream* stream) {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("} @%d", position());
 }
 
 
diff --git a/src/lithium.h b/src/lithium.h
index 0ea3769..f4ae2aa 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -28,36 +28,142 @@
 #ifndef V8_LITHIUM_H_
 #define V8_LITHIUM_H_
 
+#include "hydrogen.h"
 #include "lithium-allocator.h"
+#include "safepoint-table.h"
 
 namespace v8 {
 namespace internal {
 
-class LGapNode;
+class LCodeGen;
+class Translation;
 
-class LGapResolver BASE_EMBEDDED {
+class LParallelMove : public ZoneObject {
  public:
-  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
-  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+  LParallelMove() : move_operands_(4) { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    move_operands_.Add(LMoveOperands(from, to));
+  }
+
+  bool IsRedundant() const;
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+  void PrintDataTo(StringStream* stream) const;
 
  private:
-  LGapNode* LookupNode(LOperand* operand);
-  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
-  bool CanReach(LGapNode* a, LGapNode* b);
-  void RegisterMove(LMoveOperands move);
-  void AddResultMove(LOperand* from, LOperand* to);
-  void AddResultMove(LGapNode* from, LGapNode* to);
-  void ResolveCycle(LGapNode* start);
-
-  ZoneList<LGapNode*> nodes_;
-  ZoneList<LGapNode*> identified_cycles_;
-  ZoneList<LMoveOperands> result_;
-  LOperand* marker_operand_;
-  int next_visited_id_;
-  int bailout_after_ast_id_;
+  ZoneList<LMoveOperands> move_operands_;
 };
 
 
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position)
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  int position() const { return position_; }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    ASSERT(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op);
+  void PrintTo(StringStream* stream);
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  int position_;
+  int lithium_position_;
+};
+
+
+class LEnvironment: public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               int ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer)
+      : closure_(closure),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        parameter_count_(parameter_count),
+        values_(value_count),
+        representations_(value_count),
+        spilled_registers_(NULL),
+        spilled_double_registers_(NULL),
+        outer_(outer) {
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  int ast_id() const { return ast_id_; }
+  int parameter_count() const { return parameter_count_; }
+  LOperand** spilled_registers() const { return spilled_registers_; }
+  LOperand** spilled_double_registers() const {
+    return spilled_double_registers_;
+  }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+
+  void AddValue(LOperand* operand, Representation representation) {
+    values_.Add(operand);
+    representations_.Add(representation);
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return representations_[index].IsTagged();
+  }
+
+  void Register(int deoptimization_index, int translation_index) {
+    ASSERT(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void SetSpilledRegisters(LOperand** registers,
+                           LOperand** double_registers) {
+    spilled_registers_ = registers;
+    spilled_double_registers_ = double_registers;
+  }
+
+  void PrintTo(StringStream* stream);
+
+ private:
+  Handle<JSFunction> closure_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  int ast_id_;
+  int parameter_count_;
+  ZoneList<LOperand*> values_;
+  ZoneList<Representation> representations_;
+
+  // Arrays indexed by allocation index, holding spill slot operands for
+  // registers that are also in spill slots at an OSR entry.  NULL for
+  // environments that do not correspond to an OSR entry.
+  LOperand** spilled_registers_;
+  LOperand** spilled_double_registers_;
+
+  LEnvironment* outer_;
+
+  friend class LCodeGen;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
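
The new lithium.h classes above are plain data holders: LParallelMove is a list of (from, to) operand pairs, and a whole gap move is redundant only if every pair is (a pair is redundant when it was eliminated or its source and destination coincide). A minimal standalone sketch of that rule, using hypothetical stand-in types rather than the real LOperand hierarchy:

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for LOperand/LMoveOperands; the real classes
    // encode registers, stack slots and constants in lithium-allocator.h.
    struct Operand {
      std::string location;
      bool operator==(const Operand& other) const { return location == other.location; }
    };

    struct MoveOperands {
      Operand from, to;
      bool eliminated;
      // Mirrors LMoveOperands::IsRedundant(): nothing to emit if the move was
      // eliminated or source and destination are the same location.
      bool IsRedundant() const { return eliminated || from == to; }
    };

    struct ParallelMove {
      std::vector<MoveOperands> moves;
      void AddMove(const Operand& from, const Operand& to) {
        moves.push_back(MoveOperands{from, to, false});
      }
      // Mirrors LParallelMove::IsRedundant(): redundant only if all moves are.
      bool IsRedundant() const {
        for (const MoveOperands& move : moves) {
          if (!move.IsRedundant()) return false;
        }
        return true;
      }
    };

    int main() {
      ParallelMove gap;
      gap.AddMove(Operand{"eax"}, Operand{"eax"});     // redundant: same location
      gap.AddMove(Operand{"[sp+4]"}, Operand{"ebx"});  // a real move
      std::cout << std::boolalpha << gap.IsRedundant() << "\n";  // prints false
      return 0;
    }
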
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 7ed22c8..e05c53c 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -980,15 +980,15 @@
   // LiveEdit main entry point: changes a script text to a new string.
   function SetScriptSource(script, new_source, preview_only, change_log) {
     var old_source = script.source;
-    var diff = CompareStringsLinewise(old_source, new_source);
+    var diff = CompareStrings(old_source, new_source);
     return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
         change_log);
   }
   // Function is public.
   this.SetScriptSource = SetScriptSource;
 
-  function CompareStringsLinewise(s1, s2) {
-    return %LiveEditCompareStringsLinewise(s1, s2);
+  function CompareStrings(s1, s2) {
+    return %LiveEditCompareStrings(s1, s2);
   }
 
   // Applies the change to the script.
@@ -1076,7 +1076,7 @@
   // Functions are public for tests.
   this.TestApi = {
     PosTranslator: PosTranslator,
-    CompareStringsLinewise: CompareStringsLinewise,
+    CompareStrings: CompareStrings,
     ApplySingleChunkPatch: ApplySingleChunkPatch
   }
 }
diff --git a/src/liveedit.cc b/src/liveedit.cc
index c4cb68e..b6ad4cf 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -275,6 +275,82 @@
 }
 
 
+// A helper class that writes chunk numbers into JSArray.
+// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
+class CompareOutputArrayWriter {
+ public:
+  CompareOutputArrayWriter()
+      : array_(Factory::NewJSArray(10)), current_size_(0) {}
+
+  Handle<JSArray> GetResult() {
+    return array_;
+  }
+
+  void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
+    SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
+    SetElement(array_, current_size_ + 1,
+               Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
+    SetElement(array_, current_size_ + 2,
+               Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
+    current_size_ += 3;
+  }
+
+ private:
+  Handle<JSArray> array_;
+  int current_size_;
+};
+
+
+// Represents 2 strings as 2 arrays of tokens.
+// TODO(LiveEdit): Currently it's actually an array of characters.
+//     Make array of tokens instead.
+class TokensCompareInput : public Comparator::Input {
+ public:
+  TokensCompareInput(Handle<String> s1, int offset1, int len1,
+                       Handle<String> s2, int offset2, int len2)
+      : s1_(s1), offset1_(offset1), len1_(len1),
+        s2_(s2), offset2_(offset2), len2_(len2) {
+  }
+  virtual int getLength1() {
+    return len1_;
+  }
+  virtual int getLength2() {
+    return len2_;
+  }
+  bool equals(int index1, int index2) {
+    return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
+  }
+
+ private:
+  Handle<String> s1_;
+  int offset1_;
+  int len1_;
+  Handle<String> s2_;
+  int offset2_;
+  int len2_;
+};
+
+
+// Stores compare result in JSArray. Converts substring positions
+// to absolute positions.
+class TokensCompareOutput : public Comparator::Output {
+ public:
+  TokensCompareOutput(CompareOutputArrayWriter* array_writer,
+                      int offset1, int offset2)
+        : array_writer_(array_writer), offset1_(offset1), offset2_(offset2) {
+  }
+
+  void AddChunk(int pos1, int pos2, int len1, int len2) {
+    array_writer_->WriteChunk(pos1 + offset1_, pos2 + offset2_, len1, len2);
+  }
+
+ private:
+  CompareOutputArrayWriter* array_writer_;
+  int offset1_;
+  int offset2_;
+};
+
+
 // Wraps raw n-elements line_ends array as a list of n+1 lines. The last line
 // never has terminating new line character.
 class LineEndsWrapper {
@@ -350,13 +426,14 @@
 };
 
 
-// Stores compare result in JSArray. Each chunk is stored as 3 array elements:
-// (pos1_begin, pos1_end, pos2_end).
-class LineArrayCompareOutput : public Comparator::Output {
+// Stores compare result in JSArray. For each chunk tries to conduct
+// a fine-grained nested diff token-wise.
+class TokenizingLineArrayCompareOutput : public Comparator::Output {
  public:
-  LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
-      : array_(Factory::NewJSArray(10)), current_size_(0),
-        line_ends1_(line_ends1), line_ends2_(line_ends2) {
+  TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
+                                   LineEndsWrapper line_ends2,
+                                   Handle<String> s1, Handle<String> s2)
+      : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
   }
 
   void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
@@ -365,33 +442,43 @@
     int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
     int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
 
-    SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
-    SetElement(array_, current_size_ + 1,
-               Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
-    SetElement(array_, current_size_ + 2,
-               Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
-    current_size_ += 3;
+    if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
+      // Chunk is small enough to conduct a nested token-level diff.
+      HandleScope subTaskScope;
+
+      TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
+                                      s2_, char_pos2, char_len2);
+      TokensCompareOutput tokens_output(&array_writer_, char_pos1,
+                                          char_pos2);
+
+      Comparator::CalculateDifference(&tokens_input, &tokens_output);
+    } else {
+      array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
+    }
   }
 
   Handle<JSArray> GetResult() {
-    return array_;
+    return array_writer_.GetResult();
   }
 
  private:
-  Handle<JSArray> array_;
-  int current_size_;
+  static const int CHUNK_LEN_LIMIT = 800;
+
+  CompareOutputArrayWriter array_writer_;
   LineEndsWrapper line_ends1_;
   LineEndsWrapper line_ends2_;
+  Handle<String> s1_;
+  Handle<String> s2_;
 };
 
 
-Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
-                                                 Handle<String> s2) {
+Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
+                                         Handle<String> s2) {
   LineEndsWrapper line_ends1(s1);
   LineEndsWrapper line_ends2(s2);
 
   LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
-  LineArrayCompareOutput output(line_ends1, line_ends2);
+  TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
 
   Comparator::CalculateDifference(&input, &output);
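
With this change the LiveEdit diff runs on two levels: the line-wise pass produces chunks, and any chunk shorter than CHUNK_LEN_LIMIT characters on both sides gets a nested character-wise pass, with every chunk reported as the triplet (pos1_begin, pos1_end, pos2_end). A self-contained sketch of the triplet encoding used by CompareOutputArrayWriter, with made-up positions and a plain vector in place of the JSArray:

    #include <cstdio>
    #include <vector>

    // Hypothetical flat buffer; the real writer appends Smis to a JSArray.
    static std::vector<int> triplets;

    // Mirrors CompareOutputArrayWriter::WriteChunk: a chunk that replaces
    // [pos1, pos1 + len1) in the old string with [pos2, pos2 + len2) in the
    // new string is stored as (pos1, pos1 + len1, pos2 + len2).
    static void WriteChunk(int pos1, int pos2, int len1, int len2) {
      triplets.push_back(pos1);
      triplets.push_back(pos1 + len1);
      triplets.push_back(pos2 + len2);
    }

    int main() {
      // Old characters [10, 15) were replaced; the new text for them ends at 18.
      WriteChunk(10, 12, 5, 6);
      std::printf("(%d, %d, %d)\n", triplets[0], triplets[1], triplets[2]);  // (10, 15, 18)
      return 0;
    }
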
 
diff --git a/src/liveedit.h b/src/liveedit.h
index 3632180..5f2c99c 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -126,10 +126,11 @@
     FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
   };
 
-  // Compares 2 strings line-by-line and returns diff in form of array of
-  // triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
-  static Handle<JSArray> CompareStringsLinewise(Handle<String> s1,
-                                                Handle<String> s2);
+  // Compares 2 strings line-by-line, then token-wise, and returns the diff
+  // as an array of triplets (pos1, pos1_end, pos2_end) describing the list
+  // of diff chunks.
+  static Handle<JSArray> CompareStrings(Handle<String> s1,
+                                        Handle<String> s2);
 };
 
 
diff --git a/src/messages.js b/src/messages.js
index c19f4a9..a30ef8a 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -97,6 +97,12 @@
     var constructorName = constructor.name;
     if (!constructorName) return ToString(obj);
     return "#<" + GetInstanceName(constructorName) + ">";
+  } else if (obj instanceof $Error) {
+    // When formatting internally created error messages, do not
+    // invoke overwritten error toString methods but explicitly use
+    // the default error toString method. This is to avoid leaking error
+    // objects between script tags in a browser setting.
+    return %_CallFunction(obj, errorToString);
   } else {
     return ToString(obj);
   }
@@ -943,15 +949,28 @@
   }
   %FunctionSetInstanceClassName(f, 'Error');
   %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
-  f.prototype.name = name;
+  // The name property on the prototype of error objects is not
+  // specified as being read-only and dont-delete. However, allowing
+  // it to be overwritten can leak error objects between script blocks
+  // in the same context in a browser setting. Therefore we fix the
+  // name.
+  %SetProperty(f.prototype, "name", name, READ_ONLY | DONT_DELETE);
   %SetCode(f, function(m) {
     if (%_IsConstructCall()) {
+      // Define all the expected properties directly on the error
+      // object. This avoids going through getters and setters defined
+      // on prototype objects.
+      %IgnoreAttributesAndSetProperty(this, 'stack', void 0);
+      %IgnoreAttributesAndSetProperty(this, 'arguments', void 0);
+      %IgnoreAttributesAndSetProperty(this, 'type', void 0);
       if (m === kAddMessageAccessorsMarker) {
+        // DefineOneShotAccessor always inserts a message property and
+        // ignores setters.
         DefineOneShotAccessor(this, 'message', function (obj) {
           return FormatMessage({type: obj.type, args: obj.arguments});
         });
       } else if (!IS_UNDEFINED(m)) {
-        this.message = ToString(m);
+        %IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
       }
       captureStackTrace(this, f);
     } else {
@@ -987,14 +1006,17 @@
 // Setup extra properties of the Error.prototype object.
 $Error.prototype.message = '';
 
-%SetProperty($Error.prototype, 'toString', function toString() {
+function errorToString() {
   var type = this.type;
   if (type && !this.hasOwnProperty("message")) {
     return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
   }
-  var message = this.message;
-  return this.name + (message ? (": " + message) : "");
-}, DONT_ENUM);
+  var message = this.hasOwnProperty("message") ? (": " + this.message) : "";
+  return this.name + message;
+}
+
+%FunctionSetName(errorToString, 'toString');
+%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
 
 
 // Boilerplate for exceptions for stack overflows. Used from
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index a3552c7..7d50bfb 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -674,13 +674,12 @@
   ASSERT_EQ(0, finger % kEntrySize);
 
   if (FLAG_enable_slow_asserts) {
-    for (int i = kEntriesIndex; i < size; i++) {
-      ASSERT(!get(i)->IsTheHole());
+    STATIC_ASSERT(2 == kEntrySize);
+    for (int i = kEntriesIndex; i < length(); i += kEntrySize) {
       get(i)->Verify();
-    }
-    for (int i = size; i < length(); i++) {
-      ASSERT(get(i)->IsTheHole());
-      get(i)->Verify();
+      get(i + 1)->Verify();
+      // Key and value must either both be the hole, or both not be.
+      ASSERT(get(i)->IsTheHole() == get(i + 1)->IsTheHole());
     }
   }
 }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 3c9dc82..abfd443 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2238,7 +2238,6 @@
 
 
 void Map::set_instance_type(InstanceType value) {
-  ASSERT(0 <= value && value < 256);
   WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
 }
 
@@ -2990,13 +2989,6 @@
 
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
-  // If optimization has been disabled for the shared function info,
-  // reflect that in the code object so it will not be counted as
-  // optimizable code.
-  ASSERT(value->kind() != Code::FUNCTION ||
-         !value->optimizable() ||
-         this->code() == Builtins::builtin(Builtins::Illegal) ||
-         this->allows_lazy_compilation());
   WRITE_FIELD(this, kCodeOffset, value);
   CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
 }
@@ -3216,28 +3208,28 @@
 
 
 Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
-  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   return READ_FIELD(this, OffsetOfFunctionWithId(id));
 }
 
 
 void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
                                               Object* value) {
-  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
   WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
 }
 
 
 Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
-  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
 }
 
 
 void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
                                                    Code* value) {
-  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
   ASSERT(!Heap::InNewSpace(value));
 }
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 6510ca8..ea6d795 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -146,7 +146,7 @@
   }
 
   void Register(StaticVisitorBase::VisitorId id, Callback callback) {
-    ASSERT((0 <= id) && (id < StaticVisitorBase::kVisitorIdCount));
+    ASSERT(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
     callbacks_[id] = callback;
   }
 
diff --git a/src/objects.cc b/src/objects.cc
index f3f8003..36a8e5c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5399,7 +5399,8 @@
 
 void JSFunction::MarkForLazyRecompilation() {
   ASSERT(is_compiled() && !IsOptimized());
-  ASSERT(shared()->allows_lazy_compilation());
+  ASSERT(shared()->allows_lazy_compilation() ||
+         code()->optimizable());
   ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
 }
 
@@ -5987,14 +5988,9 @@
 }
 
 
-uint8_t* Code::GetSafepointEntry(Address pc) {
+SafepointEntry Code::GetSafepointEntry(Address pc) {
   SafepointTable table(this);
-  unsigned pc_offset = static_cast<unsigned>(pc - instruction_start());
-  for (unsigned i = 0; i < table.length(); i++) {
-    // TODO(kasperl): Replace the linear search with binary search.
-    if (table.GetPcOffset(i) == pc_offset) return table.GetEntry(i);
-  }
-  return NULL;
+  return table.FindEntry(pc);
 }
 
 
@@ -6265,12 +6261,15 @@
       PrintF(out, "%p  %4d  ", (instruction_start() + pc_offset), pc_offset);
       table.PrintEntry(i);
       PrintF(out, " (sp -> fp)");
-      int deoptimization_index = table.GetDeoptimizationIndex(i);
-      if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-        PrintF(out, "  %6d", deoptimization_index);
+      SafepointEntry entry = table.GetEntry(i);
+      if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+        PrintF(out, "  %6d", entry.deoptimization_index());
       } else {
         PrintF(out, "  <none>");
       }
+      if (entry.argument_count() > 0) {
+        PrintF(out, " argc: %d", entry.argument_count());
+      }
       PrintF(out, "\n");
     }
     PrintF(out, "\n");
diff --git a/src/objects.h b/src/objects.h
index 063555e..c136dc5 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3121,6 +3121,9 @@
 };
 
 
+class SafepointEntry;
+
+
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
  public:
@@ -3268,9 +3271,8 @@
   inline byte compare_state();
   inline void set_compare_state(byte value);
 
-  // Get the safepoint entry for the given pc. Returns NULL for
-  // non-safepoint pcs.
-  uint8_t* GetSafepointEntry(Address pc);
+  // Get the safepoint entry for the given pc.
+  SafepointEntry GetSafepointEntry(Address pc);
 
   // Mark this code object as not having a stack check table.  Assumes kind
   // is FUNCTION.
diff --git a/src/parser.cc b/src/parser.cc
index 5ea1c5e..6ad9ab3 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -600,7 +600,8 @@
       extension_(extension),
       pre_data_(pre_data),
       fni_(NULL),
-      stack_overflow_(false) {
+      stack_overflow_(false),
+      parenthesized_function_(false) {
   AstNode::ResetIds();
 }
 
@@ -777,7 +778,8 @@
                              const char* type,
                              Vector<const char*> args) {
   MessageLocation location(script_,
-                           source_location.beg_pos, source_location.end_pos);
+                           source_location.beg_pos,
+                           source_location.end_pos);
   Handle<JSArray> array = Factory::NewJSArray(args.length());
   for (int i = 0; i < args.length(); i++) {
     SetElement(array, i, Factory::NewStringFromUtf8(CStrVector(args[i])));
@@ -787,6 +789,21 @@
 }
 
 
+void Parser::ReportMessageAt(Scanner::Location source_location,
+                             const char* type,
+                             Vector<Handle<String> > args) {
+  MessageLocation location(script_,
+                           source_location.beg_pos,
+                           source_location.end_pos);
+  Handle<JSArray> array = Factory::NewJSArray(args.length());
+  for (int i = 0; i < args.length(); i++) {
+    SetElement(array, i, args[i]);
+  }
+  Handle<Object> result = Factory::NewSyntaxError(type, array);
+  Top::Throw(*result, &location);
+}
+
+
 // Base class containing common code for the different finder classes used by
 // the parser.
 class ParserFinder {
@@ -1692,12 +1709,16 @@
   IterationStatement* target = NULL;
   target = LookupContinueTarget(label, CHECK_OK);
   if (target == NULL) {
-    // Illegal continue statement.  To be consistent with KJS we delay
-    // reporting of the syntax error until runtime.
-    Handle<String> error_type = Factory::illegal_continue_symbol();
-    if (!label.is_null()) error_type = Factory::unknown_label_symbol();
-    Expression* throw_error = NewThrowSyntaxError(error_type, label);
-    return new ExpressionStatement(throw_error);
+    // Illegal continue statement.
+    const char* message = "illegal_continue";
+    Vector<Handle<String> > args;
+    if (!label.is_null()) {
+      message = "unknown_label";
+      args = Vector<Handle<String> >(&label, 1);
+    }
+    ReportMessageAt(scanner().location(), message, args);
+    *ok = false;
+    return NULL;
   }
   ExpectSemicolon(CHECK_OK);
   return new ContinueStatement(target);
@@ -1723,12 +1744,16 @@
   BreakableStatement* target = NULL;
   target = LookupBreakTarget(label, CHECK_OK);
   if (target == NULL) {
-    // Illegal break statement.  To be consistent with KJS we delay
-    // reporting of the syntax error until runtime.
-    Handle<String> error_type = Factory::illegal_break_symbol();
-    if (!label.is_null()) error_type = Factory::unknown_label_symbol();
-    Expression* throw_error = NewThrowSyntaxError(error_type, label);
-    return new ExpressionStatement(throw_error);
+    // Illegal break statement.
+    const char* message = "illegal_break";
+    Vector<Handle<String> > args;
+    if (!label.is_null()) {
+      message = "unknown_label";
+      args = Vector<Handle<String> >(&label, 1);
+    }
+    ReportMessageAt(scanner().location(), message, args);
+    *ok = false;
+    return NULL;
   }
   ExpectSemicolon(CHECK_OK);
   return new BreakStatement(target);
@@ -2482,9 +2507,13 @@
         // The calls that need special treatment are the
         // direct (i.e. not aliased) eval calls. These calls are all of the
         // form eval(...) with no explicit receiver object where eval is not
-        // declared in the current scope chain. These calls are marked as
-        // potentially direct eval calls. Whether they are actually direct calls
-        // to eval is determined at run time.
+        // declared in the current scope chain.
+        // These calls are marked as potentially direct eval calls. Whether
+        // they are actually direct calls to eval is determined at run time.
+        // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
+        // in the local scope chain. It only matters that it's called "eval",
+        // is called without a receiver and it refers to the original eval
+        // function.
         VariableProxy* callee = result->AsVariableProxy();
         if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
           Handle<String> name = callee->name();
@@ -2734,6 +2763,9 @@
 
     case Token::LPAREN:
       Consume(Token::LPAREN);
+      // Heuristically try to detect immediately called functions before
+      // seeing the call parentheses.
+      parenthesized_function_ = (peek() == Token::FUNCTION);
       result = ParseExpression(true, CHECK_OK);
       Expect(Token::RPAREN, CHECK_OK);
       break;
@@ -3225,8 +3257,11 @@
 
     // Determine if the function will be lazily compiled. The mode can
     // only be PARSE_LAZILY if the --lazy flag is true.
-    bool is_lazily_compiled =
-        mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
+    bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
+                               top_scope_->outer_scope()->is_global_scope() &&
+                               top_scope_->HasTrivialOuterContext() &&
+                               !parenthesized_function_);
+    parenthesized_function_ = false;  // The bit was set for this function only.
 
     int function_block_pos = scanner().location().beg_pos;
     int materialized_literal_count;
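
The lazy-compilation change above rests on a one-token heuristic: when the parser consumes a '(' and the very next token is 'function', the literal is assumed to be immediately invoked, so it is compiled eagerly instead of lazily. A toy standalone version of that check (token set and parser shape are invented for illustration; the real code tests Token::LPAREN and Token::FUNCTION):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum Token { LPAREN, RPAREN, FUNCTION, IDENTIFIER, OTHER };

    class ToyParser {
     public:
      explicit ToyParser(const std::vector<Token>& tokens)
          : tokens_(tokens), pos_(0), parenthesized_function_(false) {}

      Token peek() const { return tokens_[pos_]; }
      void Consume(Token expected) { assert(peek() == expected); pos_++; }

      void ParseParenthesizedExpression() {
        Consume(LPAREN);
        // Heuristic from the patch: '(' immediately followed by 'function'
        // usually means an immediately-invoked function expression.
        parenthesized_function_ = (peek() == FUNCTION);
        // ... parse the expression, then Expect(RPAREN) ...
      }

      bool ShouldCompileLazily(bool top_level, bool allow_lazy) const {
        return top_level && allow_lazy && !parenthesized_function_;
      }

     private:
      std::vector<Token> tokens_;
      size_t pos_;
      bool parenthesized_function_;
    };

    int main() {
      std::vector<Token> tokens;
      tokens.push_back(LPAREN);
      tokens.push_back(FUNCTION);
      tokens.push_back(RPAREN);
      ToyParser parser(tokens);
      parser.ParseParenthesizedExpression();
      // An IIFE-looking function should not be deferred.
      return parser.ShouldCompileLazily(true, true) ? 1 : 0;
    }
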
diff --git a/src/parser.h b/src/parser.h
index 1dfc153..0613a8d 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -430,6 +430,9 @@
   void ReportMessageAt(Scanner::Location loc,
                        const char* message,
                        Vector<const char*> args);
+  void ReportMessageAt(Scanner::Location loc,
+                       const char* message,
+                       Vector<Handle<String> > args);
 
  protected:
   FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
@@ -682,6 +685,11 @@
   ScriptDataImpl* pre_data_;
   FuncNameInferrer* fni_;
   bool stack_overflow_;
+  // If true, the next (and immediately following) function literal is
+  // preceded by a parenthesis.
+  // Heuristically that means that the function will be called immediately,
+  // so never lazily compile it.
+  bool parenthesized_function_;
 };
 
 
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 04c25a9..dc4493a 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -45,6 +45,7 @@
 #include <errno.h>
 #include <ieeefp.h>  // finite()
 #include <signal.h>  // sigemptyset(), etc
+#include <sys/kdi_regs.h>
 
 
 #undef MAP_TYPE
@@ -493,6 +494,16 @@
 
   int Unlock() { return pthread_mutex_unlock(&mutex_); }
 
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
  private:
   pthread_mutex_t mutex_;
 };
@@ -584,21 +595,37 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 static Sampler* active_sampler_ = NULL;
+static pthread_t vm_tid_ = 0;
+
 
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
-  if (active_sampler_ == NULL) return;
+  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
+  if (vm_tid_ != pthread_self()) return;
 
-  TickSample sample;
-  sample.pc = 0;
-  sample.sp = 0;
-  sample.fp = 0;
+  TickSample sample_obj;
+  TickSample* sample = CpuProfiler::TickSampleEvent();
+  if (sample == NULL) sample = &sample_obj;
 
-  // We always sample the VM state.
-  sample.state = VMState::current_state();
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+  sample->state = Top::current_vm_state();
 
-  active_sampler_->Tick(&sample);
+#if V8_HOST_ARCH_IA32
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_ESP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EBP]);
+#elif V8_HOST_ARCH_X64
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RSP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RBP]);
+#else
+  UNIMPLEMENTED();
+#endif
+  active_sampler_->SampleStack(sample);
+  active_sampler_->Tick(sample);
 }
 
 
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index dba3026..3817935 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -69,8 +69,12 @@
     }
   }
 
-  virtual void PushBack(uc16 ch) {
+  virtual void PushBack(uc32 ch) {
     ASSERT(pos_ > 0);
+    if (ch == kEndOfInput) {
+      pos_--;
+      return;
+    }
     if (buffer_cursor_ <= pushback_buffer_) {
       // No more room in the current buffer to do pushbacks.
       if (pushback_buffer_end_cache_ == NULL) {
@@ -98,7 +102,8 @@
         buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
       }
     }
-    pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] = ch;
+    pushback_buffer_[buffer_cursor_ - pushback_buffer_ - 1] =
+        static_cast<uc16>(ch);
     pos_--;
   }
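
PushBack now accepts a uc32 so that the end-of-input marker, a negative value that does not fit in the uc16 pushback buffer, can be pushed back as well; in that case only the position is rewound and nothing is written. A standalone sketch of that special case over a plain byte stream (the buffer management of the real UTF16Buffer is omitted):

    #include <cassert>
    #include <string>

    class ToyStream {
     public:
      static const int kEndOfInput = -1;

      explicit ToyStream(const std::string& data) : data_(data), pos_(0) {}

      int Advance() {
        if (pos_ >= data_.size()) { pos_++; return kEndOfInput; }
        return static_cast<unsigned char>(data_[pos_++]);
      }

      void PushBack(int ch) {
        assert(pos_ > 0);
        // The end-of-input marker is not a storable character: just rewind
        // the position instead of writing anything back.
        if (ch == kEndOfInput) { pos_--; return; }
        pos_--;
        assert(static_cast<unsigned char>(data_[pos_]) == ch);
      }

     private:
      std::string data_;
      size_t pos_;
    };

    int main() {
      ToyStream stream("a");
      stream.Advance();                  // 'a'
      int eof = stream.Advance();        // kEndOfInput
      stream.PushBack(eof);              // only rewinds the position
      return stream.Advance() == ToyStream::kEndOfInput ? 0 : 1;
    }
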
 
diff --git a/src/preparser.cc b/src/preparser.cc
index e05f903..c0dcc0b 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -1,3 +1,4 @@
+
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -894,6 +895,7 @@
 
     case i::Token::LPAREN:
       Consume(i::Token::LPAREN);
+      parenthesized_function_ = (peek() == i::Token::FUNCTION);
       result = ParseExpression(true, CHECK_OK);
       Expect(i::Token::RPAREN, CHECK_OK);
       if (result == kIdentifierExpression) result = kUnknownExpression;
@@ -1071,8 +1073,10 @@
   // Determine if the function will be lazily compiled.
   // Currently only happens to top-level functions.
   // Optimistically assume that all top-level functions are lazily compiled.
-  bool is_lazily_compiled =
-      (outer_scope_type == kTopLevelScope && !inside_with && allow_lazy_);
+  bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
+                             !inside_with && allow_lazy_ &&
+                             !parenthesized_function_);
+  parenthesized_function_ = false;
 
   if (is_lazily_compiled) {
     log_->PauseRecording();
diff --git a/src/preparser.h b/src/preparser.h
index 536e6d4..66fad3b 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -144,7 +144,8 @@
         scope_(NULL),
         stack_limit_(stack_limit),
         stack_overflow_(false),
-        allow_lazy_(true) { }
+        allow_lazy_(true),
+        parenthesized_function_(false) { }
 
   // Preparse the program. Only called in PreParseProgram after creating
   // the instance.
@@ -268,6 +269,7 @@
   uintptr_t stack_limit_;
   bool stack_overflow_;
   bool allow_lazy_;
+  bool parenthesized_function_;
 };
 } }  // v8::preparser
 
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 34d1877..4476cb8 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2385,7 +2385,7 @@
   if (interrupted) return false;
   SetRootGcRootsReference();
   RootsReferencesExtractor extractor(this);
-  Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+  Heap::IterateRoots(&extractor, VISIT_ALL);
   return ReportProgress();
 }
 
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 6c9c2eb..9deea86 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -76,18 +76,18 @@
                                     Label* on_end_of_input,
                                     bool check_bounds = true,
                                     int characters = 1);
-  virtual void CheckCharacter(uint32_t c, Label* on_equal);
-  virtual void CheckCharacterAfterAnd(uint32_t c,
-                                      uint32_t mask,
+  virtual void CheckCharacter(unsigned c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(unsigned c,
+                                      unsigned mask,
                                       Label* on_equal);
   virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
   virtual void CheckCharacterLT(uc16 limit, Label* on_less);
   virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
   virtual void CheckAtStart(Label* on_at_start);
   virtual void CheckNotAtStart(Label* on_not_at_start);
-  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
-  virtual void CheckNotCharacterAfterAnd(uint32_t c,
-                                         uint32_t mask,
+  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(unsigned c,
+                                         unsigned mask,
                                          Label* on_not_equal);
   virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
                                               uc16 minus,
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 463c1a8..fa2c657 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -213,7 +213,7 @@
 }
 
 
-void RegExpMacroAssemblerTracer::CheckCharacter(uint32_t c, Label* on_equal) {
+void RegExpMacroAssemblerTracer::CheckCharacter(unsigned c, Label* on_equal) {
   PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
          c, LabelToInt(on_equal));
   assembler_->CheckCharacter(c, on_equal);
@@ -232,7 +232,7 @@
 }
 
 
-void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
+void RegExpMacroAssemblerTracer::CheckNotCharacter(unsigned c,
                                                    Label* on_not_equal) {
   PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
          c, LabelToInt(on_not_equal));
@@ -241,8 +241,8 @@
 
 
 void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
-    uint32_t c,
-    uint32_t mask,
+    unsigned c,
+    unsigned mask,
     Label* on_equal) {
   PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
          c,
@@ -253,8 +253,8 @@
 
 
 void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
-    uint32_t c,
-    uint32_t mask,
+    unsigned c,
+    unsigned mask,
     Label* on_not_equal) {
   PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
          c,
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 6a8f4d4..1fb6d54 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -43,9 +43,9 @@
   virtual void Backtrack();
   virtual void Bind(Label* label);
   virtual void CheckAtStart(Label* on_at_start);
-  virtual void CheckCharacter(uint32_t c, Label* on_equal);
-  virtual void CheckCharacterAfterAnd(uint32_t c,
-                                      uint32_t and_with,
+  virtual void CheckCharacter(unsigned c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(unsigned c,
+                                      unsigned and_with,
                                       Label* on_equal);
   virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
   virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@@ -60,9 +60,9 @@
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
                                                Label* on_no_match);
   virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
-  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
-  virtual void CheckNotCharacterAfterAnd(uint32_t c,
-                                         uint32_t and_with,
+  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(unsigned c,
+                                         unsigned and_with,
                                          Label* on_not_equal);
   virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
                                               uc16 minus,
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index dc3bd82..ef85d27 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -73,11 +73,11 @@
   virtual void CheckAtStart(Label* on_at_start) = 0;
   // Dispatch after looking the current character up in a 2-bits-per-entry
   // map.  The destinations vector has up to 4 labels.
-  virtual void CheckCharacter(uint32_t c, Label* on_equal) = 0;
+  virtual void CheckCharacter(unsigned c, Label* on_equal) = 0;
   // Bitwise and the current character with the given constant and then
   // check for a match with c.
-  virtual void CheckCharacterAfterAnd(uint32_t c,
-                                      uint32_t and_with,
+  virtual void CheckCharacterAfterAnd(unsigned c,
+                                      unsigned and_with,
                                       Label* on_equal) = 0;
   virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
   virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
@@ -101,9 +101,9 @@
   // fail to match then goto the on_failure label.  End of input always
   // matches.  If the label is NULL then we should pop a backtrack address off
   // the stack and go to that.
-  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal) = 0;
-  virtual void CheckNotCharacterAfterAnd(uint32_t c,
-                                         uint32_t and_with,
+  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal) = 0;
+  virtual void CheckNotCharacterAfterAnd(unsigned c,
+                                         unsigned and_with,
                                          Label* on_not_equal) = 0;
   // Subtract a constant from the current character, then or with the given
   // constant and then check for a match with c.
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 3d737a4..fd40cdc 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -978,7 +978,7 @@
 }
 
 
-// Assumes code has been parsed and scopes hve been analyzed.  Mutates the
+// Assumes code has been parsed and scopes have been analyzed.  Mutates the
 // AST, so the AST should not continue to be used in the case of failure.
 bool Rewriter::Rewrite(CompilationInfo* info) {
   FunctionLiteral* function = info->function();
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index c53ddd2..1efc6ef 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -165,8 +165,10 @@
   }
 
   SharedFunctionInfo* shared = function->shared();
-  // If the code is not optimizable, don't try OSR.
-  if (!shared->code()->optimizable()) return;
+  // If the code is not optimizable or references context slots, don't try OSR.
+  if (!shared->code()->optimizable() || !shared->allows_lazy_compilation()) {
+    return;
+  }
 
   // We are not prepared to do OSR for a function that already has an
   // allocated arguments object.  The optimized code would bypass it for
diff --git a/src/runtime.cc b/src/runtime.cc
index 2aa4431..0cde777 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1749,6 +1749,7 @@
     // Array, and Object, and some web code
     // doesn't like seeing source code for constructors.
     target->shared()->set_script(Heap::undefined_value());
+    target->shared()->code()->set_optimizable(false);
     // Clear the optimization hints related to the compiled code as these are no
     // longer valid when the code is overwritten.
     target->shared()->ClearThisPropertyAssignmentsInfo();
@@ -4621,12 +4622,12 @@
 }
 
 
-template <typename Char, typename StringType>
+template <typename Char, typename StringType, bool comma>
 static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
   int length = characters.length();
   const Char* read_cursor = characters.start();
   const Char* end = read_cursor + length;
-  const int kSpaceForQuotes = 2;
+  const int kSpaceForQuotes = 2 + (comma ? 1 : 0);
   int quoted_length = kSpaceForQuotes;
   while (read_cursor < end) {
     Char c = *(read_cursor++);
@@ -4645,6 +4646,7 @@
 
   Char* write_cursor = reinterpret_cast<Char*>(
       new_string->address() + SeqAsciiString::kHeaderSize);
+  if (comma) *(write_cursor++) = ',';
   *(write_cursor++) = '"';
 
   read_cursor = characters.start();
@@ -4666,14 +4668,14 @@
 }
 
 
-template <typename Char, typename StringType>
+template <typename Char, typename StringType, bool comma>
 static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
   int length = characters.length();
   Counters::quote_json_char_count.Increment(length);
-  const int kSpaceForQuotes = 2;
+  const int kSpaceForQuotes = 2 + (comma ? 1 : 0);
   int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
   if (worst_case_length > kMaxGuaranteedNewSpaceString) {
-    return SlowQuoteJsonString<Char, StringType>(characters);
+    return SlowQuoteJsonString<Char, StringType, comma>(characters);
   }
 
   MaybeObject* new_alloc = AllocateRawString<StringType>(worst_case_length);
@@ -4686,7 +4688,7 @@
     // handle it being allocated in old space as may happen in the third
     // attempt.  See CALL_AND_RETRY in heap-inl.h and similar code in
     // CEntryStub::GenerateCore.
-    return SlowQuoteJsonString<Char, StringType>(characters);
+    return SlowQuoteJsonString<Char, StringType, comma>(characters);
   }
   StringType* new_string = StringType::cast(new_object);
   ASSERT(Heap::new_space()->Contains(new_string));
@@ -4694,6 +4696,7 @@
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
       new_string->address() + SeqAsciiString::kHeaderSize);
+  if (comma) *(write_cursor++) = ',';
   *(write_cursor++) = '"';
 
   const Char* read_cursor = characters.start();
@@ -4744,13 +4747,32 @@
     ASSERT(str->IsFlat());
   }
   if (str->IsTwoByteRepresentation()) {
-    return QuoteJsonString<uc16, SeqTwoByteString>(str->ToUC16Vector());
+    return QuoteJsonString<uc16, SeqTwoByteString, false>(str->ToUC16Vector());
   } else {
-    return QuoteJsonString<char, SeqAsciiString>(str->ToAsciiVector());
+    return QuoteJsonString<char, SeqAsciiString, false>(str->ToAsciiVector());
   }
 }
 
 
+static MaybeObject* Runtime_QuoteJSONStringComma(Arguments args) {
+  NoHandleAllocation ha;
+  CONVERT_CHECKED(String, str, args[0]);
+  if (!str->IsFlat()) {
+    MaybeObject* try_flatten = str->TryFlatten();
+    Object* flat;
+    if (!try_flatten->ToObject(&flat)) {
+      return try_flatten;
+    }
+    str = String::cast(flat);
+    ASSERT(str->IsFlat());
+  }
+  if (str->IsTwoByteRepresentation()) {
+    return QuoteJsonString<uc16, SeqTwoByteString, true>(str->ToUC16Vector());
+  } else {
+    return QuoteJsonString<char, SeqAsciiString, true>(str->ToAsciiVector());
+  }
+}
+
 
 static MaybeObject* Runtime_StringParseInt(Arguments args) {
   NoHandleAllocation ha;
@@ -6714,12 +6736,24 @@
   // code from the full compiler.
   if (!function->shared()->code()->optimizable() ||
       Debug::has_break_points()) {
+    if (FLAG_trace_opt) {
+      PrintF("[failed to optimize ");
+      function->PrintName();
+      PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
+          function->shared()->code()->optimizable() ? "T" : "F",
+          Debug::has_break_points() ? "T" : "F");
+    }
     function->ReplaceCode(function->shared()->code());
     return function->code();
   }
   if (CompileOptimized(function, AstNode::kNoNumber)) {
     return function->code();
   }
+  if (FLAG_trace_opt) {
+    PrintF("[failed to optimize ");
+    function->PrintName();
+    PrintF(": optimized compilation failed]\n");
+  }
   function->ReplaceCode(function->shared()->code());
   return Failure::Exception();
 }
@@ -10334,15 +10368,16 @@
   return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
 }
 
-// Compares 2 strings line-by-line and returns diff in form of JSArray of
-// triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
-static MaybeObject* Runtime_LiveEditCompareStringsLinewise(Arguments args) {
+// Compares 2 strings line-by-line, then token-wise, and returns the diff
+// as a JSArray of triplets (pos1, pos1_end, pos2_end) describing the list
+// of diff chunks.
+static MaybeObject* Runtime_LiveEditCompareStrings(Arguments args) {
   ASSERT(args.length() == 2);
   HandleScope scope;
   CONVERT_ARG_CHECKED(String, s1, 0);
   CONVERT_ARG_CHECKED(String, s2, 1);
 
-  return *LiveEdit::CompareStringsLinewise(s1, s2);
+  return *LiveEdit::CompareStrings(s1, s2);
 }
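
The QuoteJsonString templates earlier in this file gain a `comma` bool template parameter, so the JSON stringifier can ask for a leading ',' before the opening quote without paying for a run-time branch in the copy loop; Runtime_QuoteJSONStringComma is just the `comma = true` instantiation. A self-contained sketch of the same compile-time-flag pattern (writing into a std::string instead of a freshly allocated V8 string, and with only minimal escaping):

    #include <iostream>
    #include <string>

    // Hypothetical quoting helper: 'comma' is a compile-time flag, so the
    // "emit a leading ','" decision is fixed when the template is instantiated.
    template <bool comma>
    std::string QuoteJsonString(const std::string& characters) {
      std::string out;
      out.reserve(characters.size() + 2 + (comma ? 1 : 0));
      if (comma) out.push_back(',');
      out.push_back('"');
      for (std::string::size_type i = 0; i < characters.size(); i++) {
        char c = characters[i];
        if (c == '"' || c == '\\') out.push_back('\\');  // minimal escaping only
        out.push_back(c);
      }
      out.push_back('"');
      return out;
    }

    int main() {
      std::cout << QuoteJsonString<false>("first")    // "first"
                << QuoteJsonString<true>("second")    // ,"second"
                << "\n";
      return 0;
    }
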
 
 
diff --git a/src/runtime.h b/src/runtime.h
index 2fa7438..dbd8d64 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -106,6 +106,7 @@
   F(URIEscape, 1, 1) \
   F(URIUnescape, 1, 1) \
   F(QuoteJSONString, 1, 1) \
+  F(QuoteJSONStringComma, 1, 1) \
   \
   F(NumberToString, 1, 1) \
   F(NumberToStringSkipCache, 1, 1) \
@@ -361,7 +362,7 @@
   F(LiveEditReplaceRefToNestedFunction, 3, 1) \
   F(LiveEditPatchFunctionPositions, 2, 1) \
   F(LiveEditCheckAndDropActivations, 2, 1) \
-  F(LiveEditCompareStringsLinewise, 2, 1) \
+  F(LiveEditCompareStrings, 2, 1) \
   F(GetFunctionCodePositionFromSource, 2, 1) \
   F(ExecuteInDebugContext, 2, 1) \
   \
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index b9468a5..e79dcff 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -26,11 +26,34 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "safepoint-table.h"
+
 #include "disasm.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
+
+bool SafepointEntry::HasRegisters() const {
+  ASSERT(is_valid());
+  ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+  const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
+  for (int i = 0; i < num_reg_bytes; i++) {
+    if (bits_[i] != SafepointTable::kNoRegisters) return true;
+  }
+  return false;
+}
+
+
+bool SafepointEntry::HasRegisterAt(int reg_index) const {
+  ASSERT(is_valid());
+  ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
+  int byte_index = reg_index >> kBitsPerByteLog2;
+  int bit_index = reg_index & (kBitsPerByte - 1);
+  return (bits_[byte_index] & (1 << bit_index)) != 0;
+}
+
+
 SafepointTable::SafepointTable(Code* code) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   code_ = code;
@@ -41,45 +64,39 @@
   entries_ = pc_and_deoptimization_indexes_ +
             (length_ * kPcAndDeoptimizationIndexSize);
   ASSERT(entry_size_ > 0);
-  ASSERT_EQ(DeoptimizationIndexField::max(), Safepoint::kNoDeoptimizationIndex);
+  ASSERT_EQ(SafepointEntry::DeoptimizationIndexField::max(),
+            Safepoint::kNoDeoptimizationIndex);
 }
 
 
-bool SafepointTable::HasRegisters(uint8_t* entry) {
-  ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
-  const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
-  for (int i = 0; i < num_reg_bytes; i++) {
-    if (entry[i] != kNoRegisters) return true;
+SafepointEntry SafepointTable::FindEntry(Address pc) const {
+  unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
+  for (unsigned i = 0; i < length(); i++) {
+    // TODO(kasperl): Replace the linear search with binary search.
+    if (GetPcOffset(i) == pc_offset) return GetEntry(i);
   }
-  return false;
-}
-
-
-bool SafepointTable::HasRegisterAt(uint8_t* entry, int reg_index) {
-  ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
-  int byte_index = reg_index >> kBitsPerByteLog2;
-  int bit_index = reg_index & (kBitsPerByte - 1);
-  return (entry[byte_index] & (1 << bit_index)) != 0;
+  return SafepointEntry();
 }
 
 
 void SafepointTable::PrintEntry(unsigned index) const {
   disasm::NameConverter converter;
-  uint8_t* entry = GetEntry(index);
+  SafepointEntry entry = GetEntry(index);
+  uint8_t* bits = entry.bits();
 
   // Print the stack slot bits.
   if (entry_size_ > 0) {
     ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
     const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
     int last = entry_size_ - 1;
-    for (int i = first; i < last; i++) PrintBits(entry[i], kBitsPerByte);
+    for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
     int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
-    PrintBits(entry[last], last_bits);
+    PrintBits(bits[last], last_bits);
 
     // Print the registers (if any).
-    if (!HasRegisters(entry)) return;
+    if (!entry.HasRegisters()) return;
     for (int j = 0; j < kNumSafepointRegisters; j++) {
-      if (HasRegisterAt(entry, j)) {
+      if (entry.HasRegisterAt(j)) {
         PrintF(" | %s", converter.NameOfCPURegister(j));
       }
     }
@@ -95,6 +112,11 @@
 }
 
 
+void Safepoint::DefinePointerRegister(Register reg) {
+  registers_->Add(reg.code());
+}
+
+
 Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
                                                  int deoptimization_index) {
   ASSERT(deoptimization_index != -1);
@@ -102,6 +124,8 @@
   pc_and_deoptimization_index.pc = assembler->pc_offset();
   pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
   pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  pc_and_deoptimization_index.arguments = 0;
+  pc_and_deoptimization_index.has_doubles = false;
   deoptimization_info_.Add(pc_and_deoptimization_index);
   indexes_.Add(new ZoneList<int>(8));
   registers_.Add(NULL);
@@ -112,11 +136,13 @@
 Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
     Assembler* assembler, int arguments, int deoptimization_index) {
   ASSERT(deoptimization_index != -1);
-  ASSERT(arguments == 0);  // Only case that works for now.
+  ASSERT(arguments >= 0);
   DeoptimizationInfo pc_and_deoptimization_index;
   pc_and_deoptimization_index.pc = assembler->pc_offset();
   pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
   pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  pc_and_deoptimization_index.arguments = arguments;
+  pc_and_deoptimization_index.has_doubles = false;
   deoptimization_info_.Add(pc_and_deoptimization_index);
   indexes_.Add(new ZoneList<int>(8));
   registers_.Add(new ZoneList<int>(4));
@@ -124,6 +150,22 @@
 }
 
 
+Safepoint SafepointTableBuilder::DefineSafepointWithRegistersAndDoubles(
+    Assembler* assembler, int arguments, int deoptimization_index) {
+  ASSERT(deoptimization_index != -1);
+  ASSERT(arguments >= 0);
+  DeoptimizationInfo pc_and_deoptimization_index;
+  pc_and_deoptimization_index.pc = assembler->pc_offset();
+  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
+  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  pc_and_deoptimization_index.arguments = arguments;
+  pc_and_deoptimization_index.has_doubles = true;
+  deoptimization_info_.Add(pc_and_deoptimization_index);
+  indexes_.Add(new ZoneList<int>(8));
+  registers_.Add(new ZoneList<int>(4));
+  return Safepoint(indexes_.last(), registers_.last());
+}
+
 unsigned SafepointTableBuilder::GetCodeOffset() const {
   ASSERT(emitted_);
   return offset_;
@@ -152,7 +194,7 @@
   // pc after gap information.
   for (int i = 0; i < length; i++) {
     assembler->dd(deoptimization_info_[i].pc);
-    assembler->dd(EncodeDeoptimizationIndexAndGap(deoptimization_info_[i]));
+    assembler->dd(EncodeExceptPC(deoptimization_info_[i]));
   }
 
   // Emit table of bitmaps.
@@ -197,12 +239,13 @@
 }
 
 
-uint32_t SafepointTableBuilder::EncodeDeoptimizationIndexAndGap(
-    DeoptimizationInfo info) {
+uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
   unsigned index = info.deoptimization_index;
   unsigned gap_size = info.pc_after_gap - info.pc;
-  uint32_t encoding = SafepointTable::DeoptimizationIndexField::encode(index);
-  encoding |= SafepointTable::GapCodeSizeField::encode(gap_size);
+  uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
+  encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
+  encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
+  encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
   return encoding;
 }
 
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index 010ac57..d703051 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -30,13 +30,89 @@
 
 #include "v8.h"
 
-#include "macro-assembler.h"
+#include "heap.h"
 #include "zone.h"
 #include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
+struct Register;
+
+class SafepointEntry BASE_EMBEDDED {
+ public:
+  SafepointEntry() : info_(0), bits_(NULL) {}
+
+  SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) {
+    ASSERT(is_valid());
+  }
+
+  bool is_valid() const { return bits_ != NULL; }
+
+  bool Equals(const SafepointEntry& other) const {
+    return info_ == other.info_ && bits_ == other.bits_;
+  }
+
+  void Reset() {
+    info_ = 0;
+    bits_ = NULL;
+  }
+
+  int deoptimization_index() const {
+    ASSERT(is_valid());
+    return DeoptimizationIndexField::decode(info_);
+  }
+
+  int gap_code_size() const {
+    ASSERT(is_valid());
+    return GapCodeSizeField::decode(info_);
+  }
+
+  int argument_count() const {
+    ASSERT(is_valid());
+    return ArgumentsField::decode(info_);
+  }
+
+  bool has_doubles() const {
+    ASSERT(is_valid());
+    return SaveDoublesField::decode(info_);
+  }
+
+  uint8_t* bits() {
+    ASSERT(is_valid());
+    return bits_;
+  }
+
+  bool HasRegisters() const;
+  bool HasRegisterAt(int reg_index) const;
+
+  // Reserve 13 bits for the gap code size. On ARM a constant pool can be
+  // emitted when generating the gap code. The size of the constant pool is
+  // less than what can be represented in 12 bits, so 13 bits leaves room for
+  // instructions preceding a potential constant pool emission.
+  static const int kGapCodeSizeBits = 13;
+  static const int kArgumentsFieldBits = 3;
+  static const int kSaveDoublesFieldBits = 1;
+  static const int kDeoptIndexBits =
+      32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
+  class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
+  class DeoptimizationIndexField: public BitField<int,
+                                                  kGapCodeSizeBits,
+                                                  kDeoptIndexBits> {};  // NOLINT
+  class ArgumentsField: public BitField<unsigned,
+                                        kGapCodeSizeBits + kDeoptIndexBits,
+                                        kArgumentsFieldBits> {};  // NOLINT
+  class SaveDoublesField: public BitField<bool,
+                                          kGapCodeSizeBits + kDeoptIndexBits +
+                                          kArgumentsFieldBits,
+                                          kSaveDoublesFieldBits> { }; // NOLINT
+
+ private:
+  unsigned info_;
+  uint8_t* bits_;
+};
+
+
 class SafepointTable BASE_EMBEDDED {
  public:
   explicit SafepointTable(Code* code);
@@ -52,28 +128,15 @@
     return Memory::uint32_at(GetPcOffsetLocation(index));
   }
 
-  int GetDeoptimizationIndex(unsigned index) const {
+  SafepointEntry GetEntry(unsigned index) const {
     ASSERT(index < length_);
-    unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
-    return DeoptimizationIndexField::decode(value);
+    unsigned info = Memory::uint32_at(GetInfoLocation(index));
+    uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
+    return SafepointEntry(info, bits);
   }
 
-  unsigned GetGapCodeSize(unsigned index) const {
-    ASSERT(index < length_);
-    unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
-    return GapCodeSizeField::decode(value);
-  }
-
-  uint8_t* GetEntry(unsigned index) const {
-    ASSERT(index < length_);
-    return &Memory::uint8_at(entries_ + (index * entry_size_));
-  }
-
-  class GapCodeSizeField: public BitField<unsigned, 0, 8> {};
-  class DeoptimizationIndexField: public BitField<int, 8, 24> {};
-
-  static bool HasRegisters(uint8_t* entry);
-  static bool HasRegisterAt(uint8_t* entry, int reg_index);
+  // Returns the entry for the given pc.
+  SafepointEntry FindEntry(Address pc) const;
 
   void PrintEntry(unsigned index) const;
 
@@ -94,7 +157,7 @@
            (index * kPcAndDeoptimizationIndexSize);
   }
 
-  Address GetDeoptimizationLocation(unsigned index) const {
+  Address GetInfoLocation(unsigned index) const {
     return GetPcOffsetLocation(index) + kPcSize;
   }
 
@@ -109,15 +172,19 @@
   Address entries_;
 
   friend class SafepointTableBuilder;
+  friend class SafepointEntry;
+
+  DISALLOW_COPY_AND_ASSIGN(SafepointTable);
 };
 
 
 class Safepoint BASE_EMBEDDED {
  public:
-  static const int kNoDeoptimizationIndex = 0x00ffffff;
+  static const int kNoDeoptimizationIndex =
+      (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
 
   void DefinePointerSlot(int index) { indexes_->Add(index); }
-  void DefinePointerRegister(Register reg) { registers_->Add(reg.code()); }
+  void DefinePointerRegister(Register reg);
 
  private:
   Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
@@ -153,6 +220,16 @@
       int arguments,
       int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
 
+  // Define a new safepoint for the current position in the body, with all
+  // double registers and the normal registers on the stack, and take the
+  // number of arguments on top of the registers into account.
+  // TODO(1043): Rewrite the three SafepointTableBuilder::DefineSafepoint
+  // methods into one method that uses template arguments.
+  Safepoint DefineSafepointWithRegistersAndDoubles(
+      Assembler* assembler,
+      int arguments,
+      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+
   // Update the last safepoint with the size of the code generated for the gap
   // following it.
   void SetPcAfterGap(int pc) {
@@ -170,9 +247,11 @@
     unsigned pc;
     unsigned deoptimization_index;
     unsigned pc_after_gap;
+    unsigned arguments;
+    bool has_doubles;
   };
 
-  uint32_t EncodeDeoptimizationIndexAndGap(DeoptimizationInfo info);
+  uint32_t EncodeExceptPC(const DeoptimizationInfo& info);
 
   ZoneList<DeoptimizationInfo> deoptimization_info_;
   ZoneList<ZoneList<int>*> indexes_;
diff --git a/src/scanner-base.h b/src/scanner-base.h
index b668df5..1024ad1 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -64,10 +64,10 @@
   // Returns and advances past the next UC16 character in the input
   // stream. If there are no more characters, it returns a negative
   // value.
-  inline int32_t Advance() {
+  inline uc32 Advance() {
     if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
       pos_++;
-      return *(buffer_cursor_++);
+      return static_cast<uc32>(*(buffer_cursor_++));
     }
     // Note: currently the following increment is necessary to avoid a
     // parser problem! The scanner treats the final kEndOfInput as
@@ -97,13 +97,14 @@
     return SlowSeekForward(character_count);
   }
 
-  // Pushes back the most recently read UC16 character, i.e.,
-  // the value returned by the most recent call to Advance.
+  // Pushes back the most recently read UC16 character (or negative
+  // value if at end of input), i.e., the value returned by the most recent
+  // call to Advance.
   // Must not be used right after calling SeekForward.
-  virtual void PushBack(uc16 character) = 0;
+  virtual void PushBack(int32_t character) = 0;
 
  protected:
-  static const int32_t kEndOfInput = -1;
+  static const uc32 kEndOfInput = -1;
 
   // Ensures that the buffer_cursor_ points to the character at
   // position pos_ of the input, if possible. If the position
diff --git a/src/scanner.cc b/src/scanner.cc
index 7fd6ef2..b66d10b 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -48,14 +48,18 @@
 
 BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
 
-void BufferedUC16CharacterStream::PushBack(uc16 character) {
-  if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
-    // buffer_ is writable, buffer_cursor_ is const pointer.
-    buffer_[--buffer_cursor_ - buffer_] = character;
+void BufferedUC16CharacterStream::PushBack(uc32 character) {
+  if (character == kEndOfInput) {
     pos_--;
     return;
   }
-  SlowPushBack(character);
+  if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
+    // buffer_ is writable, buffer_cursor_ is const pointer.
+    buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
+    pos_--;
+    return;
+  }
+  SlowPushBack(static_cast<uc16>(character));
 }
 
 
diff --git a/src/scanner.h b/src/scanner.h
index bdf899b..d762182 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -43,7 +43,7 @@
   BufferedUC16CharacterStream();
   virtual ~BufferedUC16CharacterStream();
 
-  virtual void PushBack(uc16 character);
+  virtual void PushBack(uc32 character);
 
  protected:
   static const unsigned kBufferSize = 512;
@@ -107,11 +107,12 @@
                                            int end_position);
   virtual ~ExternalTwoByteStringUC16CharacterStream();
 
-  virtual void PushBack(uc16 character) {
+  virtual void PushBack(uc32 character) {
     ASSERT(buffer_cursor_ > raw_data_);
     buffer_cursor_--;
     pos_--;
   }
+
  protected:
   virtual unsigned SlowSeekForward(unsigned delta) {
     // Fast case always handles seeking.
diff --git a/src/scopes.cc b/src/scopes.cc
index 3565e11..58a10ee 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -112,68 +112,74 @@
 
 // Dummy constructor
 Scope::Scope(Type type)
-  : outer_scope_(NULL),
-    inner_scopes_(0),
-    type_(type),
-    scope_name_(Factory::empty_symbol()),
+  : inner_scopes_(0),
     variables_(false),
     temps_(0),
     params_(0),
-    dynamics_(NULL),
     unresolved_(0),
-    decls_(0),
-    receiver_(NULL),
-    function_(NULL),
-    arguments_(NULL),
-    arguments_shadow_(NULL),
-    illegal_redecl_(NULL),
-    scope_inside_with_(false),
-    scope_contains_with_(false),
-    scope_calls_eval_(false),
-    outer_scope_calls_eval_(false),
-    inner_scope_calls_eval_(false),
-    outer_scope_is_eval_scope_(false),
-    force_eager_compilation_(false),
-    num_stack_slots_(0),
-    num_heap_slots_(0) {
+    decls_(0) {
+  SetDefaults(type, NULL, NULL);
+  ASSERT(!resolved());
 }
 
 
 Scope::Scope(Scope* outer_scope, Type type)
-  : outer_scope_(outer_scope),
-    inner_scopes_(4),
-    type_(type),
-    scope_name_(Factory::empty_symbol()),
+  : inner_scopes_(4),
+    variables_(),
     temps_(4),
     params_(4),
-    dynamics_(NULL),
     unresolved_(16),
-    decls_(4),
-    receiver_(NULL),
-    function_(NULL),
-    arguments_(NULL),
-    arguments_shadow_(NULL),
-    illegal_redecl_(NULL),
-    scope_inside_with_(false),
-    scope_contains_with_(false),
-    scope_calls_eval_(false),
-    outer_scope_calls_eval_(false),
-    inner_scope_calls_eval_(false),
-    outer_scope_is_eval_scope_(false),
-    force_eager_compilation_(false),
-    num_stack_slots_(0),
-    num_heap_slots_(0) {
+    decls_(4) {
+  SetDefaults(type, outer_scope, NULL);
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
   ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
   ASSERT(!HasIllegalRedeclaration());
+  ASSERT(!resolved());
 }
 
 
+Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
+  : inner_scopes_(4),
+    variables_(),
+    temps_(4),
+    params_(4),
+    unresolved_(16),
+    decls_(4) {
+  ASSERT(scope_info != NULL);
+  SetDefaults(FUNCTION_SCOPE, inner_scope->outer_scope(), scope_info);
+  ASSERT(resolved());
+  InsertAfterScope(inner_scope);
+  if (scope_info->HasHeapAllocatedLocals()) {
+    num_heap_slots_ = scope_info_->NumberOfContextSlots();
+  }
+}
+
+
+
 bool Scope::Analyze(CompilationInfo* info) {
   ASSERT(info->function() != NULL);
   Scope* top = info->function()->scope();
+
+  // If we have a serialized scope info, reuse it.
+  if (!info->closure().is_null()) {
+    SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
+    if (scope_info != SerializedScopeInfo::Empty()) {
+      Scope* scope = top;
+      JSFunction* current = *info->closure();
+      do {
+        current = current->context()->closure();
+        SerializedScopeInfo* scope_info = current->shared()->scope_info();
+        if (scope_info != SerializedScopeInfo::Empty()) {
+          scope = new Scope(scope, scope_info);
+        } else {
+          ASSERT(current->context()->IsGlobalContext());
+        }
+      } while (!current->context()->IsGlobalContext());
+    }
+  }
+
   while (top->outer_scope() != NULL) top = top->outer_scope();
   top->AllocateVariables(info->calling_context());
 
@@ -191,6 +197,8 @@
 
 
 void Scope::Initialize(bool inside_with) {
+  ASSERT(!resolved());
+
   // Add this scope as a new inner scope of the outer scope.
   if (outer_scope_ != NULL) {
     outer_scope_->inner_scopes_.Add(this);
@@ -210,7 +218,7 @@
   Variable* var =
       variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
                          false, Variable::THIS);
-  var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
+  var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
   receiver_ = var;
 
   if (is_function_scope()) {
@@ -224,7 +232,28 @@
 
 
 Variable* Scope::LocalLookup(Handle<String> name) {
-  return variables_.Lookup(name);
+  Variable* result = variables_.Lookup(name);
+  if (result != NULL || !resolved()) {
+    return result;
+  }
+  // If the scope is resolved, we can find a variable in serialized scope info.
+
+  // We should never look up 'arguments' in this scope,
+  // as it is implicitly present in any scope.
+  ASSERT(*name != *Factory::arguments_symbol());
+
+  // Check context slot lookup.
+  Variable::Mode mode;
+  int index = scope_info_->ContextSlotIndex(*name, &mode);
+  if (index < 0) {
+    return NULL;
+  }
+
+  // Check that there is no local slot with the given name.
+  ASSERT(scope_info_->StackSlotIndex(*name) < 0);
+  Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
+  return var;
 }
 
 
@@ -250,6 +279,7 @@
   // DYNAMIC variables are introduced during variable allocation,
   // INTERNAL variables are allocated explicitly, and TEMPORARY
   // variables are allocated via NewTemporary().
+  ASSERT(!resolved());
   ASSERT(mode == Variable::VAR || mode == Variable::CONST);
   return variables_.Declare(this, name, mode, true, Variable::NORMAL);
 }
@@ -273,6 +303,7 @@
   // Note that we must not share the unresolved variables with
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
+  ASSERT(!resolved());
   VariableProxy* proxy = new VariableProxy(name, false, inside_with);
   unresolved_.Add(proxy);
   return proxy;
@@ -292,6 +323,7 @@
 
 
 Variable* Scope::NewTemporary(Handle<String> name) {
+  ASSERT(!resolved());
   Variable* var =
       new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
   temps_.Add(var);
@@ -550,7 +582,7 @@
     // Declare a new non-local.
     var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
     // Allocate it by giving it a dynamic lookup.
-    var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
+    var->set_rewrite(new Slot(var, Slot::LOOKUP, -1));
   }
   return var;
 }
@@ -612,8 +644,9 @@
   ASSERT(var != NULL);
 
   // If this is a lookup from an inner scope, mark the variable.
-  if (inner_lookup)
-    var->is_accessed_from_inner_scope_ = true;
+  if (inner_lookup) {
+    var->MarkAsAccessedFromInnerScope();
+  }
 
   // If the variable we have found is just a guess, invalidate the
   // result. If the found variable is local, record that fact so we
@@ -753,7 +786,7 @@
   // via an eval() call.  This is only possible if the variable has a
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
-      (var->is_accessed_from_inner_scope_ ||
+      (var->is_accessed_from_inner_scope() ||
        scope_calls_eval_ || inner_scope_calls_eval_ ||
        scope_contains_with_)) {
     var->set_is_used(true);
@@ -771,7 +804,7 @@
   // context.
   return
     var->mode() != Variable::TEMPORARY &&
-    (var->is_accessed_from_inner_scope_ ||
+    (var->is_accessed_from_inner_scope() ||
      scope_calls_eval_ || inner_scope_calls_eval_ ||
      scope_contains_with_ || var->is_global());
 }
@@ -787,12 +820,12 @@
 
 
 void Scope::AllocateStackSlot(Variable* var) {
-  var->rewrite_ = new Slot(var, Slot::LOCAL, num_stack_slots_++);
+  var->set_rewrite(new Slot(var, Slot::LOCAL, num_stack_slots_++));
 }
 
 
 void Scope::AllocateHeapSlot(Variable* var) {
-  var->rewrite_ = new Slot(var, Slot::CONTEXT, num_heap_slots_++);
+  var->set_rewrite(new Slot(var, Slot::CONTEXT, num_heap_slots_++));
 }
 
 
@@ -857,7 +890,7 @@
           // It is ok to set this only now, because arguments is a local
           // variable that is allocated after the parameters have been
           // allocated.
-          arguments_shadow_->is_accessed_from_inner_scope_ = true;
+          arguments_shadow_->MarkAsAccessedFromInnerScope();
         }
         Property* rewrite =
             new Property(new VariableProxy(arguments_shadow_),
@@ -865,7 +898,7 @@
                          RelocInfo::kNoPosition,
                          Property::SYNTHETIC);
         rewrite->set_is_arguments_access(true);
-        var->rewrite_ = rewrite;
+        var->set_rewrite(rewrite);
       }
     }
 
@@ -880,23 +913,23 @@
       ASSERT(var->scope() == this);
       if (MustAllocate(var)) {
         if (MustAllocateInContext(var)) {
-          ASSERT(var->rewrite_ == NULL ||
+          ASSERT(var->rewrite() == NULL ||
                  (var->AsSlot() != NULL &&
                   var->AsSlot()->type() == Slot::CONTEXT));
-          if (var->rewrite_ == NULL) {
+          if (var->rewrite() == NULL) {
             // Only set the heap allocation if the parameter has not
             // been allocated yet.
             AllocateHeapSlot(var);
           }
         } else {
-          ASSERT(var->rewrite_ == NULL ||
+          ASSERT(var->rewrite() == NULL ||
                  (var->AsSlot() != NULL &&
                   var->AsSlot()->type() == Slot::PARAMETER));
           // Set the parameter index always, even if the parameter
           // was seen before! (We need to access the actual parameter
           // supplied for the last occurrence of a multiply declared
           // parameter.)
-          var->rewrite_ = new Slot(var, Slot::PARAMETER, i);
+          var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
         }
       }
     }
@@ -906,10 +939,10 @@
 
 void Scope::AllocateNonParameterLocal(Variable* var) {
   ASSERT(var->scope() == this);
-  ASSERT(var->rewrite_ == NULL ||
+  ASSERT(var->rewrite() == NULL ||
          (!var->IsVariable(Factory::result_symbol())) ||
          (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
-  if (var->rewrite_ == NULL && MustAllocate(var)) {
+  if (var->rewrite() == NULL && MustAllocate(var)) {
     if (MustAllocateInContext(var)) {
       AllocateHeapSlot(var);
     } else {
@@ -943,15 +976,18 @@
 
 
 void Scope::AllocateVariablesRecursively() {
-  // The number of slots required for variables.
-  num_stack_slots_ = 0;
-  num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
-
   // Allocate variables for inner scopes.
   for (int i = 0; i < inner_scopes_.length(); i++) {
     inner_scopes_[i]->AllocateVariablesRecursively();
   }
 
+  // If the scope is already resolved, we still need to allocate variables
+  // in inner scopes which might not have been resolved yet.
+  if (resolved()) return;
+  // The number of slots required for variables.
+  num_stack_slots_ = 0;
+  num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+
   // Allocate variables for this scope.
   // Parameters must be allocated first, if any.
   if (is_function_scope()) AllocateParameterLocals();
diff --git a/src/scopes.h b/src/scopes.h
index d909b81..09901ad 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -302,6 +302,14 @@
 
   explicit Scope(Type type);
 
+  void InsertAfterScope(Scope* scope) {
+    inner_scopes_.Add(scope);
+    outer_scope_ = scope->outer_scope_;
+    outer_scope_->inner_scopes_.RemoveElement(scope);
+    outer_scope_->inner_scopes_.Add(this);
+    scope->outer_scope_ = this;
+  }
+
   // Scope tree.
   Scope* outer_scope_;  // the immediately enclosing outer scope, or NULL
   ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
@@ -355,6 +363,10 @@
   int num_stack_slots_;
   int num_heap_slots_;
 
+  // Serialized scopes support.
+  SerializedScopeInfo* scope_info_;
+  bool resolved() { return scope_info_ != NULL; }
+
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
   Variable* NonLocal(Handle<String> name, Variable::Mode mode);
@@ -386,6 +398,33 @@
   void AllocateNonParameterLocal(Variable* var);
   void AllocateNonParameterLocals();
   void AllocateVariablesRecursively();
+
+ private:
+  Scope(Scope* inner_scope, SerializedScopeInfo* scope_info);
+
+  void SetDefaults(Type type,
+                   Scope* outer_scope,
+                   SerializedScopeInfo* scope_info) {
+    outer_scope_ = outer_scope;
+    type_ = type;
+    scope_name_ = Factory::empty_symbol();
+    dynamics_ = NULL;
+    receiver_ = NULL;
+    function_ = NULL;
+    arguments_ = NULL;
+    arguments_shadow_ = NULL;
+    illegal_redecl_ = NULL;
+    scope_inside_with_ = false;
+    scope_contains_with_ = false;
+    scope_calls_eval_ = false;
+    outer_scope_calls_eval_ = false;
+    inner_scope_calls_eval_ = false;
+    outer_scope_is_eval_scope_ = false;
+    force_eager_compilation_ = false;
+    num_stack_slots_ = 0;
+    num_heap_slots_ = 0;
+    scope_info_ = scope_info;
+  }
 };
 
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 19e6518..6a6c6bb 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -486,17 +486,21 @@
       UNCLASSIFIED,
       36,
       "LDoubleConstant::one_half");
-  Add(ExternalReference::address_of_negative_infinity().address(),
+  Add(ExternalReference::address_of_minus_zero().address(),
       UNCLASSIFIED,
       37,
+      "LDoubleConstant::minus_zero");
+  Add(ExternalReference::address_of_negative_infinity().address(),
+      UNCLASSIFIED,
+      38,
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function().address(),
       UNCLASSIFIED,
-      38,
+      39,
       "power_double_double_function");
   Add(ExternalReference::power_double_int_function().address(),
       UNCLASSIFIED,
-      39,
+      40,
       "power_double_int_function");
   Add(ExternalReference::arguments_marker_location().address(),
       UNCLASSIFIED,
diff --git a/src/token.h b/src/token.h
index 2f5ca1b..fb890d2 100644
--- a/src/token.h
+++ b/src/token.h
@@ -217,7 +217,7 @@
   // Returns a string corresponding to the C++ token name
   // (e.g. "LT" for the token LT).
   static const char* Name(Value tok) {
-    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    ASSERT(tok < NUM_TOKENS);  // tok is unsigned.
     return name_[tok];
   }
 
@@ -292,14 +292,14 @@
   // (e.g. "<" for the token LT) or NULL if the token doesn't
   // have a (unique) string (e.g. an IDENTIFIER).
   static const char* String(Value tok) {
-    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    ASSERT(tok < NUM_TOKENS);  // tok is unsigned.
     return string_[tok];
   }
 
   // Returns the precedence > 0 for binary and compare
   // operators; returns 0 otherwise.
   static int Precedence(Value tok) {
-    ASSERT(0 <= tok && tok < NUM_TOKENS);
+    ASSERT(tok < NUM_TOKENS);  // tok is unsigned.
     return precedence_[tok];
   }
 
diff --git a/src/type-info.cc b/src/type-info.cc
index 8719439..032d985 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -58,7 +58,9 @@
 }
 
 
-TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code) {
+TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
+                                       Handle<Context> global_context) {
+  global_context_ = global_context;
   Initialize(code);
 }
 
@@ -71,17 +73,18 @@
 
 
 bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
-  return IsMonomorphic(expr->position());
+  return GetElement(map_, expr->position())->IsMap();
 }
 
 
 bool TypeFeedbackOracle:: StoreIsMonomorphic(Assignment* expr) {
-  return IsMonomorphic(expr->position());
+  return GetElement(map_, expr->position())->IsMap();
 }
 
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
-  return IsMonomorphic(expr->position());
+  Handle<Object> value = GetElement(map_, expr->position());
+  return value->IsMap() || value->IsSmi();
 }
 
 
@@ -97,12 +100,6 @@
 }
 
 
-Handle<Map> TypeFeedbackOracle::CallMonomorphicReceiverType(Call* expr) {
-  ASSERT(CallIsMonomorphic(expr));
-  return Handle<Map>::cast(GetElement(map_, expr->position()));
-}
-
-
 ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
                                                    Handle<String> name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
@@ -126,6 +123,37 @@
 }
 
 
+CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
+  Handle<Object> value = GetElement(map_, expr->position());
+  if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
+  CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
+  ASSERT(check != RECEIVER_MAP_CHECK);
+  return check;
+}
+
+
+Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
+    CheckType check) {
+  JSFunction* function = NULL;
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      UNREACHABLE();
+      break;
+    case STRING_CHECK:
+      function = global_context_->string_function();
+      break;
+    case NUMBER_CHECK:
+      function = global_context_->number_function();
+      break;
+    case BOOLEAN_CHECK:
+      function = global_context_->boolean_function();
+      break;
+  }
+  ASSERT(function != NULL);
+  return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
+}
+
+
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
   Handle<Object> object = GetElement(map_, expr->position());
   return *object == Builtins::builtin(id);
@@ -220,6 +248,7 @@
   return unknown;
 }
 
+
 TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
   Handle<Object> object = GetElement(map_, clause->position());
   TypeInfo unknown = TypeInfo::Unknown();
@@ -247,12 +276,11 @@
 }
 
 
-
 ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
                                                       Handle<String> name,
                                                       Code::Flags flags) {
   Handle<Object> object = GetElement(map_, position);
-  if (object->IsUndefined()) return NULL;
+  if (object->IsUndefined() || object->IsSmi()) return NULL;
 
   if (*object == Builtins::builtin(Builtins::StoreIC_GlobalProxy)) {
     // TODO(fschneider): We could collect the maps and signal that
@@ -301,11 +329,20 @@
         SetElement(map_, position, target);
       }
     } else if (state == MONOMORPHIC) {
-      Handle<Map> map = Handle<Map>(target->FindFirstMap());
-      if (*map == NULL) {
-        SetElement(map_, position, target);
+      if (target->kind() != Code::CALL_IC ||
+          target->check_type() == RECEIVER_MAP_CHECK) {
+        Handle<Map> map = Handle<Map>(target->FindFirstMap());
+        if (*map == NULL) {
+          SetElement(map_, position, target);
+        } else {
+          SetElement(map_, position, map);
+        }
       } else {
-        SetElement(map_, position, map);
+        ASSERT(target->kind() == Code::CALL_IC);
+        CheckType check = target->check_type();
+        ASSERT(check != RECEIVER_MAP_CHECK);
+        SetElement(map_, position, Handle<Object>(Smi::FromInt(check)));
+        ASSERT(Smi::cast(*GetElement(map_, position))->value() == check);
       }
     } else if (state == MEGAMORPHIC) {
       SetElement(map_, position, target);
@@ -342,8 +379,6 @@
         } else if (kind == Code::COMPARE_IC) {
           if (target->compare_state() == CompareIC::GENERIC) continue;
         } else {
-          if (kind == Code::CALL_IC && state == MONOMORPHIC &&
-              target->check_type() != RECEIVER_MAP_CHECK) continue;
           if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
         }
         code_positions->Add(
diff --git a/src/type-info.h b/src/type-info.h
index cb3e75d..98d97de 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -236,7 +236,7 @@
     RESULT
   };
 
-  explicit TypeFeedbackOracle(Handle<Code> code);
+  TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
 
   bool LoadIsMonomorphic(Property* expr);
   bool StoreIsMonomorphic(Assignment* expr);
@@ -244,12 +244,14 @@
 
   Handle<Map> LoadMonomorphicReceiverType(Property* expr);
   Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
-  Handle<Map> CallMonomorphicReceiverType(Call* expr);
 
   ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
   ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
   ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
 
+  CheckType GetCallCheckType(Call* expr);
+  Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
+
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // Get type information for arithmetic operations and compares.
@@ -260,8 +262,6 @@
  private:
   void Initialize(Handle<Code> code);
 
-  bool IsMonomorphic(int pos) { return GetElement(map_, pos)->IsMap(); }
-
   ZoneMapList* CollectReceiverTypes(int position,
                                     Handle<String> name,
                                     Code::Flags flags);
@@ -272,6 +272,7 @@
                         List<int>* code_positions,
                         List<int>* source_positions);
 
+  Handle<Context> global_context_;
   Handle<JSObject> map_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
diff --git a/src/v8globals.h b/src/v8globals.h
index 65bbf6a..3f27114 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -77,7 +77,8 @@
     reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
 const Address kFromSpaceZapValue =
     reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
-const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
+const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
+const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeed);
 #else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
diff --git a/src/v8utils.h b/src/v8utils.h
index cfeb570..e9623be 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -29,6 +29,7 @@
 #define V8_V8UTILS_H_
 
 #include "utils.h"
+#include "platform.h"  // For va_list on Solaris.
 
 namespace v8 {
 namespace internal {
diff --git a/src/variables.cc b/src/variables.cc
index c1440b7..7f580fc 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -98,6 +98,12 @@
 }
 
 
+bool Variable::IsContextSlot() const {
+  Slot* s = AsSlot();
+  return s != NULL && s->type() == Slot::CONTEXT;
+}
+
+
 Variable::Variable(Scope* scope,
                    Handle<String> name,
                    Mode mode,
diff --git a/src/variables.h b/src/variables.h
index 9e460f7..882a52e 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -138,6 +138,9 @@
   bool is_accessed_from_inner_scope() const {
     return is_accessed_from_inner_scope_;
   }
+  void MarkAsAccessedFromInnerScope() {
+    is_accessed_from_inner_scope_ = true;
+  }
   bool is_used() { return is_used_; }
   void set_is_used(bool flag) { is_used_ = flag; }
 
@@ -148,6 +151,7 @@
   bool IsStackAllocated() const;
   bool IsParameter() const;  // Includes 'this'.
   bool IsStackLocal() const;
+  bool IsContextSlot() const;
 
   bool is_dynamic() const {
     return (mode_ == DYNAMIC ||
@@ -175,6 +179,7 @@
   }
 
   Expression* rewrite() const { return rewrite_; }
+  void set_rewrite(Expression* expr) { rewrite_ = expr; }
 
   StaticType* type() { return &type_; }
 
@@ -197,8 +202,6 @@
   // Code generation.
   // rewrite_ is usually a Slot or a Property, but may be any expression.
   Expression* rewrite_;
-
-  friend class Scope;  // Has explicit access to rewrite_.
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index c1cc2fc..a77d85f 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     0
-#define BUILD_NUMBER      7
+#define BUILD_NUMBER      8
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 3629967..9060d57 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -186,6 +186,20 @@
   CPU::FlushICache(pc_, instruction_count);
 }
 
+
+// -----------------------------------------------------------------------------
+// Register constants.
+
+const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = {
+    // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
+    0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+};
+
+const int Register::allocationIndexByRegisterCode[kNumRegisters] = {
+    0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+};
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Operand
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 7bcc7c5..fa2f4c3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -98,19 +98,29 @@
   static const int kNumRegisters = 16;
   static const int kNumAllocatableRegisters = 10;
 
+  static int ToAllocationIndex(Register reg) {
+    return allocationIndexByRegisterCode[reg.code()];
+  }
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    Register result = { registerCodeByAllocationIndex[index] };
+    return result;
+  }
+
   static const char* AllocationIndexToString(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
       "rax",
-      "rcx",
-      "rdx",
       "rbx",
+      "rdx",
+      "rcx",
       "rdi",
       "r8",
       "r9",
       "r11",
-      "r12",
-      "r14"
+      "r14",
+      "r12"
     };
     return names[index];
   }
@@ -143,6 +153,9 @@
   // Unfortunately we can't make this private in a struct when initializing
   // by assignment.
   int code_;
+ private:
+  static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
+  static const int allocationIndexByRegisterCode[kNumRegisters];
 };
 
 const Register rax = { 0 };
@@ -173,6 +186,12 @@
     return reg.code() - 1;
   }
 
+  static XMMRegister FromAllocationIndex(int index) {
+    ASSERT(0 <= index && index < kNumAllocatableRegisters);
+    XMMRegister result = { index + 1 };
+    return result;
+  }
+
   static const char* AllocationIndexToString(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
@@ -196,6 +215,7 @@
   }
 
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
     return code_;
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 4e890cd..8bb3ac0 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
@@ -75,3 +77,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
new file mode 100644
index 0000000..e586851
--- /dev/null
+++ b/src/x64/lithium-codegen-x64.cc
@@ -0,0 +1,1475 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "x64/lithium-codegen-x64.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver()
+    : nodes_(32),
+      identified_cycles_(4),
+      result_(16),
+      next_visited_id_(0) {
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::Resolve(
+    const ZoneList<LMoveOperands>* moves,
+    LOperand* marker_operand) {
+  nodes_.Rewind(0);
+  identified_cycles_.Rewind(0);
+  result_.Rewind(0);
+  next_visited_id_ = 0;
+
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i], marker_operand);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
+  ZoneList<LOperand*> cycle_operands(8);
+  cycle_operands.Add(marker_operand);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    cycle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  cycle_operands.Add(marker_operand);
+
+  for (int i = cycle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = cycle_operands[i];
+    LOperand* to = cycle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This introduces a cycle. Save.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(StackSlotCount());
+  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack allocated buffer goes out of scope.
+  int length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ int3();
+  }
+#endif
+
+  __ push(rbp);  // Caller's frame pointer.
+  __ movq(rbp, rsp);
+  __ push(rsi);  // Callee's context.
+  __ push(rdi);  // Callee's JS function.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = StackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ movl(rax, Immediate(slots));
+      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
+      Label loop;
+      __ bind(&loop);
+      __ push(kScratchRegister);
+      __ decl(rax);
+      __ j(not_zero, &loop);
+    } else {
+      __ subq(rsp, Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+      // On windows, you may not access the stack more than one page below
+      // the most recently mapped page. To make the allocated area randomly
+      // accessible, we write to each page in turn (the value is irrelevant).
+      const int kPageSize = 4 * KB;
+      for (int offset = slots * kPointerSize - kPageSize;
+           offset > 0;
+           offset -= kPageSize) {
+        __ movq(Operand(rsp, offset), rax);
+      }
+#endif
+    }
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+    LDeferredCode* code = deferred_[i];
+    __ bind(code->entry());
+    code->Generate();
+    __ jmp(code->exit());
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  Abort("Unimplemented: %s", "GenerateSafepointTable");
+  return false;
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int index) const {
+  return XMMRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+  return op->IsConstantOperand() &&
+      chunk_->LookupLiteralRepresentation(op).IsInteger32();
+}
+
+
+bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
+  return op->IsConstantOperand() &&
+      chunk_->LookupLiteralRepresentation(op).IsTagged();
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  Handle<Object> literal = chunk_->LookupLiteral(op);
+  Representation r = chunk_->LookupLiteralRepresentation(op);
+  ASSERT(r.IsTagged());
+  return literal;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  // Does not handle registers. In X64 assembler, plain registers are not
+  // representable as an Operand.
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return Operand(rbp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
+    return Operand(rbp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (environment->spilled_registers() != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          environment->spilled_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(translation,
+                         environment->spilled_registers()[value->index()],
+                         environment->HasTaggedValueAt(i));
+      } else if (
+          value->IsDoubleRegister() &&
+          environment->spilled_double_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(
+            translation,
+            environment->spilled_double_registers()[value->index()],
+            false);
+      }
+    }
+
+    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+  }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed from the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = StackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    XMMRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  Abort("Unimplemented: %s", "CallCode");
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  Abort("Unimplemented: %s", "CallRuntime");
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise execution can
+  // continue from a previous bailout point, repeating the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  RecordSafepoint(instr->pointer_map(),
+                  deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  Abort("Unimplemented: %s", "RegisterEnvironmentForDeoptimization");
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+  Abort("Unimplemented: %s", "DeoptimizeIf");
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      Factory::NewDeoptimizationInputData(length, TENURED);
+
+  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+                                                    deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegisters(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register rsi always contains a pointer to the context.
+  safepoint.DefinePointerRegister(rsi);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  // xmm0 must always be a scratch register.
+  XMMRegister xmm_scratch = xmm0;
+  LUnallocated marker_operand(LUnallocated::NONE);
+
+  Register cpu_scratch = kScratchRegister;
+
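+  // The gap resolver uses marker_operand to break cycles in the move graph:
+  // moves into the marker spill the value to cpu_scratch/xmm_scratch, and
+  // moves out of the marker restore it from there.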
+  const ZoneList<LMoveOperands>* moves =
+      resolver_.Resolve(move->move_operands(), &marker_operand);
+  for (int i = moves->length() - 1; i >= 0; --i) {
+    LMoveOperands move = moves->at(i);
+    LOperand* from = move.from();
+    LOperand* to = move.to();
+    ASSERT(!from->IsDoubleRegister() ||
+           !ToDoubleRegister(from).is(xmm_scratch));
+    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
+    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
+    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
+    if (from->IsConstantOperand()) {
+      LConstantOperand* constant_from = LConstantOperand::cast(from);
+      if (to->IsRegister()) {
+        if (IsInteger32Constant(constant_from)) {
+          __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
+        } else {
+          __ Move(ToRegister(to), ToHandle(constant_from));
+        }
+      } else {
+        if (IsInteger32Constant(constant_from)) {
+          __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
+        } else {
+          __ Move(ToOperand(to), ToHandle(constant_from));
+        }
+      }
+    } else if (from == &marker_operand) {
+      if (to->IsRegister()) {
+        __ movq(ToRegister(to), cpu_scratch);
+      } else if (to->IsStackSlot()) {
+        __ movq(ToOperand(to), cpu_scratch);
+      } else if (to->IsDoubleRegister()) {
+        __ movsd(ToDoubleRegister(to), xmm_scratch);
+      } else {
+        ASSERT(to->IsDoubleStackSlot());
+        __ movsd(ToOperand(to), xmm_scratch);
+      }
+    } else if (to == &marker_operand) {
+      if (from->IsRegister()) {
+        __ movq(cpu_scratch, ToRegister(from));
+      } else if (from->IsStackSlot()) {
+        __ movq(cpu_scratch, ToOperand(from));
+      } else if (from->IsDoubleRegister()) {
+        __ movsd(xmm_scratch, ToDoubleRegister(from));
+      } else {
+        ASSERT(from->IsDoubleStackSlot());
+        __ movsd(xmm_scratch, ToOperand(from));
+      }
+    } else if (from->IsRegister()) {
+      if (to->IsRegister()) {
+        __ movq(ToRegister(to), ToRegister(from));
+      } else {
+        __ movq(ToOperand(to), ToRegister(from));
+      }
+    } else if (to->IsRegister()) {
+      __ movq(ToRegister(to), ToOperand(from));
+    } else if (from->IsStackSlot()) {
+      ASSERT(to->IsStackSlot());
+      __ push(rax);
+      __ movq(rax, ToOperand(from));
+      __ movq(ToOperand(to), rax);
+      __ pop(rax);
+    } else if (from->IsDoubleRegister()) {
+      ASSERT(to->IsDoubleStackSlot());
+      __ movsd(ToOperand(to), ToDoubleRegister(from));
+    } else if (to->IsDoubleRegister()) {
+      ASSERT(from->IsDoubleStackSlot());
+      __ movsd(ToDoubleRegister(to), ToOperand(from));
+    } else {
+      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+      __ movsd(xmm_scratch, ToOperand(from));
+      __ movsd(ToOperand(to), xmm_scratch);
+    }
+  }
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  Abort("Unimplemented: %s", "DoCallStub");
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Abort("Unimplemented: %s", "DoModI");
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  Abort("Unimplemented: %s", "DoDivI");}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Abort("Unimplemented: %s", "DoMultI");}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  Abort("Unimplemented: %s", "DoBitI");}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  Abort("Unimplemented: %s", "DoShiftI");
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  Abort("Unimplemented: %s", "DoSubI");
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  Abort("Unimplemented: %s", "DoConstantI");
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  Abort("Unimplemented: %s", "DoConstantI");
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+    ASSERT(instr->result()->IsRegister());
+  __ Move(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+  Abort("Unimplemented: %s", "DoJSArrayLength");
+}
+
+
+void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
+  Abort("Unimplemented: %s", "DoFixedArrayLength");
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Abort("Unimplemented: %s", "DoValueOf");
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  Abort("Unimplemented: %s", "DoBitNotI");
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  Abort("Unimplemented: %s", "DoThrow");
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  Abort("Unimplemented: %s", "DoAddI");
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  Abort("Unimplemented: %s", "DoArithmeticD");
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  Abort("Unimplemented: %s", "DoArithmeticT");
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+  Abort("Unimplemented: %s", "EmitBranch");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Abort("Unimplemented: %s", "DoBranch");
+}
+
+
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+  Abort("Unimplemented: %s", "EmitGoto");
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+  Abort("Unimplemented: %s", "DoDeferredStackCheck");
+}
+
+
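+// A goto that includes a stack check emits the check as deferred code so the
+// common path through the block remains a plain jump.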
+void LCodeGen::DoGoto(LGoto* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LGoto* instr_;
+  };
+
+  DeferredStackCheck* deferred = NULL;
+  if (instr->include_stack_check()) {
+    deferred = new DeferredStackCheck(this, instr);
+  }
+  EmitGoto(instr->block_id(), deferred);
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  Abort("Unimplemented: %s", "EmitCmpI");
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+  Abort("Unimplemented: %s", "DoCmpID");
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoCmpIDAndBranch");
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+  Abort("Unimplemented: %s", "DoCmpJSObjectEq");
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoCmpJSObjectAndBranch");
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+  Abort("Unimplemented: %s", "DoIsNull");
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoIsNullAndBranch");
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  Abort("Unimplemented: %s", "EmitIsObject");
+  return below_equal;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+  Abort("Unimplemented: %s", "DoIsObject");
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoIsObjectAndBranch");
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+  Abort("Unimplemented: %s", "DoIsSmi");
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoIsSmiAndBranch");
+}
+
+
+InstanceType LHasInstanceType::TestType() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
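+// Returns the condition to branch on after comparing an object's instance
+// type against TestType(); together they test whether the type falls in the
+// range [from, to].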
+Condition LHasInstanceType::BranchCondition() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+  Abort("Unimplemented: %s", "DoHasInstanceType");
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch");
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+  Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch");
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.  Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String>class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  Abort("Unimplemented: %s", "EmitClassOfTest");
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+  Abort("Unimplemented: %s", "DoClassOfTest");
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoClassOfTestAndBranch");
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoCmpMapAndBranch");
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  Abort("Unimplemented: %s", "DoInstanceOf");
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoInstanceOfAndBranch");
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+  Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal");
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                                Label* map_check) {
+  Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl");
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Abort("Unimplemented: %s", "DoCmpT");
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoCmpTAndBranch");
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Preserve the return value on the stack and rely on the runtime
+    // call to return the value in the same register.
+    __ push(rax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+  __ ret((ParameterCount() + 1) * kPointerSize);
+}
+
+
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+  Abort("Unimplemented: %s", "DoLoadGlobal");
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+  Abort("Unimplemented: %s", "DoStoreGlobal");
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Abort("Unimplemented: %s", "DoLoadContextSlot");
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Abort("Unimplemented: %s", "DoLoadNamedField");
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoLoadNamedGeneric");
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  Abort("Unimplemented: %s", "DoLoadElements");
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Abort("Unimplemented: %s", "DoArgumentsElements");
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Abort("Unimplemented: %s", "DoArgumentsLength");
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Abort("Unimplemented: %s", "DoApplyArguments");
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  Abort("Unimplemented: %s", "DoPushArgument");
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Abort("Unimplemented: %s", "DoGlobalObject");
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Abort("Unimplemented: %s", "DoGlobalReceiver");
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr) {
+  Abort("Unimplemented: %s", "CallKnownFunction");
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  Abort("Unimplemented: %s", "DoCallConstantFunction");
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathAbs");
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathFloor");
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathRound");
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathSqrt");
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathPowHalf");
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Abort("Unimplemented: %s", "DoPower");
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathLog");
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathCos");
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoMathSin");
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoUnaryMathOperation");
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  Abort("Unimplemented: %s", "DoCallKeyed");
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  Abort("Unimplemented: %s", "DoCallNamed");
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  Abort("Unimplemented: %s", "DoCallFunction");
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  Abort("Unimplemented: %s", "DoCallGlobal");
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  Abort("Unimplemented: %s", "DoCallKnownGlobal");
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  Abort("Unimplemented: %s", "DoCallNew");
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  Abort("Unimplemented: %s", "DoCallRuntime");
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Abort("Unimplemented: %s", "DoStoreNamedField");
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Abort("Unimplemented: %s", "DoBoundsCheck");
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  Abort("Unimplemented: %s", "DoInteger32ToDouble");
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  Abort("Unimplemented: %s", "DoNumberTagI");
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Abort("Unimplemented: %s", "DoDeferredNumberTagI");
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  Abort("Unimplemented: %s", "DoNumberTagD");
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  Abort("Unimplemented: %s", "DoDeferredNumberTagD");
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  Abort("Unimplemented: %s", "DoSmiTag");
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Abort("Unimplemented: %s", "DoSmiUntag");
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                XMMRegister result_reg,
+                                LEnvironment* env) {
+  Abort("Unimplemented: %s", "EmitNumberUntagD");
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Abort("Unimplemented: %s", "DoDeferredTaggedToI");
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  Abort("Unimplemented: %s", "DoTaggedToI");
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  Abort("Unimplemented: %s", "DoNumberUntagD");
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Abort("Unimplemented: %s", "DoDoubleToI");
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  Abort("Unimplemented: %s", "DoCheckSmi");
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Abort("Unimplemented: %s", "DoCheckInstanceType");
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  Abort("Unimplemented: %s", "DoCheckFunction");
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  Abort("Unimplemented: %s", "DoCheckMap");
+}
+
+
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+  Abort("Unimplemented: %s", "LoadHeapObject");
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Abort("Unimplemented: %s", "DoArrayLiteral");
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  Abort("Unimplemented: %s", "DoObjectLiteral");
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  Abort("Unimplemented: %s", "DoRegExpLiteral");
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  Abort("Unimplemented: %s", "DoFunctionLiteral");
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  Abort("Unimplemented: %s", "DoTypeof");
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+  Abort("Unimplemented: %s", "DoTypeofIs");
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoTypeofIsAndBranch");
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Abort("Unimplemented: %s", "EmitTypeofIs");
+  return no_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code is emitted for the lazy bailout instruction. It is used to
+  // capture the environment after a call, so the safepoint data can be
+  // populated with deoptimization information.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  Abort("Unimplemented: %s", "DoDeleteProperty");
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  // Perform stack overflow check.
+  NearLabel done;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &done);
+
+  StackCheckStub stub;
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  Abort("Unimplemented: %s", "DoOsrEntry");
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index cd1f08d..8d1c5c4 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "x64/lithium-x64.h"
 
+#include "checks.h"
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
@@ -39,22 +40,256 @@
 
 // Forward declarations.
 class LDeferredCode;
+class LGapNode;
+class SafepointGenerator;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver();
+  const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
+                                         LOperand* marker_operand);
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start, LOperand* marker_operand);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  int next_visited_id_;
+};
+
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(chunk->graph()->info()->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
 
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
-  bool GenerateCode() {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool GenerateCode();
 
   // Finish the code by setting stack height, safepoint, and bailout
   // information on it.
-  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LGoto* instr);
+  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                        Label* map_check);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+  MacroAssembler* masm() const { return masm_; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateSafepointTable();
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+  void CallRuntime(Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in rdi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr);
+
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  XMMRegister ToDoubleRegister(int index) const;
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  bool IsInteger32Constant(LConstantOperand* op) const;
+  int ToInteger32(LConstantOperand* op) const;
+  bool IsTaggedConstant(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
+  void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathCos(LUnaryMathOperation* instr);
+  void DoMathSin(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
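+// A piece of code emitted out of line after the main instruction stream.
+// An instruction jumps to entry() for its slow case, Generate() emits the
+// out-of-line code, and control returns through exit().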
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen), external_exit_(NULL) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+
+  void SetExit(Label *exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 8afa9d4..25a048b 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -25,12 +25,438 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "x64/lithium-x64.h"
 #include "x64/lithium-codegen-x64.h"
 
 namespace v8 {
 namespace internal {
 
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+  if (HasResult()) {
+    PrintOutputOperandTo(stream);
+  }
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < I; i++) {
+    stream->Add(i == 0 ? "= " : " ");
+    inputs_.at(i)->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  if (this->HasResult()) {
+    this->result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  input()->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  input()->PrintTo(stream);
+  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) {
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+  stream->Add("/%s ", hydrogen()->OpName());
+  input()->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+  stream->Add("[rcx] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  input()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) {
+  stream->Add("= class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  // All stack slots are double-width stack slots on x64.
+  // Alternatively, at some point, start using half-size
+  // stack slots for int32 values.
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (!goto_instr->include_stack_check() &&
+          label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int i = first + 1; i < last && can_eliminate; ++i) {
+          LInstruction* cur = instructions()->at(i);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
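+  // Each instruction is paired with a gap that holds parallel moves: the gap
+  // precedes control instructions so moves happen before the branch, and
+  // follows all other instructions.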
+  LGap* gap = new LGap(block);
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+  return index;
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
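+  // For example, with two parameters the receiver (index 0) maps to slot -3,
+  // the first parameter to -2, and the second to -1.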
+  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+
+// A parameter relative to rbp in the arguments stub.
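+// For example, with two parameters the receiver (index -1) is at
+// rbp + 4 * kPointerSize and the last parameter at rbp + 2 * kPointerSize.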
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + graph()->info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
   chunk_ = new LChunk(graph());
@@ -62,10 +488,948 @@
 }
 
 
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
-  Abort("Lithium not implemented on x64.");
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
 }
 
 
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          XMMRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+                                    LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateInstruction<1, I, T>* instr,
+    int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateInstruction<1, I, T>* instr,
+    XMMRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  instr->set_environment(CreateEnvironment(hydrogen_env));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instructions_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instructions_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  allocator_->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+  allocator_->MarkAsSaveDoubles();
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  Abort("Unimplemented: %s", "DoBit");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  Abort("Unimplemented: %s", "DoArithmeticD");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  Abort("Unimplemented: %s", "DoArithmeticT");
+  return NULL;
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  allocator_->BeginInstruction();
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    if (current->IsBranch()) {
+      instr->set_hydrogen_value(HBranch::cast(current)->value());
+    } else {
+      instr->set_hydrogen_value(current);
+    }
+
+    int index = chunk_->AddInstruction(instr, current_block_);
+    allocator_->SummarizeInstruction(index);
+  } else {
+    // This instruction should be omitted.
+    allocator_->OmitInstruction();
+  }
+  current_instruction_ = old_current;
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
+  for (int i = 0; i < value_count; ++i) {
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument(argument_index++);
+    } else {
+      op = UseOrConstant(value);
+      if (op->IsUnallocated()) {
+        LUnallocated* unalloc = LUnallocated::cast(op);
+        unalloc->set_policy(LUnallocated::ANY);
+      }
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                            instr->include_stack_check());
+  return (instr->include_stack_check())
+      ? AssignPointerMap(result)
+      : result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  Abort("Unimplemented: %s", "DoBranch");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+    HCompareMapAndBranch* instr) {
+  Abort("Unimplemented: %s", "DoCompareMapAndBranch");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  Abort("Unimplemented: %s", "DoArgumentsLength");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  Abort("Unimplemented: %s", "DoArgumentsElements");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  Abort("Unimplemented: %s", "DoInstanceOf");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  Abort("Unimplemented: %s", "DoApplyArguments");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  Abort("Unimplemented: %s", "DoPushArgument");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  Abort("Unimplemented: %s", "DoGlobalObject");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  Abort("Unimplemented: %s", "DoGlobalReceiver");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  Abort("Unimplemented: %s", "DoCallConstantFunction");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  Abort("Unimplemented: %s", "DoUnaryMathOperation");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  Abort("Unimplemented: %s", "DoCallKeyed");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  Abort("Unimplemented: %s", "DoCallNamed");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  Abort("Unimplemented: %s", "DoCallGlobal");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  Abort("Unimplemented: %s", "DoCallKnownGlobal");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  Abort("Unimplemented: %s", "DoCallNew");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  Abort("Unimplemented: %s", "DoCallFunction");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  Abort("Unimplemented: %s", "DoCallRuntime");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  Abort("Unimplemented: %s", "DoShr");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  Abort("Unimplemented: %s", "DoSar");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  Abort("Unimplemented: %s", "DoShl");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  Abort("Unimplemented: %s", "DoBitAnd");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  Abort("Unimplemented: %s", "DoBitNot");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  Abort("Unimplemented: %s", "DoBitOr");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  Abort("Unimplemented: %s", "DoBitXor");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  Abort("Unimplemented: %s", "DoDiv");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  Abort("Unimplemented: %s", "DoMod");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  Abort("Unimplemented: %s", "DoMul");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  Abort("Unimplemented: %s", "DoSub");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  Abort("Unimplemented: %s", "DoAdd");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  Abort("Unimplemented: %s", "DoPower");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+  Abort("Unimplemented: %s", "DoCompare");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+    HCompareJSObjectEq* instr) {
+  Abort("Unimplemented: %s", "DoCompareJSObjectEq");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+  Abort("Unimplemented: %s", "DoIsNull");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+  Abort("Unimplemented: %s", "DoIsObject");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+  Abort("Unimplemented: %s", "DoIsSmi");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+  Abort("Unimplemented: %s", "DoHasInstanceType");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+    HHasCachedArrayIndex* instr) {
+  Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+  Abort("Unimplemented: %s", "DoClassOfTest");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+  Abort("Unimplemented: %s", "DoJSArrayLength");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
+  Abort("Unimplemented: %s", "DoFixedArrayLength");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  Abort("Unimplemented: %s", "DoValueOf");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  Abort("Unimplemented: %s", "DoBoundsCheck");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  Abort("Unimplemented: %s", "DoThrow");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Abort("Unimplemented: %s", "DoChange");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  Abort("Unimplemented: %s", "DoCheckNonSmi");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  Abort("Unimplemented: %s", "DoCheckInstanceType");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  Abort("Unimplemented: %s", "DoCheckSmi");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  Abort("Unimplemented: %s", "DoCheckFunction");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  Abort("Unimplemented: %s", "DoCheckMap");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), rax));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
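+  // Pick the lithium constant that matches the hydrogen representation:
+  // untagged int32, untagged double, or a tagged handle.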
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    int32_t value = instr->Integer32Value();
+    return DefineAsRegister(new LConstantI(value));
+  } else if (r.IsDouble()) {
+    double value = instr->DoubleValue();
+    return DefineAsRegister(new LConstantD(value));
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT(instr->handle()));
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+  Abort("Unimplemented: %s", "DoLoadGlobal");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+  Abort("Unimplemented: %s", "DoStoreGlobal");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  Abort("Unimplemented: %s", "DoLoadContextSlot");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  Abort("Unimplemented: %s", "DoLoadNamedField");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoLoadNamedGeneric");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  Abort("Unimplemented: %s", "DoLoadElements");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  Abort("Unimplemented: %s", "DoStoreNamedField");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  Abort("Unimplemented: %s", "DoArrayLiteral");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  Abort("Unimplemented: %s", "DoObjectLiteral");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  Abort("Unimplemented: %s", "DoRegExpLiteral");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  Abort("Unimplemented: %s", "DoFunctionLiteral");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  Abort("Unimplemented: %s", "DoDeleteProperty");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  Abort("Unimplemented: %s", "DoOsrEntry");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
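+  // Parameters arrive on the stack, so define them in their incoming stack
+  // slots rather than in registers.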
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  Abort("Unimplemented: %s", "DoUnknownOSRValue");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  Abort("Unimplemented: %s", "DoCallStub");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  Abort("Unimplemented: %s", "DoArgumentsObject");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  Abort("Unimplemented: %s", "DoTypeof");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+  Abort("Unimplemented: %s", "DoTypeofIs");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
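+  // Bring the environment up to date with this simulate: drop what was popped
+  // and then push or rebind each recorded value.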
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+  ASSERT(env->length() == instr->environment_length());
+
+  // If there is an instruction with a pending deoptimization environment,
+  // create a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LLazyBailout* lazy_bailout = new LLazyBailout;
+    LInstruction* result = AssignEnvironment(lazy_bailout);
+    instructions_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  Abort("Unimplemented: %s", "DoEnterInlined");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  Abort("Unimplemented: %s", "DoLeaveInlined");
+  return NULL;
+}
+
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index fcab235..f3023f9 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -38,26 +38,271 @@
 
 // Forward declarations.
 class LCodeGen;
-class LEnvironment;
-class Translation;
+
+
+// Type hierarchy:
+//
+// LInstruction
+//   LAccessArgumentsAt
+//   LArgumentsElements
+//   LArgumentsLength
+//   LBinaryOperation
+//     LAddI
+//     LApplyArguments
+//     LArithmeticD
+//     LArithmeticT
+//     LBitI
+//     LBoundsCheck
+//     LCmpID
+//     LCmpIDAndBranch
+//     LCmpJSObjectEq
+//     LCmpJSObjectEqAndBranch
+//     LCmpT
+//     LDivI
+//     LInstanceOf
+//     LInstanceOfAndBranch
+//     LInstanceOfKnownGlobal
+//     LLoadKeyedFastElement
+//     LLoadKeyedGeneric
+//     LModI
+//     LMulI
+//     LPower
+//     LShiftI
+//     LSubI
+//   LCallConstantFunction
+//   LCallFunction
+//   LCallGlobal
+//   LCallKeyed
+//   LCallKnownGlobal
+//   LCallNamed
+//   LCallRuntime
+//   LCallStub
+//   LCheckPrototypeMaps
+//   LConstant
+//     LConstantD
+//     LConstantI
+//     LConstantT
+//   LDeoptimize
+//   LFunctionLiteral
+//   LGap
+//     LLabel
+//   LGlobalObject
+//   LGlobalReceiver
+//   LGoto
+//   LLazyBailout
+//   LLoadContextSlot
+//   LLoadGlobal
+//   LMaterializedLiteral
+//     LArrayLiteral
+//     LObjectLiteral
+//     LRegExpLiteral
+//   LOsrEntry
+//   LParameter
+//   LRegExpConstructResult
+//   LStackCheck
+//   LStoreKeyed
+//     LStoreKeyedFastElement
+//     LStoreKeyedGeneric
+//   LStoreNamed
+//     LStoreNamedField
+//     LStoreNamedGeneric
+//   LUnaryOperation
+//     LBitNotI
+//     LBranch
+//     LCallNew
+//     LCheckFunction
+//     LCheckInstanceType
+//     LCheckMap
+//     LCheckSmi
+//     LClassOfTest
+//     LClassOfTestAndBranch
+//     LDeleteProperty
+//     LDoubleToI
+//     LFixedArrayLength
+//     LHasCachedArrayIndex
+//     LHasCachedArrayIndexAndBranch
+//     LHasInstanceType
+//     LHasInstanceTypeAndBranch
+//     LInteger32ToDouble
+//     LIsNull
+//     LIsNullAndBranch
+//     LIsObject
+//     LIsObjectAndBranch
+//     LIsSmi
+//     LIsSmiAndBranch
+//     LJSArrayLength
+//     LLoadNamedField
+//     LLoadNamedGeneric
+//     LLoadFunctionPrototype
+//     LNumberTagD
+//     LNumberTagI
+//     LPushArgument
+//     LReturn
+//     LSmiTag
+//     LStoreGlobal
+//     LTaggedToI
+//     LThrow
+//     LTypeof
+//     LTypeofIs
+//     LTypeofIsAndBranch
+//     LUnaryMathOperation
+//     LValueOf
+//   LUnknownOSRValue
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(BinaryOperation)                            \
+  V(Constant)                                   \
+  V(Call)                                       \
+  V(MaterializedLiteral)                        \
+  V(StoreKeyed)                                 \
+  V(StoreNamed)                                 \
+  V(UnaryOperation)                             \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(CmpID)                                      \
+  V(CmpIDAndBranch)                             \
+  V(CmpJSObjectEq)                              \
+  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(CmpTAndBranch)                              \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(FunctionLiteral)                            \
+  V(Gap)                                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(FixedArrayLength)                           \
+  V(InstanceOf)                                 \
+  V(InstanceOfAndBranch)                        \
+  V(InstanceOfKnownGlobal)                      \
+  V(Integer32ToDouble)                          \
+  V(IsNull)                                     \
+  V(IsNullAndBranch)                            \
+  V(IsObject)                                   \
+  V(IsObjectAndBranch)                          \
+  V(IsSmi)                                      \
+  V(IsSmiAndBranch)                             \
+  V(JSArrayLength)                              \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadContextSlot)                            \
+  V(LoadElements)                               \
+  V(LoadGlobal)                                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadNamedField)                             \
+  V(LoadNamedGeneric)                           \
+  V(LoadFunctionPrototype)                      \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(Parameter)                                  \
+  V(Power)                                      \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreGlobal)                                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(Throw)                                      \
+  V(Typeof)                                     \
+  V(TypeofIs)                                   \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type)                \
+  virtual bool Is##type() const { return true; } \
+  static L##type* cast(LInstruction* instr) {    \
+    ASSERT(instr->Is##type());                   \
+    return reinterpret_cast<L##type*>(instr);    \
+  }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
+  virtual void CompileToNative(LCodeGen* generator);        \
+  virtual const char* Mnemonic() const { return mnemonic; } \
+  DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
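+// For example, DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") gives LAddI its
+// CompileToNative and Mnemonic overrides plus an IsAddI() predicate and a
+// checked LAddI::cast(), while DECLARE_HYDROGEN_ACCESSOR(Add) adds a typed
+// hydrogen() accessor.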
+
 
 class LInstruction: public ZoneObject {
  public:
-  LInstruction() { }
+  LInstruction()
+      : hydrogen_value_(NULL) { }
   virtual ~LInstruction() { }
 
-  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
-  virtual void PrintDataTo(StringStream* stream) const { }
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Predicates should be generated by macro as in lithium-ia32.h.
-  virtual bool IsLabel() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-  virtual bool IsOsrEntry() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+  virtual bool IsControl() const { return false; }
 
   void set_environment(LEnvironment* env) { environment_.set(env); }
   LEnvironment* environment() const { return environment_.get(); }
@@ -67,9 +312,7 @@
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }
 
-  void set_result(LOperand* operand) { result_.set(operand); }
-  LOperand* result() const { return result_.get(); }
-  bool HasResult() const { return result_.is_set(); }
+  virtual bool HasResult() const = 0;
 
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -87,38 +330,81 @@
  private:
   SetOncePointer<LEnvironment> environment_;
   SetOncePointer<LPointerMap> pointer_map_;
-  SetOncePointer<LOperand> result_;
   HValue* hydrogen_value_;
   SetOncePointer<LEnvironment> deoptimization_environment_;
 };
 
 
-class LParallelMove : public ZoneObject {
+template<typename T, int N>
+class OperandContainer {
  public:
-  LParallelMove() : move_operands_(4) { }
-
-  void AddMove(LOperand* from, LOperand* to) {
-    UNIMPLEMENTED();
+  OperandContainer() {
+    for (int i = 0; i < N; i++) elems_[i] = NULL;
   }
-
-  const ZoneList<LMoveOperands>* move_operands() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
+  int length() const { return N; }
+  T at(int i) const { return elems_[i]; }
+  void set_at(int i, T value) { elems_[i] = value; }
  private:
-  ZoneList<LMoveOperands> move_operands_;
+  T elems_[N];
 };
 
 
-class LGap: public LInstruction {
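+// Specialization for zero operands: a zero-length array member is not valid
+// C++, so the empty container stores nothing and treats element access as
+// unreachable.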
+template<typename T>
+class OperandContainer<T, 0> {
  public:
-  explicit LGap(HBasicBlock* block) { }
-
-  HBasicBlock* block() const {
-    UNIMPLEMENTED();
+  int length() const { return 0; }
+  T at(int i) const {
+    UNREACHABLE();
     return NULL;
   }
+  void set_at(int i, T value) {
+    UNREACHABLE();
+  }
+};
+
+
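+// R, I and T are the numbers of result, input and temp operands; each
+// concrete instruction below fixes them at compile time.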
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const { return R != 0; }
+  void set_result(LOperand* operand) { outputs_.set_at(0, operand); }
+  LOperand* result() const { return outputs_.at(0); }
+
+  int InputCount() const { return inputs_.length(); }
+  LOperand* InputAt(int i) const { return inputs_.at(i); }
+  void SetInputAt(int i, LOperand* operand) { inputs_.set_at(i, operand); }
+
+  int TempCount() const { return temps_.length(); }
+  LOperand* TempAt(int i) const { return temps_.at(i); }
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+ private:
+  OperandContainer<LOperand*, R> outputs_;
+  OperandContainer<LOperand*, I> inputs_;
+  OperandContainer<LOperand*, T> temps_;
+};
+
+
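+// A gap holds the parallel moves that the register allocator inserts before,
+// at the start of, at the end of, and after an instruction.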
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  virtual void PrintDataTo(StringStream* stream);
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
 
   enum InnerPosition {
     BEFORE,
@@ -129,14 +415,13 @@
     LAST_INNER_POSITION = AFTER
   };
 
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
-    UNIMPLEMENTED();
-    return NULL;
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos)  {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
   }
 
   LParallelMove* GetParallelMove(InnerPosition pos)  {
-    UNIMPLEMENTED();
-    return NULL;
+    return parallel_moves_[pos];
   }
 
  private:
@@ -145,9 +430,61 @@
 };
 
 
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LGoto(int block_id, bool include_stack_check = false)
+    : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+  bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+  int block_id_;
+  bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
 class LLabel: public LGap {
  public:
-  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
 
  private:
   Label label_;
@@ -155,30 +492,1340 @@
 };
 
 
-class LOsrEntry: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
-  // Function could be generated by a macro as in lithium-ia32.h.
-  static LOsrEntry* cast(LInstruction* instr) {
-    UNIMPLEMENTED();
-    return NULL;
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
+  }
+};
+
+
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int R>
+class LUnaryOperation: public LTemplateInstruction<R, 1, 0> {
+ public:
+  explicit LUnaryOperation<R>(LOperand* input) {
+    this->SetInputAt(0, input);
   }
 
-  LOperand** SpilledRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-  LOperand** SpilledDoubleRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
+  LOperand* input() const { return this->InputAt(0); }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+};
+
+
+template<int R>
+class LBinaryOperation: public LTemplateInstruction<R, 2, 0> {
+ public:
+  LBinaryOperation(LOperand* left, LOperand* right) {
+    this->SetInputAt(0, left);
+    this->SetInputAt(1, right);
   }
 
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
-    UNIMPLEMENTED();
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+  LOperand* left() const { return this->InputAt(0); }
+  LOperand* right() const { return this->InputAt(1); }
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    this->SetInputAt(0, function);
+    this->SetInputAt(1, receiver);
+    this->SetInputAt(2, length);
+    this->SetInputAt(3, elements);
   }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() const { return InputAt(0); }
+  LOperand* receiver() const { return InputAt(1); }
+  LOperand* length() const { return InputAt(2); }
+  LOperand* elements() const { return InputAt(3); }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    this->SetInputAt(0, arguments);
+    this->SetInputAt(1, length);
+    this->SetInputAt(2, index);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() const { return this->InputAt(0); }
+  LOperand* length() const { return this->InputAt(1); }
+  LOperand* index() const { return this->InputAt(2); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LUnaryOperation<1> {
+ public:
+  explicit LArgumentsLength(LOperand* elements)
+      : LUnaryOperation<1>(elements) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LBinaryOperation<1> {
+ public:
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation<1>(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LBinaryOperation<1> {
+ public:
+  LDivI(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LBinaryOperation<1> {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp)
+      : LBinaryOperation<1>(left, right), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCmpID: public LBinaryOperation<1> {
+ public:
+  LCmpID(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->GetInputRepresentation().IsDouble();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+};
+
+
+class LCmpIDAndBranch: public LCmpID {
+ public:
+  LCmpIDAndBranch(LOperand* left,
+                  LOperand* right,
+                  int true_block_id,
+                  int false_block_id)
+      : LCmpID(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LUnaryMathOperation: public LUnaryOperation<1> {
+ public:
+  explicit LUnaryMathOperation(LOperand* value)
+      : LUnaryOperation<1>(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream);
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LBinaryOperation<1> {
+ public:
+  LCmpJSObjectEq(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+ public:
+  LCmpJSObjectEqAndBranch(LOperand* left,
+                          LOperand* right,
+                          int true_block_id,
+                          int false_block_id)
+      : LCmpJSObjectEq(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+                               "cmp-jsobject-eq-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsNull: public LUnaryOperation<1> {
+ public:
+  explicit LIsNull(LOperand* value) : LUnaryOperation<1>(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+  DECLARE_HYDROGEN_ACCESSOR(IsNull)
+
+  bool is_strict() const { return hydrogen()->is_strict(); }
+};
+
+
+class LIsNullAndBranch: public LIsNull {
+ public:
+  LIsNullAndBranch(LOperand* value,
+                   LOperand* temp,
+                   int true_block_id,
+                   int false_block_id)
+      : LIsNull(value),
+        temp_(temp),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsObject: public LUnaryOperation<1> {
+ public:
+  LIsObject(LOperand* value, LOperand* temp)
+      : LUnaryOperation<1>(value), temp_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LIsObjectAndBranch: public LIsObject {
+ public:
+  LIsObjectAndBranch(LOperand* value,
+                     LOperand* temp,
+                     LOperand* temp2,
+                     int true_block_id,
+                     int false_block_id)
+      : LIsObject(value, temp),
+        temp2_(temp2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp2() const { return temp2_; }
+
+ private:
+  LOperand* temp2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsSmi: public LUnaryOperation<1> {
+ public:
+  explicit LIsSmi(LOperand* value) : LUnaryOperation<1>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LIsSmi {
+ public:
+  LIsSmiAndBranch(LOperand* value,
+                  int true_block_id,
+                  int false_block_id)
+      : LIsSmi(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasInstanceType: public LUnaryOperation<1> {
+ public:
+  explicit LHasInstanceType(LOperand* value)
+      : LUnaryOperation<1>(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+  InstanceType TestType();  // The type to test against when generating code.
+  Condition BranchCondition();  // The branch condition for 'true'.
+};
+
+
+class LHasInstanceTypeAndBranch: public LHasInstanceType {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value,
+                            LOperand* temporary,
+                            int true_block_id,
+                            int false_block_id)
+      : LHasInstanceType(value),
+        temp_(temporary),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasCachedArrayIndex: public LUnaryOperation<1> {
+ public:
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation<1>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value,
+                                int true_block_id,
+                                int false_block_id)
+      : LHasCachedArrayIndex(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LClassOfTest: public LUnaryOperation<1> {
+ public:
+  LClassOfTest(LOperand* value, LOperand* temp)
+      : LUnaryOperation<1>(value), temporary_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* temporary() { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LClassOfTestAndBranch: public LClassOfTest {
+ public:
+  LClassOfTestAndBranch(LOperand* value,
+                        LOperand* temporary,
+                        LOperand* temporary2,
+                        int true_block_id,
+                        int false_block_id)
+      : LClassOfTest(value, temporary),
+        temporary2_(temporary2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+  LOperand* temporary2() { return temporary2_; }
+
+ private:
+  LOperand* temporary2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpT: public LBinaryOperation<1> {
+ public:
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation<1>(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LCmpT {
+ public:
+  LCmpTAndBranch(LOperand* left,
+                 LOperand* right,
+                 int true_block_id,
+                 int false_block_id)
+      : LCmpT(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOf: public LBinaryOperation<1> {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LInstanceOf {
+ public:
+  LInstanceOfAndBranch(LOperand* left,
+                       LOperand* right,
+                       int true_block_id,
+                       int false_block_id)
+      : LInstanceOf(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOfKnownGlobal: public LUnaryOperation<1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* left, LOperand* temp)
+      : LUnaryOperation<1>(left), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LBoundsCheck: public LBinaryOperation<0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length)
+      : LBinaryOperation<0>(index, length) { }
+
+  LOperand* index() const { return left(); }
+  LOperand* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LBinaryOperation<1> {
+ public:
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
+ private:
+  Token::Value op_;
+};
+
+
+class LShiftI: public LBinaryOperation<1> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : LBinaryOperation<1>(left, right), op_(op), can_deopt_(can_deopt) { }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LBinaryOperation<1> {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstant: public LTemplateInstruction<1, 0, 0> {
+  DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant {
+ public:
+  explicit LConstantI(int32_t value) : value_(value) { }
+  int32_t value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+
+ private:
+  int32_t value_;
+};
+
+
+class LConstantD: public LConstant {
+ public:
+  explicit LConstantD(double value) : value_(value) { }
+  double value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+  double value_;
+};
+
+
+class LConstantT: public LConstant {
+ public:
+  explicit LConstantT(Handle<Object> value) : value_(value) { }
+  Handle<Object> value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+
+ private:
+  Handle<Object> value_;
+};
+
+
+class LBranch: public LUnaryOperation<0> {
+ public:
+  LBranch(LOperand* input, int true_block_id, int false_block_id)
+      : LUnaryOperation<0>(input),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Value)
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpMapAndBranch: public LUnaryOperation<0> {
+ public:
+  explicit LCmpMapAndBranch(LOperand* value) : LUnaryOperation<0>(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return hydrogen()->map(); }
+  int true_block_id() const {
+    return hydrogen()->true_destination()->block_id();
+  }
+  int false_block_id() const {
+    return hydrogen()->false_destination()->block_id();
+  }
+};
+
+
+class LJSArrayLength: public LUnaryOperation<1> {
+ public:
+  explicit LJSArrayLength(LOperand* input) : LUnaryOperation<1>(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
+
+class LFixedArrayLength: public LUnaryOperation<1> {
+ public:
+  explicit LFixedArrayLength(LOperand* input) : LUnaryOperation<1>(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
+};
+
+
+class LValueOf: public LUnaryOperation<1> {
+ public:
+  LValueOf(LOperand* input, LOperand* temporary)
+      : LUnaryOperation<1>(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LThrow: public LUnaryOperation<0> {
+ public:
+  explicit LThrow(LOperand* value) : LUnaryOperation<0>(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LUnaryOperation<1> {
+ public:
+  explicit LBitNotI(LOperand* input) : LUnaryOperation<1>(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LBinaryOperation<1> {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LBinaryOperation<1> {
+ public:
+  LPower(LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LBinaryOperation<1> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LBinaryOperation<1> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation<1>(left, right), op_(op) { }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LUnaryOperation<0> {
+ public:
+  explicit LReturn(LOperand* use) : LUnaryOperation<0>(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LUnaryOperation<1> {
+ public:
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation<1>(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LUnaryOperation<1> {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation<1>(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() const { return input(); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LUnaryOperation<1> {
+ public:
+  LLoadFunctionPrototype(LOperand* function, LOperand* temporary)
+      : LUnaryOperation<1>(function), temporary_(temporary) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+  LOperand* function() const { return input(); }
+  LOperand* temporary() const { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LLoadElements: public LUnaryOperation<1> {
+ public:
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation<1>(obj) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LBinaryOperation<1> {
+ public:
+  LLoadKeyedFastElement(LOperand* elements, LOperand* key)
+      : LBinaryOperation<1>(elements, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadKeyedGeneric: public LBinaryOperation<1> {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key)
+      : LBinaryOperation<1>(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LUnaryOperation<0> {
+ public:
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation<0>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int context_chain_length() const {
+    return hydrogen()->context_chain_length();
+  }
+  int slot_index() const { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LUnaryOperation<0> {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation<0>(argument) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> function() { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LUnaryOperation<1> {
+ public:
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation<1>(constructor) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LUnaryOperation<1> {
+ public:
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation<1>(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LUnaryOperation<1> {
+ public:
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation<1>(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LUnaryOperation<1> {
+ public:
+  explicit LNumberTagD(LOperand* value, LOperand* temp)
+      : LUnaryOperation<1>(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI: public LUnaryOperation<1> {
+ public:
+  LDoubleToI(LOperand* value, LOperand* temporary)
+      : LUnaryOperation<1>(value), temporary_(temporary) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temporary() const { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LUnaryOperation<1> {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp)
+      : LUnaryOperation<1>(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LSmiTag: public LUnaryOperation<1> {
+ public:
+  explicit LSmiTag(LOperand* use) : LUnaryOperation<1>(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LUnaryOperation<1> {
+ public:
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation<1>(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
+class LSmiUntag: public LUnaryOperation<1> {
+ public:
+  LSmiUntag(LOperand* use, bool needs_check)
+      : LUnaryOperation<1>(use), needs_check_(needs_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamed(LOperand* obj, LOperand* val) {
+    this->SetInputAt(0, obj);
+    this->SetInputAt(1, val);
+  }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() const { return this->InputAt(0); }
+  LOperand* value() const { return this->InputAt(1); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+  LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
+      : LStoreNamed(obj, val), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  bool is_in_object() { return hydrogen()->is_in_object(); }
+  int offset() { return hydrogen()->offset(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+  Handle<Map> transition() const { return hydrogen()->transition(); }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+  LStoreNamedGeneric(LOperand* obj, LOperand* val)
+      : LStoreNamed(obj, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+};
+
+
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+    this->SetInputAt(0, obj);
+    this->SetInputAt(1, key);
+    this->SetInputAt(2, val);
+  }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() const { return this->InputAt(0); }
+  LOperand* key() const { return this->InputAt(1); }
+  LOperand* value() const { return this->InputAt(2); }
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LUnaryOperation<0> {
+ public:
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation<0>(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LUnaryOperation<0> {
+ public:
+  LCheckInstanceType(LOperand* use, LOperand* temp)
+      : LUnaryOperation<0>(use), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckMap: public LUnaryOperation<0> {
+ public:
+  explicit LCheckMap(LOperand* use) : LUnaryOperation<0>(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LCheckPrototypeMaps(LOperand* temp) : temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+  Handle<JSObject> holder() const { return hydrogen()->holder(); }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckSmi: public LUnaryOperation<0> {
+ public:
+  LCheckSmi(LOperand* use, Condition condition)
+      : LUnaryOperation<0>(use), condition_(condition) { }
+
+  Condition condition() const { return condition_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const {
+    return (condition_ == zero) ? "check-non-smi" : "check-smi";
+  }
+
+ private:
+  Condition condition_;
+};
+
+
+class LMaterializedLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+};
+
+
+class LArrayLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LTypeof: public LUnaryOperation<1> {
+ public:
+  explicit LTypeof(LOperand* input) : LUnaryOperation<1>(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LUnaryOperation<1> {
+ public:
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation<1>(input) { }
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+};
+
+
+class LTypeofIsAndBranch: public LTypeofIs {
+ public:
+  LTypeofIsAndBranch(LOperand* value,
+                     int true_block_id,
+                     int false_block_id)
+      : LTypeofIs(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LDeleteProperty: public LBinaryOperation<1> {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key)
+      : LBinaryOperation<1>(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
   void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand) {
-    UNIMPLEMENTED();
-  }
+                                 LOperand* spill_operand);
 
  private:
   // Arrays of spill slot operands for registers with an assigned spill
@@ -190,72 +1837,9 @@
 };
 
 
-class LPointerMap: public ZoneObject {
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
  public:
-  explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
-  int lithium_position() const {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  void RecordPointer(LOperand* op) { UNIMPLEMENTED(); }
-
- private:
-  ZoneList<LOperand*> pointer_operands_;
-  int position_;
-  int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
-  LEnvironment(Handle<JSFunction> closure,
-               int ast_id,
-               int parameter_count,
-               int argument_count,
-               int value_count,
-               LEnvironment* outer)
-      : closure_(closure),
-        arguments_stack_height_(argument_count),
-        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
-        translation_index_(-1),
-        ast_id_(ast_id),
-        parameter_count_(parameter_count),
-        values_(value_count),
-        representations_(value_count),
-        spilled_registers_(NULL),
-        spilled_double_registers_(NULL),
-        outer_(outer) {
-  }
-
-  Handle<JSFunction> closure() const { return closure_; }
-  int arguments_stack_height() const { return arguments_stack_height_; }
-  int deoptimization_index() const { return deoptimization_index_; }
-  int translation_index() const { return translation_index_; }
-  int ast_id() const { return ast_id_; }
-  int parameter_count() const { return parameter_count_; }
-  const ZoneList<LOperand*>* values() const { return &values_; }
-  LEnvironment* outer() const { return outer_; }
-
- private:
-  Handle<JSFunction> closure_;
-  int arguments_stack_height_;
-  int deoptimization_index_;
-  int translation_index_;
-  int ast_id_;
-  int parameter_count_;
-  ZoneList<LOperand*> values_;
-  ZoneList<Representation> representations_;
-
-  // Allocation index indexed arrays of spill slot operands for registers
-  // that are also in spill slots at an OSR entry.  NULL for environments
-  // that do not correspond to an OSR entry.
-  LOperand** spilled_registers_;
-  LOperand** spilled_double_registers_;
-
-  LEnvironment* outer_;
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
 };
 
 
@@ -269,57 +1853,51 @@
       pointer_maps_(8),
       inlined_closures_(1) { }
 
+  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
   int spill_slot_count() const { return spill_slot_count_; }
   HGraph* graph() const { return graph_; }
   const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
   const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const {
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
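+  // Resolve a block id to its final destination by following the chain of
+  // label replacements.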
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
+  }
+
   const ZoneList<Handle<JSFunction> >* inlined_closures() const {
     return &inlined_closures_;
   }
 
-  LOperand* GetNextSpillSlot(bool double_slot) {
-    UNIMPLEMENTED();
-    return NULL;
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
   }
 
-  LConstantOperand* DefineConstantOperand(HConstant* constant) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LLabel* GetLabel(int block_id) const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  int GetParameterStackSlot(int index) const {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
-
-  LGap* GetGapAt(int index) const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool IsGapAt(int index) const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  int NearestGapPos(int index) const {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
-
-#ifdef DEBUG
-  void Verify() { }
-#endif
-
  private:
   int spill_slot_count_;
   HGraph* const graph_;
@@ -348,10 +1926,7 @@
   LChunk* Build();
 
   // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
-    UNIMPLEMENTED(); \
-    return NULL; \
-  }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO
 
@@ -373,7 +1948,90 @@
 
   void Abort(const char* format, ...);
 
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  LOperand* Use(HValue* value, LUnallocated* operand);
+  LOperand* UseFixed(HValue* value, Register fixed_register);
+  LOperand* UseFixedDouble(HValue* value, XMMRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
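+  //
+  // For example, an instruction whose result is defined with DefineSameAsFirst
+  // would typically take its first input via UseRegisterAtStart, since the
+  // allocator may then reuse the input register for the output.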
+  LOperand* UseRegister(HValue* value);
+  LOperand* UseRegisterAtStart(HValue* value);
+
+  // A value in a register that may be trashed.
+  LOperand* UseTempRegister(HValue* value);
+  LOperand* Use(HValue* value);
+  LOperand* UseAtStart(HValue* value);
+  LOperand* UseOrConstant(HValue* value);
+  LOperand* UseOrConstantAtStart(HValue* value);
+  LOperand* UseRegisterOrConstant(HValue* value);
+  LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+                           LUnallocated* result);
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+                                    int index);
+  template<int I, int T>
+      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                Register reg);
+  template<int I, int T>
+      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+                                      XMMRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to the
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+  // Temporary operand that must be in a register.
+  LUnallocated* TempRegister();
+  LOperand* FixedTemp(Register reg);
+  LOperand* FixedTemp(XMMRegister reg);
+
+  void VisitInstruction(HInstruction* current);
+
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
 
   LChunk* chunk_;
   HGraph* const graph_;
@@ -390,6 +2048,9 @@
   DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
 };
 
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2846fe2..f95755d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1274,8 +1274,6 @@
 }
 
 
-
-
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 5893a2f..4672960 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,6 +47,7 @@
 
   // Check that the result has the proper alignment and return it.
   ASSERT(IsAddressAligned(result, kAlignment, 0));
+  allocation_size_ += size;
   return reinterpret_cast<void*>(result);
 }
 
diff --git a/src/zone.cc b/src/zone.cc
index 01df450..f8dbaab 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -38,6 +38,7 @@
 Address Zone::limit_ = 0;
 int Zone::zone_excess_limit_ = 256 * MB;
 int Zone::segment_bytes_allocated_ = 0;
+unsigned Zone::allocation_size_ = 0;
 
 bool AssertNoZoneAllocation::allow_allocation_ = true;
 
diff --git a/src/zone.h b/src/zone.h
index dde722f..e299f15 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,6 +71,8 @@
 
   static inline void adjust_segment_bytes_allocated(int delta);
 
+  static unsigned allocation_size_;
+
  private:
 
   // All pointers returned from New() have this alignment.
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 7c1197a..23c2092 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -29,6 +29,10 @@
 
 test-api/Bug*: FAIL
 
+# The problem is that a code object can get a different optimizable flag
+# in crankshaft after creation.
+test-log/EquivalenceOfLoggingAndTraversal: SKIP
+
 
 ##############################################################################
 # BUG(281): This test fails on some Linuxes.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 9539973..6a2f328 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -814,6 +814,75 @@
 }
 
 
+static void* expected_ptr;
+static v8::Handle<v8::Value> callback(const v8::Arguments& args) {
+  void* ptr = v8::External::Unwrap(args.Data());
+  CHECK_EQ(expected_ptr, ptr);
+  return v8::Boolean::New(true);
+}
+
+
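+// Wraps expected_ptr into a v8::External, installs a callback that unwraps it
+// and checks that the same pointer comes back, then triggers the callback
+// repeatedly from JavaScript.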
+static void TestExternalPointerWrapping() {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  v8::Handle<v8::Value> data = v8::External::Wrap(expected_ptr);
+
+  v8::Handle<v8::Object> obj = v8::Object::New();
+  obj->Set(v8_str("func"),
+           v8::FunctionTemplate::New(callback, data)->GetFunction());
+  env->Global()->Set(v8_str("obj"), obj);
+
+  CHECK(CompileRun(
+        "function foo() {\n"
+        "  for (var i = 0; i < 13; i++) obj.func();\n"
+        "}\n"
+        "foo(), true")->BooleanValue());
+}
+
+
+THREADED_TEST(ExternalWrap) {
+  // Check heap allocated object.
+  int* ptr = new int;
+  expected_ptr = ptr;
+  TestExternalPointerWrapping();
+  delete ptr;
+
+  // Check stack allocated object.
+  int foo;
+  expected_ptr = &foo;
+  TestExternalPointerWrapping();
+
+  // Check unaligned addresses.
+  const int n = 100;
+  char* s = new char[n];
+  for (int i = 0; i < n; i++) {
+    expected_ptr = s + i;
+    TestExternalPointerWrapping();
+  }
+
+  delete[] s;
+
+  // Check several invalid addresses.
+  expected_ptr = reinterpret_cast<void*>(1);
+  TestExternalPointerWrapping();
+
+  expected_ptr = reinterpret_cast<void*>(0xdeadbeef);
+  TestExternalPointerWrapping();
+
+  expected_ptr = reinterpret_cast<void*>(0xdeadbeef + 1);
+  TestExternalPointerWrapping();
+
+#if defined(V8_HOST_ARCH_X64)
+  expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
+  TestExternalPointerWrapping();
+
+  expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef + 1);
+  TestExternalPointerWrapping();
+#endif
+}
+
+
 THREADED_TEST(FindInstanceInPrototypeChain) {
   v8::HandleScope scope;
   LocalContext env;
@@ -2288,6 +2357,30 @@
 }
 
 
+static void check_reference_error_message(
+    v8::Handle<v8::Message> message,
+    v8::Handle<v8::Value> data) {
+  const char* reference_error = "Uncaught ReferenceError: asdf is not defined";
+  CHECK(message->Get()->Equals(v8_str(reference_error)));
+}
+
+
+// Test that overwritten toString methods are not invoked on uncaught
+// exception formatting. However, they are invoked when performing
+// normal error string conversions.
+TEST(APIThrowMessageOverwrittenToString) {
+  v8::HandleScope scope;
+  v8::V8::AddMessageListener(check_reference_error_message);
+  LocalContext context;
+  CompileRun("ReferenceError.prototype.toString ="
+             "  function() { return 'Whoops' }");
+  CompileRun("asdf;");
+  v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
+  CHECK(string->Equals(v8_str("Whoops")));
+  v8::V8::RemoveMessageListeners(check_reference_error_message);
+}
+
+
 static void receive_message(v8::Handle<v8::Message> message,
                             v8::Handle<v8::Value> data) {
   message->Get();
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index b563f8f..30d708e 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -416,7 +416,7 @@
     }
   }
 
-  // andpd, cmpltsd, movaps, psllq.
+  // andpd, cmpltsd, movaps, psllq, psrlq, por.
   {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
@@ -431,6 +431,18 @@
 
       __ psllq(xmm0, 17);
       __ psllq(xmm1, 42);
+
+      __ psllq(xmm0, xmm1);
+      __ psllq(xmm1, xmm2);
+
+      __ psrlq(xmm0, 17);
+      __ psrlq(xmm1, 42);
+
+      __ psrlq(xmm0, xmm1);
+      __ psrlq(xmm1, xmm2);
+
+      __ por(xmm0, xmm1);
+      __ por(xmm1, xmm2);
     }
   }
 
diff --git a/test/cctest/testcfg.py b/test/cctest/testcfg.py
index 485f2cf..b15342e 100644
--- a/test/cctest/testcfg.py
+++ b/test/cctest/testcfg.py
@@ -92,6 +92,7 @@
         dependency = relative_path[0] + '/' + dependency
       if self.Contains(path, full_path):
         result.append(CcTestCase(full_path, executable, mode, raw_test, dependency, self.context))
+    result.sort()
     return result
 
   def GetTestStatus(self, sections, defs):
diff --git a/test/es5conform/testcfg.py b/test/es5conform/testcfg.py
index 43d6104..e3a60cc 100644
--- a/test/es5conform/testcfg.py
+++ b/test/es5conform/testcfg.py
@@ -82,8 +82,10 @@
     for root, dirs, files in os.walk(current_root):
       for dotted in [x  for x in dirs if x.startswith('.')]:
         dirs.remove(dotted)
+      dirs.sort()
       root_path = root[len(self.root):].split(os.path.sep)
       root_path = current_path + [x for x in root_path if x]
+      files.sort()
       for file in files:
         if file.endswith('.js'):
           full_path = root_path + [file[:-3]]
diff --git a/test/message/testcfg.py b/test/message/testcfg.py
index 7dae047..21a0428 100644
--- a/test/message/testcfg.py
+++ b/test/message/testcfg.py
@@ -107,6 +107,9 @@
     mjsunit = [current_path + [t] for t in self.Ls(self.root)]
     regress = [current_path + ['regress', t] for t in self.Ls(join(self.root, 'regress'))]
     bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
+    mjsunit.sort()
+    regress.sort()
+    bugs.sort()
     all_tests = mjsunit + regress + bugs
     result = []
     for test in all_tests:
diff --git a/test/mjsunit/array-slice.js b/test/mjsunit/array-slice.js
index 50b5b27..5ae31dc 100644
--- a/test/mjsunit/array-slice.js
+++ b/test/mjsunit/array-slice.js
@@ -231,3 +231,62 @@
   func(['a', 1, undefined], 'a', 1, undefined);
   func(['a', 1, undefined, void(0)], 'a', 1, undefined, void(0));
 })();
+
+// Check slicing on arguments object when missing arguments get assigned.
+(function() {
+  function func(x, y) {
+    assertEquals(1, arguments.length);
+    assertEquals(undefined, y);
+    y = 239;
+    assertEquals(1, arguments.length);  // arguments length is the same.
+    assertEquals([x], Array.prototype.slice.call(arguments, 0));
+  }
+
+  func('a');
+})();
+
+// Check slicing on arguments object when length property has been set.
+(function() {
+  function func(x, y) {
+    assertEquals(1, arguments.length);
+    arguments.length = 7;
+    assertEquals([x,,,,,,,], Array.prototype.slice.call(arguments, 0));
+  }
+
+  func('a');
+})();
+
+// Check slicing on arguments object when length property has been set to
+// some strange value.
+(function() {
+  function func(x, y) {
+    assertEquals(1, arguments.length);
+    arguments.length = 'foobar';
+    assertEquals([], Array.prototype.slice.call(arguments, 0));
+  }
+
+  func('a');
+})();
+
+// Check slicing on arguments object when extra argument has been added
+// via indexed assignment.
+(function() {
+  function func(x, y) {
+    assertEquals(1, arguments.length);
+    arguments[3] = 239;
+    assertEquals([x], Array.prototype.slice.call(arguments, 0));
+  }
+
+  func('a');
+})();
+
+// Check slicing on arguments object when argument has been deleted by index.
+(function() {
+  function func(x, y, z) {
+    assertEquals(3, arguments.length);
+    delete arguments[1];
+    assertEquals([x,,z], Array.prototype.slice.call(arguments, 0));
+  }
+
+  func('a', 'b', 'c');
+})();
diff --git a/test/mjsunit/closures.js b/test/mjsunit/closures.js
new file mode 100644
index 0000000..ee487a4
--- /dev/null
+++ b/test/mjsunit/closures.js
@@ -0,0 +1,45 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function runner(f, expected) {
+  for (var i = 0; i < 1000000; i++) {
+    assertEquals(expected, f.call(this));
+  }
+}
+
+function test(n) {
+  function MyFunction() {
+    var result = n * 2 + arguments.length;
+    return result;
+  }
+  runner(MyFunction, n * 2);
+}
+
+test(1);
+test(42);
+test(239);
+
diff --git a/test/mjsunit/compiler/regress-closures-with-eval.js b/test/mjsunit/compiler/regress-closures-with-eval.js
new file mode 100644
index 0000000..507d74f
--- /dev/null
+++ b/test/mjsunit/compiler/regress-closures-with-eval.js
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verifies that closures in the presence of eval work fine.
+function withEval(expr, filter) {
+  function walk(v) {
+    for (var i in v) {
+      for (var i in v) {}
+    }
+    return filter(v);
+  }
+
+  var o = eval(expr);
+  return walk(o);
+}
+
+function makeTagInfoJSON(n) {
+  var a = new Array(n);
+  for (var i = 0; i < n; i++) a.push('{}');
+  return a;
+}
+
+var expr = '([' + makeTagInfoJSON(128).join(', ') + '])';
+
+for (var n = 0; n < 300; n++) {
+  withEval(expr, function(a) { return a; });
+}
diff --git a/test/mjsunit/debug-breakpoints.js b/test/mjsunit/debug-breakpoints.js
index 0bc349c..1302034 100644
--- a/test/mjsunit/debug-breakpoints.js
+++ b/test/mjsunit/debug-breakpoints.js
@@ -118,3 +118,89 @@
 //   b=2;
 // }
 assertTrue(Debug.showBreakPoints(g).indexOf("[B0]") < 0);
+
+
+// Tests for setting break points by script id and position.
+function setBreakpointByPosition(f, position) {
+  var break_point = Debug.setBreakPointByScriptIdAndPosition(
+      Debug.findScript(f).id,
+      position + Debug.sourcePosition(f),
+      "",
+      true);
+  return break_point.number();
+}
+
+bp = setBreakpointByPosition(f, 0);
+assertEquals("() {[B0]a=1;b=2}", Debug.showBreakPoints(f));
+Debug.clearBreakPoint(bp);
+assertEquals("() {a=1;b=2}", Debug.showBreakPoints(f));
+bp1 = setBreakpointByPosition(f, 8);
+assertEquals("() {a=1;[B0]b=2}", Debug.showBreakPoints(f));
+bp2 = setBreakpointByPosition(f, 4);
+assertEquals("() {[B0]a=1;[B1]b=2}", Debug.showBreakPoints(f));
+bp3 = setBreakpointByPosition(f, 11);
+assertEquals("() {[B0]a=1;[B1]b=2[B2]}", Debug.showBreakPoints(f));
+Debug.clearBreakPoint(bp1);
+assertEquals("() {[B0]a=1;b=2[B1]}", Debug.showBreakPoints(f));
+Debug.clearBreakPoint(bp2);
+assertEquals("() {a=1;b=2[B0]}", Debug.showBreakPoints(f));
+Debug.clearBreakPoint(bp3);
+assertEquals("() {a=1;b=2}", Debug.showBreakPoints(f));
+
+bp = setBreakpointByPosition(g, 0);
+//function g() {
+//[B0]a=1;
+//b=2;
+//}
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]a=1;") > 0);
+Debug.clearBreakPoint(bp);
+//function g() {
+//a=1;
+//b=2;
+//}
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]") < 0);
+
+// Second test: set and clear breakpoints on lines 1, 2 and 3 (column = 0).
+bp1 = setBreakpointByPosition(g, 12);
+//function g() {
+//a=1;
+//[B0]b=2;
+//}
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]b=2;") > 0);
+bp2 = setBreakpointByPosition(g, 5);
+//function g() {
+//[B0]a=1;
+//[B1]b=2;
+//}
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]a=1;") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("[B1]b=2;") > 0);
+bp3 = setBreakpointByPosition(g, 19);
+//function g() {
+//[B0]a=1;
+//[B1]b=2;
+//}[B2]
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]a=1;") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("[B1]b=2;") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("[B2]}") > 0);
+Debug.clearBreakPoint(bp1);
+//function g() {
+//[B0]a=1;
+//b=2;
+//}[B1]
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]a=1;") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("[B1]}") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("[B2]") < 0);
+Debug.clearBreakPoint(bp2);
+//function g() {
+//a=1;
+//b=2;
+//}[B0]
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]}") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("[B1]") < 0);
+Debug.clearBreakPoint(bp3);
+//function g() {
+//a=1;
+//b=2;
+//}
+assertTrue(Debug.showBreakPoints(g).indexOf("[B0]") < 0);
diff --git a/test/mjsunit/debug-liveedit-diff.js b/test/mjsunit/debug-liveedit-diff.js
index 7edf704..0d26a30 100644
--- a/test/mjsunit/debug-liveedit-diff.js
+++ b/test/mjsunit/debug-liveedit-diff.js
@@ -31,11 +31,15 @@
 Debug = debug.Debug
 
 function CheckCompareOneWay(s1, s2) {
-  var diff_array = Debug.LiveEdit.TestApi.CompareStringsLinewise(s1, s2);
+  var diff_array = Debug.LiveEdit.TestApi.CompareStrings(s1, s2);
 
   var pos1 = 0;
   var pos2 = 0;
   print("Compare:");
+  print("s1='" + s1 + "'");
+  print("s2='" + s2 + "'");
+  print("Diff:");
+  print("" + diff_array);
   for (var i = 0; i < diff_array.length; i += 3) {
     var similar_length = diff_array[i] - pos1;
     assertEquals(s1.substring(pos1, pos1 + similar_length),
@@ -45,12 +49,12 @@
     pos1 += similar_length;
     pos2 += similar_length;
     print("<<< " + pos1 + " " + diff_array[i + 1]);
-    print(s1.substring(pos1, pos1 + diff_array[i + 1]));
+    print(s1.substring(pos1, diff_array[i + 1]));
     print("===");
-    print(s2.substring(pos2, pos2 + diff_array[i + 2]));
+    print(s2.substring(pos2, diff_array[i + 2]));
     print(">>> " + pos2 + " " + diff_array[i + 2]);
-    pos1 += diff_array[i + 1];
-    pos2 += diff_array[i + 2];
+    pos1 = diff_array[i + 1];
+    pos2 = diff_array[i + 2];
   }
   {
     // After last change
@@ -64,9 +68,18 @@
   print("");
 }
 
-function CheckCompare(s1, s2) {
+function CheckCompareOneWayPlayWithLF(s1, s2) {
+  var s1Oneliner = s1.replace(/\n/g, ' ');
+  var s2Oneliner = s2.replace(/\n/g, ' ');
   CheckCompareOneWay(s1, s2);
-  CheckCompareOneWay(s2, s1);
+  CheckCompareOneWay(s1Oneliner, s2);
+  CheckCompareOneWay(s1, s2Oneliner);
+  CheckCompareOneWay(s1Oneliner, s2Oneliner);
+}
+
+function CheckCompare(s1, s2) {
+  CheckCompareOneWayPlayWithLF(s1, s2);
+  CheckCompareOneWayPlayWithLF(s2, s1);
 }
 
 CheckCompare("", "");
diff --git a/test/mjsunit/debug-liveedit-newsource.js b/test/mjsunit/debug-liveedit-newsource.js
index 7b8945a..a60e69f 100644
--- a/test/mjsunit/debug-liveedit-newsource.js
+++ b/test/mjsunit/debug-liveedit-newsource.js
@@ -64,6 +64,6 @@
 assertEquals("Capybara", ChooseAnimal());
 // Global variable do not get changed (without restarting script).
 assertEquals(25, something1);
-// Function is oneliner, so currently it is treated as damaged and not patched.
-assertEquals(17, ChooseNumber());
+// We should support changes in oneliners.
+assertEquals(18, ChooseNumber());
 assertEquals("Hello Peter", ChooseAnimal.Factory()("Peter"));
diff --git a/test/mjsunit/delay-syntax-error.js b/test/mjsunit/delay-syntax-error.js
index 4fcb143..64cc142 100644
--- a/test/mjsunit/delay-syntax-error.js
+++ b/test/mjsunit/delay-syntax-error.js
@@ -25,17 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// To be compatible with KJS syntax errors for illegal return, break
-// and continue should be delayed to runtime.
+// To be compatible with JSC, syntax errors for illegal returns should be
+// delayed to runtime.
+// Invalid continue and break statements are caught at compile time.
 
-// Do not throw syntax errors for illegal return, break and continue
-// at compile time.
+// Do not throw syntax errors for illegal return at compile time.
 assertDoesNotThrow("if (false) return;");
-assertDoesNotThrow("if (false) break;");
-assertDoesNotThrow("if (false) continue;");
 
-// Throw syntax errors for illegal return, break and continue at
-// compile time.
+// Throw syntax errors for illegal break and continue at compile time.
+assertThrows("if (false) break;");
+assertThrows("if (false) continue;");
+
+// Throw syntax errors for illegal return, break and continue at runtime.
 assertThrows("return;");
 assertThrows("break;");
 assertThrows("continue;");
diff --git a/test/mjsunit/error-constructors.js b/test/mjsunit/error-constructors.js
index ca2aa06..8f463fc 100644
--- a/test/mjsunit/error-constructors.js
+++ b/test/mjsunit/error-constructors.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,3 +30,32 @@
 Error.prototype.toString = Object.prototype.toString;
 assertEquals("[object Error]", Error.prototype.toString());
 assertEquals(Object.prototype, Error.prototype.__proto__);
+
+// Check that error construction does not call setters for the
+// properties on error objects in prototypes.
+function fail() { assertTrue(false); };
+ReferenceError.prototype.__defineSetter__('stack', fail);
+ReferenceError.prototype.__defineSetter__('message', fail);
+ReferenceError.prototype.__defineSetter__('type', fail);
+ReferenceError.prototype.__defineSetter__('arguments', fail);
+var e0 = new ReferenceError();
+var e1 = new ReferenceError('123');
+assertTrue(e1.hasOwnProperty('message'));
+assertTrue(e0.hasOwnProperty('stack'));
+assertTrue(e1.hasOwnProperty('stack'));
+assertTrue(e0.hasOwnProperty('type'));
+assertTrue(e1.hasOwnProperty('type'));
+assertTrue(e0.hasOwnProperty('arguments'));
+assertTrue(e1.hasOwnProperty('arguments'));
+
+// Check that the name property on error prototypes is read-only and
+// dont-delete. This is not specified, but allowing the name property to be
+// overwritten with a getter can leak error objects from different
+// script tags in the same context in a browser setting. We therefore
+// disallow changes to the name property on error objects.
+assertEquals("ReferenceError", ReferenceError.prototype.name);
+delete ReferenceError.prototype.name;
+assertEquals("ReferenceError", ReferenceError.prototype.name);
+ReferenceError.prototype.name = "not a reference error";
+assertEquals("ReferenceError", ReferenceError.prototype.name);
+
diff --git a/test/mjsunit/regress/regress-1036894.js b/test/mjsunit/regress/regress-1036894.js
index d89ceda..03ed8f9 100644
--- a/test/mjsunit/regress/regress-1036894.js
+++ b/test/mjsunit/regress/regress-1036894.js
@@ -25,14 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-xeval = function(s) { eval(s); }
-xeval("$=function anonymous() { /*noex*/do {} while(({ get x(x) { break ; }, set x() { (undefined);} })); }");
+assertThrows("$=function anonymous() { /*noex*/do {} while(({ get x(x) { break ; }, set x() { (undefined);} })); }");
 
-foo = function() { eval("$=function anonymous() { /*noex*/do {} while(({ get x(x) { break ; }, set x() { (undefined);} })); }"); }
+function foo() {
+  assertThrows("$=function anonymous() { /*noex*/do {} while(({ get x(x) { break ; }, set x() { (undefined);} })); }");
+}
 foo();
 
-xeval = function(s) { eval(s); }
-eval("$=function anonymous() { /*noex*/do {} while(({ get x(x) { break ; }, set x() { (undefined);} })); }");
+assertThrows("$=function anonymous() { /*noex*/do {} while(({ get x(x) { break ; }, set x() { (undefined);} })); }");
 
 xeval = function(s) { eval(s); }
 xeval('$=function(){L: {break L;break L;}};');
diff --git a/test/mjsunit/regress/regress-990205.js b/test/mjsunit/regress/regress-990205.js
index 1ab5bf8..b3024c2 100644
--- a/test/mjsunit/regress/regress-990205.js
+++ b/test/mjsunit/regress/regress-990205.js
@@ -25,11 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// We throw syntax errors early for invalid break and continue statements.
+// (Notice that the example isn't valid ECMAScript due to the
+// function declaration that is not at top level.)
+
 function f() {
   // Force eager compilation of x through the use of eval. The break
   // in function x should not try to break out of the enclosing while.
   return eval("while(0) function x() { break; }; 42");
 };
 
-assertEquals(42, f());
+assertThrows("f()");
 
diff --git a/test/mjsunit/regress/regress-create-exception.js b/test/mjsunit/regress/regress-create-exception.js
index 7d53f1c..2119ce2 100644
--- a/test/mjsunit/regress/regress-create-exception.js
+++ b/test/mjsunit/regress/regress-create-exception.js
@@ -49,7 +49,7 @@
       return j;  // Make sure that future optimizations don't eliminate j.
     } catch(e) {
       ok = true;
-      assertTrue(re.test(e));
+      assertTrue(re.test(e), 'e: ' + e);
     }
     assertTrue(ok);
   }
diff --git a/test/mjsunit/testcfg.py b/test/mjsunit/testcfg.py
index d8fe24d..5cb46bc 100644
--- a/test/mjsunit/testcfg.py
+++ b/test/mjsunit/testcfg.py
@@ -111,6 +111,12 @@
     third_party = [current_path + ['third_party', t] for t in self.Ls(join(self.root, 'third_party'))]
     tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
     compiler = [current_path + ['compiler', t] for t in self.Ls(join(self.root, 'compiler'))]
+    mjsunit.sort()
+    regress.sort()
+    bugs.sort()
+    third_party.sort()
+    tools.sort()
+    compiler.sort()
     all_tests = mjsunit + regress + bugs + third_party + tools + compiler
     result = []
     for test in all_tests:
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 1d989ed..ba35bb6 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -833,6 +833,8 @@
 js1_5/Regress/regress-451322: SKIP
 js1_5/extensions/regress-371636: SKIP
 
+# BUG(1040): Allow this test to timeout.
+js1_5/GC/regress-203278-2: PASS || TIMEOUT
 
 [ $arch == arm && $crankshaft ]
 
@@ -840,8 +842,13 @@
 js1_5/Regress/regress-416628: CRASH
 js1_5/Regress/regress-96128-n: PASS || CRASH
 
-# BUG(1031).
-ecma/TypeConversion/9.2: FAIL
+# BUG(1032): test crashes.
+ecma/Date/15.9.3.1-1: PASS || CRASH
+ecma/Date/15.9.3.1-2: PASS || CRASH
+ecma/Date/15.9.3.1-3: PASS || CRASH
+ecma/Date/15.9.3.1-4: PASS || CRASH
+ecma/Date/15.9.3.1-5: PASS || CRASH
+
 
 [ $fast == yes && $arch == arm ]
 
@@ -859,7 +866,6 @@
 js1_5/Regress/regress-280769-1: SKIP
 js1_5/Regress/regress-280769-5: SKIP
 js1_5/GC/regress-306788: SKIP
-js1_5/GC/regress-203278-2: SKIP
 js1_5/GC/regress-278725: SKIP
 js1_5/GC/regress-203278-3: SKIP
 js1_5/GC/regress-311497: SKIP
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index c4db7d9..816343b 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -673,6 +673,7 @@
                 '../../src/x64/full-codegen-x64.cc',
                 '../../src/x64/ic-x64.cc',
                 '../../src/x64/jump-target-x64.cc',
+                '../../src/x64/lithium-codegen-x64.cc',
                 '../../src/x64/lithium-codegen-x64.h',
                 '../../src/x64/lithium-x64.cc',
                 '../../src/x64/lithium-x64.h',
diff --git a/tools/test.py b/tools/test.py
index 7348c62..939ca0c 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -1181,6 +1181,12 @@
   result.add_option("--crankshaft",
                     help="Run with the --crankshaft flag",
                     default=False, action="store_true")
+  result.add_option("--shard-count",
+                    help="Split testsuites into this number of shards",
+                    default=1, type="int")
+  result.add_option("--shard-run",
+                    help="Run this shard from the split up tests.",
+                    default=1, type="int")
   result.add_option("--noprof", help="Disable profiling support",
                     default=False)
   return result
@@ -1302,6 +1308,20 @@
   millis = round(d * 1000) % 1000
   return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
 
+def ShardTests(tests, options):
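+  # Round-robin sharding: a test belongs to this shard when its index modulo
+  # shard-count equals shard-run - 1 (shard-run is 1-based).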
+  if options.shard_count < 2:
+    return tests
+  if options.shard_run < 1 or options.shard_run > options.shard_count:
+    print "shard-run not a valid number, should be in [1:shard-count]"
+    print "defaulting back to running all tests"
+    return tests
+  count = 0
+  shard = []
+  for test in tests:
+    if count % options.shard_count == options.shard_run - 1:
+      shard.append(test)
+    count += 1
+  return shard
 
 def Main():
   parser = BuildOptions()
@@ -1385,7 +1405,7 @@
         globally_unused_rules = set(unused_rules)
       else:
         globally_unused_rules = globally_unused_rules.intersection(unused_rules)
-      all_cases += cases
+      all_cases += ShardTests(cases, options)
       all_unused.append(unused_rules)
 
   if options.cat:
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index da070d6..b994d36 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -19,6 +19,9 @@
 				893988100F2A3647007D5254 /* PBXTargetDependency */,
 				896FD03E0E78D731003DFB6A /* PBXTargetDependency */,
 				896FD0400E78D735003DFB6A /* PBXTargetDependency */,
+				8938A29912D63A680080CDDE /* PBXTargetDependency */,
+				8938A29712D63A680080CDDE /* PBXTargetDependency */,
+				8938A29512D63A680080CDDE /* PBXTargetDependency */,
 			);
 			name = All;
 			productName = All;
@@ -44,6 +47,7 @@
 		890A14020EE9C4B400E49346 /* regexp-macro-assembler-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */; };
 		890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */; };
 		890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */; };
+		8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8938A2A212D63B630080CDDE /* lithium-x64.cc */; };
 		893988070F2A35FA007D5254 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
 		8939880D0F2A362A007D5254 /* d8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C920EE46A1700B48DEB /* d8.cc */; };
 		893988160F2A3688007D5254 /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; };
@@ -96,6 +100,9 @@
 		8946827612C26EB700C914BC /* objects-printer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8946827412C26EB700C914BC /* objects-printer.cc */; };
 		89495E480E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
 		89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
+		894A59E912D777E80000766D /* lithium.cc in Sources */ = {isa = PBXBuildFile; fileRef = 894A59E712D777E80000766D /* lithium.cc */; };
+		894A59EA12D777E80000766D /* lithium.cc in Sources */ = {isa = PBXBuildFile; fileRef = 894A59E712D777E80000766D /* lithium.cc */; };
+		894A59EB12D777E80000766D /* lithium.cc in Sources */ = {isa = PBXBuildFile; fileRef = 894A59E712D777E80000766D /* lithium.cc */; };
 		8956922A12D4ED240072C313 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
 		8956922B12D4ED240072C313 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
 		8956922C12D4ED240072C313 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
@@ -380,6 +387,7 @@
 		89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */; };
 		89F23C9F0E78D604006B2466 /* simulator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17D0E719B8F00D62E90 /* simulator-arm.cc */; };
 		89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */; };
+		89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */; };
 		89FB0E3A0F8E533F00B04B3C /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
 		9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
 		9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
@@ -438,6 +446,27 @@
 			remoteGlobalIDString = 897F76790E71B4CC007ACF34;
 			remoteInfo = v8_shell;
 		};
+		8938A29412D63A680080CDDE /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 89B91BC012D4F02A002FF4BC;
+			remoteInfo = "d8_shell-x64";
+		};
+		8938A29612D63A680080CDDE /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 89B91BB412D4F02A002FF4BC;
+			remoteInfo = "v8_shell-x64";
+		};
+		8938A29812D63A680080CDDE /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 8956922712D4ED240072C313;
+			remoteInfo = "v8-x64";
+		};
 		893988020F2A35FA007D5254 /* PBXContainerItemProxy */ = {
 			isa = PBXContainerItemProxy;
 			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
@@ -527,6 +556,7 @@
 		58950D5A0F55514900F3E8BA /* virtual-frame.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame.cc"; sourceTree = "<group>"; };
 		58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; };
 		8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
+		8938A2A212D63B630080CDDE /* lithium-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-x64.cc"; path = "x64/lithium-x64.cc"; sourceTree = "<group>"; };
 		893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
 		8939880B0F2A35FA007D5254 /* d8 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = d8; sourceTree = BUILT_PRODUCTS_DIR; };
 		893988150F2A3686007D5254 /* d8-debug.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-debug.cc"; path = "../src/d8-debug.cc"; sourceTree = "<group>"; };
@@ -595,6 +625,8 @@
 		89471C7F0EB23EE400B6874B /* flag-definitions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flag-definitions.h"; sourceTree = "<group>"; };
 		89495E460E79FC23001F68C3 /* compilation-cache.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "compilation-cache.cc"; sourceTree = "<group>"; };
 		89495E470E79FC23001F68C3 /* compilation-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "compilation-cache.h"; sourceTree = "<group>"; };
+		894A59E712D777E80000766D /* lithium.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = lithium.cc; sourceTree = "<group>"; };
+		894A59E812D777E80000766D /* lithium.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lithium.h; sourceTree = "<group>"; };
 		895692AA12D4ED240072C313 /* libv8-x64.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-x64.a"; sourceTree = BUILT_PRODUCTS_DIR; };
 		8956B6CD0F5D86570033B5A2 /* debug-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "debug-agent.cc"; sourceTree = "<group>"; };
 		8956B6CE0F5D86570033B5A2 /* debug-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "debug-agent.h"; sourceTree = "<group>"; };
@@ -856,6 +888,7 @@
 		89B91BCE12D4F02A002FF4BC /* d8-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
 		89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
 		89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
+		89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-x64.cc"; path = "x64/lithium-codegen-x64.cc"; sourceTree = "<group>"; };
 		89FB0E360F8E531900B04B3C /* d8-posix.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-posix.cc"; path = "../src/d8-posix.cc"; sourceTree = "<group>"; };
 		89FB0E370F8E531900B04B3C /* d8-windows.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-windows.cc"; path = "../src/d8-windows.cc"; sourceTree = "<group>"; };
 		9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "heap-profiler.cc"; sourceTree = "<group>"; };
@@ -1146,6 +1179,8 @@
 				897FF1510E719B8F00D62E90 /* list.h */,
 				893E249312B14B3D0083370F /* lithium-allocator.cc */,
 				893E249412B14B3D0083370F /* lithium-allocator.h */,
+				894A59E712D777E80000766D /* lithium.cc */,
+				894A59E812D777E80000766D /* lithium.h */,
 				9FA38BA91175B2D200C4CD55 /* liveedit.cc */,
 				9FA38BAA1175B2D200C4CD55 /* liveedit.h */,
 				22A76C900FF259E600FDC694 /* log-inl.h */,
@@ -1405,7 +1440,9 @@
 				89B91B8A12D4EF95002FF4BC /* full-codegen-x64.cc */,
 				89B91B8B12D4EF95002FF4BC /* ic-x64.cc */,
 				89B91B8C12D4EF95002FF4BC /* jump-target-x64.cc */,
+				89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */,
 				89B91B8D12D4EF95002FF4BC /* lithium-codegen-x64.h */,
+				8938A2A212D63B630080CDDE /* lithium-x64.cc */,
 				89B91B8E12D4EF95002FF4BC /* lithium-x64.h */,
 				89B91B8F12D4EF95002FF4BC /* macro-assembler-x64.cc */,
 				89B91B9012D4EF95002FF4BC /* macro-assembler-x64.h */,
@@ -1918,6 +1955,9 @@
 				89B91BAA12D4EF95002FF4BC /* simulator-x64.cc in Sources */,
 				89B91BAB12D4EF95002FF4BC /* stub-cache-x64.cc in Sources */,
 				89B91BAC12D4EF95002FF4BC /* virtual-frame-x64.cc in Sources */,
+				8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */,
+				894A59E912D777E80000766D /* lithium.cc in Sources */,
+				89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -2052,6 +2092,7 @@
 				893E24DC12B14B9F0083370F /* externalize-string-extension.cc in Sources */,
 				893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */,
 				8946827512C26EB700C914BC /* objects-printer.cc in Sources */,
+				894A59EB12D777E80000766D /* lithium.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -2226,6 +2267,7 @@
 				893E24DE12B14B9F0083370F /* externalize-string-extension.cc in Sources */,
 				893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */,
 				8946827612C26EB700C914BC /* objects-printer.cc in Sources */,
+				894A59EA12D777E80000766D /* lithium.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -2250,6 +2292,21 @@
 			target = 897F76790E71B4CC007ACF34 /* v8_shell */;
 			targetProxy = 7BF891980E73099F000BAF8A /* PBXContainerItemProxy */;
 		};
+		8938A29512D63A680080CDDE /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 89B91BC012D4F02A002FF4BC /* d8_shell-x64 */;
+			targetProxy = 8938A29412D63A680080CDDE /* PBXContainerItemProxy */;
+		};
+		8938A29712D63A680080CDDE /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 89B91BB412D4F02A002FF4BC /* v8_shell-x64 */;
+			targetProxy = 8938A29612D63A680080CDDE /* PBXContainerItemProxy */;
+		};
+		8938A29912D63A680080CDDE /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 8956922712D4ED240072C313 /* v8-x64 */;
+			targetProxy = 8938A29812D63A680080CDDE /* PBXContainerItemProxy */;
+		};
 		893988010F2A35FA007D5254 /* PBXTargetDependency */ = {
 			isa = PBXTargetDependency;
 			target = 8970F2EF0E719FB2006AE7B5 /* v8 */;