Version 2.0.4

Added ECMAScript 5 Object.create.

Improved performance of Math.max and Math.min.

Optimized string addition on 64-bit platforms.

Improved handling of external strings by using a separate table instead of weak handles.  This improves garbage collection performance and reduces memory usage; see the sketch of the embedder-facing API below.

Changed code generation for object and array literals in top-level code to be more compact by doing more work in the runtime.

Fixed a crash bug triggered when garbage collection happened during generation of a callback load inline cache stub.

Fixed a crash bug sometimes triggered when local variables shadowed parameters in functions that used the arguments object.
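
A minimal sketch of the embedder side of external strings, assuming the
v8.h public API of this release (the resource interface matches the names
used in the api.cc changes below; StaticAsciiResource and
MakeExternalString are hypothetical helpers).  The characters stay in
embedder memory, and V8 finalizes the resource once the string becomes
unreachable:

    #include <v8.h>
    #include <string.h>

    // Wraps a static C string so V8 can use it without copying.
    class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
     public:
      explicit StaticAsciiResource(const char* data)
          : data_(data), length_(strlen(data)) {}
      const char* data() const { return data_; }
      size_t length() const { return length_; }
     private:
      const char* data_;
      size_t length_;
    };

    void MakeExternalString() {
      v8::HandleScope scope;
      v8::Local<v8::String> str =
          v8::String::NewExternal(new StaticAsciiResource("hello"));
      // str now refers to bytes owned by the resource; with this release
      // the string is tracked in a table rather than via a weak handle.
    }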


git-svn-id: http://v8.googlecode.com/svn/trunk@3475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index 3b0df17..28996b0 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -106,7 +106,6 @@
     zone.cc
     """),
   'arch:arm': Split("""
-    arm/assembler-arm.cc
     arm/builtins-arm.cc
     arm/codegen-arm.cc
     arm/constants-arm.cc
@@ -123,6 +122,12 @@
     arm/stub-cache-arm.cc
     arm/virtual-frame-arm.cc
     """),
+  'armvariant:arm': Split("""
+    arm/assembler-arm.cc
+    """),
+  'armvariant:thumb2': Split("""
+    arm/assembler-thumb2.cc
+    """),
   'arch:ia32': Split("""
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
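
The SConscript change above splits the assembler source out of the common
'arch:arm' group into per-variant groups, so a build compiles exactly one
of arm/assembler-arm.cc or arm/assembler-thumb2.cc.  Presumably (the
accompanying SConstruct change is not part of this diff) the group is
selected by a new armvariant build option, along the lines of
"scons arch=arm armvariant=thumb2", with armvariant:arm as the default.
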
diff --git a/src/api.cc b/src/api.cc
index 93807a7..d793b9f 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3082,81 +3082,13 @@
 }
 
 
-static void DisposeExternalString(v8::Persistent<v8::Value> obj,
-                                  void* parameter) {
-  ENTER_V8;
-  i::ExternalTwoByteString* str =
-      i::ExternalTwoByteString::cast(*Utils::OpenHandle(*obj));
-
-  // External symbols are deleted when they are pruned out of the symbol
-  // table. Generally external symbols are not registered with the weak handle
-  // callbacks unless they are upgraded to a symbol after being externalized.
-  if (!str->IsSymbol()) {
-    v8::String::ExternalStringResource* resource =
-        reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
-    if (resource != NULL) {
-      const int total_size =
-          static_cast<int>(resource->length() * sizeof(*resource->data()));
-      i::Counters::total_external_string_memory.Decrement(total_size);
-
-      // The object will continue to live in the JavaScript heap until the
-      // handle is entirely cleaned out by the next GC. For example the
-      // destructor for the resource below could bring it back to life again.
-      // Which is why we make sure to not have a dangling pointer here.
-      str->set_resource(NULL);
-      delete resource;
-    }
-  }
-
-  // In any case we do not need this handle any longer.
-  obj.Dispose();
-}
-
-
-static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
-                                       void* parameter) {
-  ENTER_V8;
-  i::ExternalAsciiString* str =
-      i::ExternalAsciiString::cast(*Utils::OpenHandle(*obj));
-
-  // External symbols are deleted when they are pruned out of the symbol
-  // table. Generally external symbols are not registered with the weak handle
-  // callbacks unless they are upgraded to a symbol after being externalized.
-  if (!str->IsSymbol()) {
-    v8::String::ExternalAsciiStringResource* resource =
-        reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
-    if (resource != NULL) {
-      const int total_size =
-          static_cast<int>(resource->length() * sizeof(*resource->data()));
-      i::Counters::total_external_string_memory.Decrement(total_size);
-
-      // The object will continue to live in the JavaScript heap until the
-      // handle is entirely cleaned out by the next GC. For example the
-      // destructor for the resource below could bring it back to life again.
-      // Which is why we make sure to not have a dangling pointer here.
-      str->set_resource(NULL);
-      delete resource;
-    }
-  }
-
-  // In any case we do not need this handle any longer.
-  obj.Dispose();
-}
-
-
 Local<String> v8::String::NewExternal(
       v8::String::ExternalStringResource* resource) {
   EnsureInitialized("v8::String::NewExternal()");
   LOG_API("String::NewExternal");
   ENTER_V8;
-  const int total_size =
-      static_cast<int>(resource->length() * sizeof(*resource->data()));
-  i::Counters::total_external_string_memory.Increment(total_size);
   i::Handle<i::String> result = NewExternalStringHandle(resource);
-  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
-  i::GlobalHandles::MakeWeak(handle.location(),
-                             resource,
-                             &DisposeExternalString);
+  i::ExternalStringTable::AddString(*result);
   return Utils::ToLocal(result);
 }
 
@@ -3168,13 +3100,7 @@
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
-    // Operation was successful and the string is not a symbol. In this case
-    // we need to make sure that the we call the destructor for the external
-    // resource when no strong references to the string remain.
-    i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
-    i::GlobalHandles::MakeWeak(handle.location(),
-                               resource,
-                               &DisposeExternalString);
+    i::ExternalStringTable::AddString(*obj);
   }
   return result;
 }
@@ -3185,14 +3111,8 @@
   EnsureInitialized("v8::String::NewExternal()");
   LOG_API("String::NewExternal");
   ENTER_V8;
-  const int total_size =
-      static_cast<int>(resource->length() * sizeof(*resource->data()));
-  i::Counters::total_external_string_memory.Increment(total_size);
   i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
-  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
-  i::GlobalHandles::MakeWeak(handle.location(),
-                             resource,
-                             &DisposeExternalAsciiString);
+  i::ExternalStringTable::AddString(*result);
   return Utils::ToLocal(result);
 }
 
@@ -3205,13 +3125,7 @@
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
-    // Operation was successful and the string is not a symbol. In this case
-    // we need to make sure that the we call the destructor for the external
-    // resource when no strong references to the string remain.
-    i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
-    i::GlobalHandles::MakeWeak(handle.location(),
-                               resource,
-                               &DisposeExternalAsciiString);
+    i::ExternalStringTable::AddString(*obj);
   }
   return result;
 }
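
For context, a simplified standalone sketch of the idea behind such a
table (the real i::ExternalStringTable lives in the heap code and is not
part of this diff; the names and the marked flag here are illustrative).
Instead of registering one weak global handle per external string, the
collector sweeps a single list after marking and finalizes the resources
of dead strings:

    #include <vector>

    struct ExternalString {
      bool marked;     // set by the GC marking phase
      void* resource;  // embedder-owned character data
    };

    class ExternalStringTable {
     public:
      static void AddString(ExternalString* str) { strings_.push_back(str); }

      // Called once after marking: keep live strings, finalize the rest.
      static void SweepAfterGC() {
        std::vector<ExternalString*> live;
        for (size_t i = 0; i < strings_.size(); i++) {
          if (strings_[i]->marked) {
            live.push_back(strings_[i]);
          } else {
            // Here the real implementation would delete the resource and
            // update the external string memory counters.
          }
        }
        strings_.swap(live);
      }

     private:
      static std::vector<ExternalString*> strings_;
    };

    std::vector<ExternalString*> ExternalStringTable::strings_;
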
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index d924728..07da800 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -114,55 +114,55 @@
 
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "sN:sM" is the same as "dN/2".
-Register s0  = {  0 };
-Register s1  = {  1 };
-Register s2  = {  2 };
-Register s3  = {  3 };
-Register s4  = {  4 };
-Register s5  = {  5 };
-Register s6  = {  6 };
-Register s7  = {  7 };
-Register s8  = {  8 };
-Register s9  = {  9 };
-Register s10 = { 10 };
-Register s11 = { 11 };
-Register s12 = { 12 };
-Register s13 = { 13 };
-Register s14 = { 14 };
-Register s15 = { 15 };
-Register s16 = { 16 };
-Register s17 = { 17 };
-Register s18 = { 18 };
-Register s19 = { 19 };
-Register s20 = { 20 };
-Register s21 = { 21 };
-Register s22 = { 22 };
-Register s23 = { 23 };
-Register s24 = { 24 };
-Register s25 = { 25 };
-Register s26 = { 26 };
-Register s27 = { 27 };
-Register s28 = { 28 };
-Register s29 = { 29 };
-Register s30 = { 30 };
-Register s31 = { 31 };
+SwVfpRegister s0  = {  0 };
+SwVfpRegister s1  = {  1 };
+SwVfpRegister s2  = {  2 };
+SwVfpRegister s3  = {  3 };
+SwVfpRegister s4  = {  4 };
+SwVfpRegister s5  = {  5 };
+SwVfpRegister s6  = {  6 };
+SwVfpRegister s7  = {  7 };
+SwVfpRegister s8  = {  8 };
+SwVfpRegister s9  = {  9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
 
-Register d0  = {  0 };
-Register d1  = {  1 };
-Register d2  = {  2 };
-Register d3  = {  3 };
-Register d4  = {  4 };
-Register d5  = {  5 };
-Register d6  = {  6 };
-Register d7  = {  7 };
-Register d8  = {  8 };
-Register d9  = {  9 };
-Register d10 = { 10 };
-Register d11 = { 11 };
-Register d12 = { 12 };
-Register d13 = { 13 };
-Register d14 = { 14 };
-Register d15 = { 15 };
+DwVfpRegister d0  = {  0 };
+DwVfpRegister d1  = {  1 };
+DwVfpRegister d2  = {  2 };
+DwVfpRegister d3  = {  3 };
+DwVfpRegister d4  = {  4 };
+DwVfpRegister d5  = {  5 };
+DwVfpRegister d6  = {  6 };
+DwVfpRegister d7  = {  7 };
+DwVfpRegister d8  = {  8 };
+DwVfpRegister d9  = {  9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };
 
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo
@@ -1371,11 +1371,10 @@
 
 
 // Support for VFP.
-void Assembler::fmdrr(const Register dst,
-                      const Register src1,
-                      const Register src2,
-                      const SBit s,
-                      const Condition cond) {
+void Assembler::vmov(const DwVfpRegister dst,
+                     const Register src1,
+                     const Register src2,
+                     const Condition cond) {
   // Dm = <Rt,Rt2>.
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
@@ -1387,11 +1386,10 @@
 }
 
 
-void Assembler::fmrrd(const Register dst1,
-                      const Register dst2,
-                      const Register src,
-                      const SBit s,
-                      const Condition cond) {
+void Assembler::vmov(const Register dst1,
+                     const Register dst2,
+                     const DwVfpRegister src,
+                     const Condition cond) {
   // <Rt,Rt2> = Dm.
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
@@ -1403,9 +1401,8 @@
 }
 
 
-void Assembler::fmsr(const Register dst,
+void Assembler::vmov(const SwVfpRegister dst,
                      const Register src,
-                     const SBit s,
                      const Condition cond) {
   // Sn = Rt.
   // Instruction details available in ARM DDI 0406A, A8-642.
@@ -1418,9 +1415,8 @@
 }
 
 
-void Assembler::fmrs(const Register dst,
-                     const Register src,
-                     const SBit s,
+void Assembler::vmov(const Register dst,
+                     const SwVfpRegister src,
                      const Condition cond) {
   // Rt = Sn.
   // Instruction details available in ARM DDI 0406A, A8-642.
@@ -1433,10 +1429,9 @@
 }
 
 
-void Assembler::fsitod(const Register dst,
-                       const Register src,
-                       const SBit s,
-                       const Condition cond) {
+void Assembler::vcvt(const DwVfpRegister dst,
+                     const SwVfpRegister src,
+                     const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
   // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
@@ -1448,10 +1443,9 @@
 }
 
 
-void Assembler::ftosid(const Register dst,
-                       const Register src,
-                       const SBit s,
-                       const Condition cond) {
+void Assembler::vcvt(const SwVfpRegister dst,
+                     const DwVfpRegister src,
+                     const Condition cond) {
   // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
   // Instruction details available in ARM DDI 0406A, A8-576.
   // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
@@ -1463,12 +1457,11 @@
 }
 
 
-void Assembler::faddd(const Register dst,
-                      const  Register src1,
-                      const  Register src2,
-                      const  SBit s,
-                      const  Condition cond) {
-  // Dd = faddd(Dn, Dm) double precision floating point addition.
+void Assembler::vadd(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vadd(Dn, Dm) double precision floating point addition.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-536.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1479,12 +1472,11 @@
 }
 
 
-void Assembler::fsubd(const Register dst,
-                      const  Register src1,
-                      const  Register src2,
-                      const  SBit s,
-                      const  Condition cond) {
-  // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+void Assembler::vsub(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1495,12 +1487,11 @@
 }
 
 
-void Assembler::fmuld(const Register dst,
-                      const  Register src1,
-                      const  Register src2,
-                      const  SBit s,
-                      const  Condition cond) {
-  // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+void Assembler::vmul(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
@@ -1511,12 +1502,11 @@
 }
 
 
-void Assembler::fdivd(const Register dst,
-                      const  Register src1,
-                      const  Register src2,
-                      const  SBit s,
-                      const  Condition cond) {
-  // Dd = fdivd(Dn, Dm) double precision floating point division.
+void Assembler::vdiv(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vdiv(Dn, Dm) double precision floating point division.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-584.
   // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
@@ -1527,8 +1517,8 @@
 }
 
 
-void Assembler::fcmp(const Register src1,
-                     const Register src2,
+void Assembler::vcmp(const DwVfpRegister src1,
+                     const DwVfpRegister src2,
                      const SBit s,
                      const Condition cond) {
   // vcmp(Dd, Dm) double precision floating point comparison.
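
With the renames above, the VFP emitters mirror the ARM mnemonics (vmov,
vcvt, vadd, ...), and the overloads are disambiguated by register type
rather than by distinct fxxx names.  A sketch of a typical emission
sequence using the new signatures (EmitIntPlusDouble is a hypothetical
helper, not part of this patch); it computes d5 = (double)r0 + d6 and
moves the 64-bit result back into core registers:

    void EmitIntPlusDouble(Assembler* masm) {
      masm->vmov(s15, r0);     // transfer the integer bits of r0 into s15
      masm->vcvt(d7, s15);     // d7 = (double)(integer in s15)
      masm->vadd(d5, d6, d7);  // double-precision addition
      masm->vmov(r0, r1, d5);  // move the 64-bit result into r0:r1
    }
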
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 86bc18a..cd53dd6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -103,57 +103,94 @@
 extern Register lr;
 extern Register pc;
 
-// Support for VFP registers s0 to s32 (d0 to d16).
-// Note that "sN:sM" is the same as "dN/2".
-extern Register s0;
-extern Register s1;
-extern Register s2;
-extern Register s3;
-extern Register s4;
-extern Register s5;
-extern Register s6;
-extern Register s7;
-extern Register s8;
-extern Register s9;
-extern Register s10;
-extern Register s11;
-extern Register s12;
-extern Register s13;
-extern Register s14;
-extern Register s15;
-extern Register s16;
-extern Register s17;
-extern Register s18;
-extern Register s19;
-extern Register s20;
-extern Register s21;
-extern Register s22;
-extern Register s23;
-extern Register s24;
-extern Register s25;
-extern Register s26;
-extern Register s27;
-extern Register s28;
-extern Register s29;
-extern Register s30;
-extern Register s31;
 
-extern Register d0;
-extern Register d1;
-extern Register d2;
-extern Register d3;
-extern Register d4;
-extern Register d5;
-extern Register d6;
-extern Register d7;
-extern Register d8;
-extern Register d9;
-extern Register d10;
-extern Register d11;
-extern Register d12;
-extern Register d13;
-extern Register d14;
-extern Register d15;
+// Single word VFP register.
+struct SwVfpRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 32; }
+  bool is(SwVfpRegister reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+  // Supporting d0 to d15, can be later extended to d31.
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(DwVfpRegister reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;
+
 
 // Coprocessor register
 struct CRegister {
@@ -759,55 +796,45 @@
   // However, some simple modifications can allow
   // these APIs to support D16 to D31.
 
-  void fmdrr(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fmrrd(const Register dst1,
-             const Register dst2,
-             const Register src,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fmsr(const Register dst,
-            const Register src,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
-  void fmrs(const Register dst,
-            const Register src,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
-  void fsitod(const Register dst,
-              const Register src,
-              const SBit s = LeaveCC,
-              const Condition cond = al);
-  void ftosid(const Register dst,
-              const Register src,
-              const SBit s = LeaveCC,
-              const Condition cond = al);
-
-  void faddd(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fsubd(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fmuld(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fdivd(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fcmp(const Register src1,
+  void vmov(const DwVfpRegister dst,
+            const Register src1,
             const Register src2,
+            const Condition cond = al);
+  void vmov(const Register dst1,
+            const Register dst2,
+            const DwVfpRegister src,
+            const Condition cond = al);
+  void vmov(const SwVfpRegister dst,
+            const Register src,
+            const Condition cond = al);
+  void vmov(const Register dst,
+            const SwVfpRegister src,
+            const Condition cond = al);
+  void vcvt(const DwVfpRegister dst,
+            const SwVfpRegister src,
+            const Condition cond = al);
+  void vcvt(const SwVfpRegister dst,
+            const DwVfpRegister src,
+            const Condition cond = al);
+
+  void vadd(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vsub(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vmul(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vdiv(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vcmp(const DwVfpRegister src1,
+            const DwVfpRegister src2,
             const SBit s = LeaveCC,
             const Condition cond = al);
   void vmrs(const Register dst,
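
The payoff of the new POD structs is type safety: the old API declared
every VFP operand as a plain Register, so nothing stopped code like
faddd(r0, r1, r2).  With SwVfpRegister and DwVfpRegister as distinct
parameter types, the compiler now rejects core registers where VFP
registers are required.  A sketch (hypothetical snippet against this
header):

    void TypeSafetyDemo(Assembler* masm) {
      masm->vadd(d0, d1, d2);     // ok: double-precision VFP registers
      // masm->vadd(r0, r1, r2);  // no longer compiles: Register is not
                                  // convertible to DwVfpRegister
    }
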
diff --git a/src/arm/assembler-thumb2-inl.h b/src/arm/assembler-thumb2-inl.h
new file mode 100644
index 0000000..3808ef0
--- /dev/null
+++ b/src/arm/assembler-thumb2-inl.h
@@ -0,0 +1,267 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
+#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
+
+#include "arm/assembler-thumb2.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+  ASSERT(cc != al);
+  return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(intptr_t delta) {
+  if (RelocInfo::IsInternalReference(rmode_)) {
+    // absolute code pointer inside code object moves with the code object.
+    int32_t* p = reinterpret_cast<int32_t*>(pc_);
+    *p += delta;  // relocate entry
+  }
+  // We do not use pc relative addressing on ARM, so there is
+  // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+  // On ARM a "call instruction" is actually two instructions.
+  //   mov lr, pc
+  //   ldr pc, [pc, #XXX]
+  return (Assembler::instr_at(pc_) == kMovLrPc)
+          && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+              == kLdrPCPattern);
+}
+
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
+  rm_ = no_reg;
+  imm32_ = immediate;
+  rmode_ = rmode;
+}
+
+
+Operand::Operand(const char* s) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Operand::Operand(const ExternalReference& f)  {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Object** opp) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(opp);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Context** cpp) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(cpp);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm32_ =  reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+  rs_ = no_reg;
+  shift_op_ = LSL;
+  shift_imm_ = 0;
+}
+
+
+bool Operand::is_reg() const {
+  return rm_.is_valid() &&
+         rs_.is(no_reg) &&
+         shift_op_ == LSL &&
+         shift_imm_ == 0;
+}
+
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+  if (pc_offset() >= next_buffer_check_) {
+    CheckConstPool(false, true);
+  }
+}
+
+
+void Assembler::emit(Instr x) {
+  CheckBuffer();
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+}
+
+
+Address Assembler::target_address_address_at(Address pc) {
+  Instr instr = Memory::int32_at(pc);
+  // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+  ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+  int offset = instr & 0xfff;  // offset_12 is unsigned
+  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
+  // Verify that the constant pool comes after the instruction referencing it.
+  ASSERT(offset >= -4);
+  return pc + offset + 8;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_at(Address constant_pool_entry,
+                              Address target) {
+  Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  Memory::Address_at(target_address_address_at(pc)) = target;
+  // Intuitively, we would think it is necessary to flush the instruction cache
+  // after patching a target address in the code as follows:
+  //   CPU::FlushICache(pc, sizeof(target));
+  // However, on ARM, no instruction was actually patched by the assignment
+  // above; the target address is not part of an instruction, it is patched in
+  // the constant pool and is read via a data access; the instruction accessing
+  // this address in the constant pool remains unchanged.
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_ASSEMBLER_THUMB2_INL_H_
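
One detail worth calling out in the inline header above: NegateCondition
is a single xor because ARM condition codes occupy bits 31:28 and come in
complementary pairs that differ only in bit 28 (eq/ne, cs/cc, ge/lt, and
so on), and ne is encoded as 1 << 28.  A standalone sketch of the same
trick (constants copied from the ARM encoding, not from this file):

    #include <assert.h>
    #include <stdint.h>

    static const uint32_t kEq = 0u << 28, kNe = 1u << 28;
    static const uint32_t kGe = 10u << 28, kLt = 11u << 28;

    static uint32_t Negate(uint32_t cc) { return cc ^ kNe; }

    int main() {
      assert(Negate(kEq) == kNe);  // eq <-> ne
      assert(Negate(kGe) == kLt);  // ge <-> lt
      return 0;
    }
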
diff --git a/src/arm/assembler-thumb2.cc b/src/arm/assembler-thumb2.cc
new file mode 100644
index 0000000..6c2b903
--- /dev/null
+++ b/src/arm/assembler-thumb2.cc
@@ -0,0 +1,1821 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "arm/assembler-thumb2-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// Safe default is no features.
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+  // If the compiler is allowed to use vfp then we can use vfp too in our
+  // code generation.
+#if !defined(__arm__)
+  // For the simulator=arm build, always use VFP since the arm simulator has
+  // VFP support.
+  supported_ |= 1u << VFP3;
+#else
+  if (Serializer::enabled()) {
+    supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    return;  // No features if we might serialize.
+  }
+
+  if (OS::ArmCpuHasFeature(VFP3)) {
+    // This implementation also sets the VFP flags if
+    // runtime detection of VFP returns true.
+    supported_ |= 1u << VFP3;
+    found_by_runtime_probing_ |= 1u << VFP3;
+  }
+#endif
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+Register no_reg = { -1 };
+
+Register r0  = {  0 };
+Register r1  = {  1 };
+Register r2  = {  2 };
+Register r3  = {  3 };
+Register r4  = {  4 };
+Register r5  = {  5 };
+Register r6  = {  6 };
+Register r7  = {  7 };
+Register r8  = {  8 };
+Register r9  = {  9 };
+Register r10 = { 10 };
+Register fp  = { 11 };
+Register ip  = { 12 };
+Register sp  = { 13 };
+Register lr  = { 14 };
+Register pc  = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0  = {  0 };
+CRegister cr1  = {  1 };
+CRegister cr2  = {  2 };
+CRegister cr3  = {  3 };
+CRegister cr4  = {  4 };
+CRegister cr5  = {  5 };
+CRegister cr6  = {  6 };
+CRegister cr7  = {  7 };
+CRegister cr8  = {  8 };
+CRegister cr9  = {  9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+SwVfpRegister s0  = {  0 };
+SwVfpRegister s1  = {  1 };
+SwVfpRegister s2  = {  2 };
+SwVfpRegister s3  = {  3 };
+SwVfpRegister s4  = {  4 };
+SwVfpRegister s5  = {  5 };
+SwVfpRegister s6  = {  6 };
+SwVfpRegister s7  = {  7 };
+SwVfpRegister s8  = {  8 };
+SwVfpRegister s9  = {  9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
+
+DwVfpRegister d0  = {  0 };
+DwVfpRegister d1  = {  1 };
+DwVfpRegister d2  = {  2 };
+DwVfpRegister d3  = {  3 };
+DwVfpRegister d4  = {  4 };
+DwVfpRegister d5  = {  5 };
+DwVfpRegister d6  = {  6 };
+DwVfpRegister d7  = {  7 };
+DwVfpRegister d8  = {  8 };
+DwVfpRegister d9  = {  9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  // Patch the code at the current address with the supplied instructions.
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-thumb2-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+  rm_ = no_reg;
+  // Verify that all Objects referred to by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    imm32_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // no relocation needed
+    imm32_ =  reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+  ASSERT(is_uint5(shift_imm));
+  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
+  rm_ = rm;
+  rs_ = no_reg;
+  shift_op_ = shift_op;
+  shift_imm_ = shift_imm & 31;
+  if (shift_op == RRX) {
+    // encoded as ROR with shift_imm == 0
+    ASSERT(shift_imm == 0);
+    shift_op_ = ROR;
+    shift_imm_ = 0;
+  }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+  ASSERT(shift_op != RRX);
+  rm_ = rm;
+  rs_ = no_reg;
+  shift_op_ = shift_op;
+  rs_ = rs;
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+  rn_ = rn;
+  rm_ = no_reg;
+  offset_ = offset;
+  am_ = am;
+}
+
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+  rn_ = rn;
+  rm_ = rm;
+  shift_op_ = LSL;
+  shift_imm_ = 0;
+  am_ = am;
+}
+
+
+MemOperand::MemOperand(Register rn, Register rm,
+                       ShiftOp shift_op, int shift_imm, AddrMode am) {
+  ASSERT(is_uint5(shift_imm));
+  rn_ = rn;
+  rm_ = rm;
+  shift_op_ = shift_op;
+  shift_imm_ = shift_imm & 31;
+  am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+enum {
+  H   = 1 << 5,   // halfword (or byte)
+  S6  = 1 << 6,   // signed (or unsigned)
+  L   = 1 << 20,  // load (or store)
+  S   = 1 << 20,  // set condition code (or leave unchanged)
+  W   = 1 << 21,  // writeback base register (or leave unchanged)
+  A   = 1 << 21,  // accumulate in multiply instruction (or not)
+  B   = 1 << 22,  // unsigned byte (or word)
+  N   = 1 << 22,  // long (or short)
+  U   = 1 << 23,  // positive (or negative) offset/index
+  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
+  I   = 1 << 25,  // immediate shifter operand (or not)
+
+  B4  = 1 << 4,
+  B5  = 1 << 5,
+  B6  = 1 << 6,
+  B7  = 1 << 7,
+  B8  = 1 << 8,
+  B9  = 1 << 9,
+  B12 = 1 << 12,
+  B16 = 1 << 16,
+  B18 = 1 << 18,
+  B19 = 1 << 19,
+  B20 = 1 << 20,
+  B21 = 1 << 21,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+
+  // Instruction bit masks
+  RdMask     = 15 << 12,  // in str instruction
+  CondMask   = 15 << 28,
+  CoprocessorMask = 15 << 8,
+  OpCodeMask = 15 << 21,  // in data-processing instructions
+  Imm24Mask  = (1 << 24) - 1,
+  Off12Mask  = (1 << 12) - 1,
+  // Reserved condition
+  nv = 15 << 28
+};
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+static const Instr kPopInstruction =
+    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+static const Instr kPushRegPattern =
+    al | B26 | 4 | NegPreIndex | sp.code() * B16;
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+static const Instr kPopRegPattern =
+    al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // do our own buffer management
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+
+  } else {
+    // use externally provided buffer instead
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // setup buffer pointers
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+  num_prinfo_ = 0;
+  next_buffer_check_ = 0;
+  no_const_pool_before_ = 0;
+  last_const_pool_end_ = 0;
+  last_bound_pos_ = 0;
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // emit constant pool if necessary
+  CheckConstPool(true, false);
+  ASSERT(num_prinfo_ == 0);
+
+  // setup desc
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+void Assembler::Align(int m) {
+  ASSERT(m >= 4 && IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
+
+
+int Assembler::target_at(int pos)  {
+  Instr instr = instr_at(pos);
+  if ((instr & ~Imm24Mask) == 0) {
+    // Emitted label constant, not part of a branch.
+    return instr - (Code::kHeaderSize - kHeapObjectTag);
+  }
+  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
+  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
+  if ((instr & CondMask) == nv && (instr & B24) != 0)
+    // blx uses bit 24 to encode bit 2 of imm26
+    imm26 += 2;
+
+  return pos + kPcLoadDelta + imm26;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~Imm24Mask) == 0) {
+    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+    return;
+  }
+  int imm26 = target_pos - (pos + kPcLoadDelta);
+  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
+  if ((instr & CondMask) == nv) {
+    // blx uses bit 24 to encode bit 2 of imm26
+    ASSERT((imm26 & 1) == 0);
+    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+  } else {
+    ASSERT((imm26 & 3) == 0);
+    instr &= ~Imm24Mask;
+  }
+  int imm24 = imm26 >> 2;
+  ASSERT(is_int24(imm24));
+  instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      PrintF("@ %d ", l.pos());
+      Instr instr = instr_at(l.pos());
+      if ((instr & ~Imm24Mask) == 0) {
+        PrintF("value\n");
+      } else {
+        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
+        int cond = instr & CondMask;
+        const char* b;
+        const char* c;
+        if (cond == nv) {
+          b = "blx";
+          c = "";
+        } else {
+          if ((instr & B24) != 0)
+            b = "bl";
+          else
+            b = "b";
+
+          switch (cond) {
+            case eq: c = "eq"; break;
+            case ne: c = "ne"; break;
+            case hs: c = "hs"; break;
+            case lo: c = "lo"; break;
+            case mi: c = "mi"; break;
+            case pl: c = "pl"; break;
+            case vs: c = "vs"; break;
+            case vc: c = "vc"; break;
+            case hi: c = "hi"; break;
+            case ls: c = "ls"; break;
+            case ge: c = "ge"; break;
+            case lt: c = "lt"; break;
+            case gt: c = "gt"; break;
+            case le: c = "le"; break;
+            case al: c = ""; break;
+            default:
+              c = "";
+              UNREACHABLE();
+          }
+        }
+        PrintF("%s%s\n", b, c);
+      }
+      next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    int fixup_pos = L->pos();
+    next(L);  // call next before overwriting link with target at fixup_pos
+    target_at_put(fixup_pos, pos);
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // append appendix to L's list
+      int fixup_pos;
+      int link = L->pos();
+      do {
+        fixup_pos = link;
+        link = target_at(fixup_pos);
+      } while (link > 0);
+      ASSERT(link == kEndOfChain);
+      target_at_put(fixup_pos, appendix->pos());
+    } else {
+      // L is empty, simply use appendix
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+  ASSERT(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+  ASSERT(L->is_linked());
+  int link = target_at(L->pos());
+  if (link > 0) {
+    L->link_to(link);
+  } else {
+    ASSERT(link == kEndOfChain);
+    L->Unuse();
+  }
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+static bool fits_shifter(uint32_t imm32,
+                         uint32_t* rotate_imm,
+                         uint32_t* immed_8,
+                         Instr* instr) {
+  // imm32 must be unsigned
+  for (int rot = 0; rot < 16; rot++) {
+    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+    if ((imm8 <= 0xff)) {
+      *rotate_imm = rot;
+      *immed_8 = imm8;
+      return true;
+    }
+  }
+  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+      *instr ^= 0x2*B21;
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// We have to use the temporary register for things that can be relocated even
+// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
+// space.  There is no guarantee that the relocated location can be similarly
+// encoded.
+static bool MustUseIp(RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+    if (!Serializer::enabled()) {
+      Serializer::TooLateToEnableNow();
+    }
+#endif
+    return Serializer::enabled();
+  } else if (rmode == RelocInfo::NONE) {
+    return false;
+  }
+  return true;
+}
+
+
+void Assembler::addrmod1(Instr instr,
+                         Register rn,
+                         Register rd,
+                         const Operand& x) {
+  CheckBuffer();
+  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+  if (!x.rm_.is_valid()) {
+    // immediate
+    uint32_t rotate_imm;
+    uint32_t immed_8;
+    if (MustUseIp(x.rmode_) ||
+        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+      // The immediate operand cannot be encoded as a shifter operand, so load
+      // it first to register ip and change the original instruction to use ip.
+      // However, if the original instruction is a 'mov rd, x' (not setting the
+      // condition code), then replace it with a 'ldr rd, [pc]'
+      RecordRelocInfo(x.rmode_, x.imm32_);
+      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
+      Condition cond = static_cast<Condition>(instr & CondMask);
+      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
+        ldr(rd, MemOperand(pc, 0), cond);
+      } else {
+        ldr(ip, MemOperand(pc, 0), cond);
+        addrmod1(instr, rn, rd, Operand(ip));
+      }
+      return;
+    }
+    instr |= I | rotate_imm*B8 | immed_8;
+  } else if (!x.rs_.is_valid()) {
+    // immediate shift
+    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+  } else {
+    // register shift
+    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+  }
+  emit(instr | rn.code()*B16 | rd.code()*B12);
+  if (rn.is(pc) || x.rm_.is(pc))
+    // block constant pool emission for one instruction after reading pc
+    BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+  ASSERT((instr & ~(CondMask | B | L)) == B26);
+  int am = x.am_;
+  if (!x.rm_.is_valid()) {
+    // immediate offset
+    int offset_12 = x.offset_;
+    if (offset_12 < 0) {
+      offset_12 = -offset_12;
+      am ^= U;
+    }
+    if (!is_uint12(offset_12)) {
+      // immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed
+      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+      mov(ip, Operand(x.offset_), LeaveCC,
+          static_cast<Condition>(instr & CondMask));
+      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+      return;
+    }
+    ASSERT(offset_12 >= 0);  // no masking needed
+    instr |= offset_12;
+  } else {
+    // register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // register offset; the constructors make sure that both shift_imm_
+    // and shift_op_ are initialized.
+    ASSERT(!x.rm_.is(pc));
+    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+  }
+  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+  ASSERT(x.rn_.is_valid());
+  int am = x.am_;
+  if (!x.rm_.is_valid()) {
+    // immediate offset
+    int offset_8 = x.offset_;
+    if (offset_8 < 0) {
+      offset_8 = -offset_8;
+      am ^= U;
+    }
+    if (!is_uint8(offset_8)) {
+      // immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed
+      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+      mov(ip, Operand(x.offset_), LeaveCC,
+          static_cast<Condition>(instr & CondMask));
+      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+      return;
+    }
+    ASSERT(offset_8 >= 0);  // no masking needed
+    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+  } else if (x.shift_imm_ != 0) {
+    // scaled register offset not supported, load index first
+    // rn (and rd in a load) should never be ip, or will be trashed
+    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+        static_cast<Condition>(instr & CondMask));
+    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+    return;
+  } else {
+    // register offset
+    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
+    instr |= x.rm_.code();
+  }
+  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+  ASSERT(rl != 0);
+  ASSERT(!rn.is(pc));
+  emit(instr | rn.code()*B16 | rl);
+}
+
+
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+  // unindexed addressing is not encoded by this function
+  ASSERT_EQ((B27 | B26),
+            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+  int am = x.am_;
+  int offset_8 = x.offset_;
+  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
+  offset_8 >>= 2;
+  if (offset_8 < 0) {
+    offset_8 = -offset_8;
+    am ^= U;
+  }
+  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
+  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+
+  // post-indexed addressing requires W == 1; different than in addrmod2/3
+  if ((am & P) == 0)
+    am |= W;
+
+  ASSERT(offset_8 >= 0);  // no masking needed
+  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(pc_offset());
+  }
+
+  // Block the emission of the constant pool, since the branch instruction must
+  // be emitted at the pc offset recorded by the label
+  BlockConstPoolBefore(pc_offset() + kInstrSize);
+  return target_pos - (pc_offset() + kPcLoadDelta);
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(at_offset);
+    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
+
+// Branch instructions
+void Assembler::b(int branch_offset, Condition cond) {
+  ASSERT((branch_offset & 3) == 0);
+  int imm24 = branch_offset >> 2;
+  ASSERT(is_int24(imm24));
+  emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+  if (cond == al)
+    // dead code is a good location to emit the constant pool
+    CheckConstPool(false, false);
+}
+
+
+void Assembler::bl(int branch_offset, Condition cond) {
+  ASSERT((branch_offset & 3) == 0);
+  int imm24 = branch_offset >> 2;
+  ASSERT(is_int24(imm24));
+  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(int branch_offset) {  // v5 and above
+  WriteRecordedPositions();
+  ASSERT((branch_offset & 1) == 0);
+  int h = ((branch_offset & 2) >> 1)*B24;
+  int imm24 = branch_offset >> 2;
+  ASSERT(is_int24(imm24));
+  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(Register target, Condition cond) {  // v5 and above
+  WriteRecordedPositions();
+  ASSERT(!target.is(pc));
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
+  WriteRecordedPositions();
+  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+                     SBit s, Condition cond) {
+  addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 4*B21 | s, src1, dst, src2);
+
+  // Eliminate pattern: push(r), pop()
+  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
+  //   add(sp, sp, Operand(kPointerSize));
+  // Both instructions can be eliminated.
+  int pattern_size = 2 * kInstrSize;
+  if (FLAG_push_pop_elimination &&
+      last_bound_pos_ <= (pc_offset() - pattern_size) &&
+      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+      // pattern
+      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+    pc_ -= 2 * kInstrSize;
+    if (FLAG_print_push_pop_elimination) {
+      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+    }
+  }
+}
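+
+// Note on the peephole in add() above: the comparisons against
+// last_bound_pos_ and reloc_info_writer.last_pc() guarantee that no label is
+// bound to, and no relocation entry refers to, either of the two deleted
+// instructions; otherwise backing up pc_ would invalidate them.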
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+  addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+  if (dst.is(pc)) {
+    WriteRecordedPositions();
+  }
+  addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+                    SBit s, Condition cond) {
+  addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+  addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+                    SBit s, Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+                    SBit s, Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  // dst goes in bits 16-19 for this instruction!
+  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smlal(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smull(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umlal(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umull(Register dstL,
+                      Register dstH,
+                      Register src1,
+                      Register src2,
+                      SBit s,
+                      Condition cond) {
+  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(!dstL.is(dstH));
+  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+       src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+void Assembler::clz(Register dst, Register src, Condition cond) {
+  // v5 and above.
+  ASSERT(!dst.is(pc) && !src.is(pc));
+  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+       15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+  ASSERT(!dst.is(pc));
+  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+                    Condition cond) {
+  ASSERT(fields >= B16 && fields < B20);  // at least one field set
+  Instr instr;
+  if (!src.rm_.is_valid()) {
+    // immediate
+    uint32_t rotate_imm;
+    uint32_t immed_8;
+    if (MustUseIp(src.rmode_) ||
+        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+      // immediate operand cannot be encoded, load it first to register ip
+      RecordRelocInfo(src.rmode_, src.imm32_);
+      ldr(ip, MemOperand(pc, 0), cond);
+      msr(fields, Operand(ip), cond);
+      return;
+    }
+    instr = I | rotate_imm*B8 | immed_8;
+  } else {
+    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
+    instr = src.rm_.code();
+  }
+  emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
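+
+// Example (illustrative): msr(CPSR_f, Operand(0xF0000000)) sets all four
+// condition flags; 0xF0000000 is encodable as immed_8 = 0xF with
+// rotate_imm = 2 (rotate right by 4 bits), so no spill through ip is needed.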
+
+
+// Load/Store instructions
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+  if (dst.is(pc)) {
+    WriteRecordedPositions();
+  }
+  addrmod2(cond | B26 | L, dst, src);
+
+  // Eliminate pattern: push(r), pop(r)
+  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
+  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
+  // Both instructions can be eliminated.
+  int pattern_size = 2 * kInstrSize;
+  if (FLAG_push_pop_elimination &&
+      last_bound_pos_ <= (pc_offset() - pattern_size) &&
+      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+      // pattern
+      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
+      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
+    pc_ -= 2 * kInstrSize;
+    if (FLAG_print_push_pop_elimination) {
+      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+    }
+  }
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+  addrmod2(cond | B26, src, dst);
+
+  // Eliminate pattern: pop(), push(r)
+  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4]!, al
+  // ->  str r, [sp, #0], al
+  int pattern_size = 2 * kInstrSize;
+  if (FLAG_push_pop_elimination &&
+     last_bound_pos_ <= (pc_offset() - pattern_size) &&
+     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
+     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
+    pc_ -= 2 * kInstrSize;
+    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
+    if (FLAG_print_push_pop_elimination) {
+      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+    }
+  }
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+  addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+  addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+  addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+  addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+  addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+void Assembler::ldm(BlockAddrMode am,
+                    Register base,
+                    RegList dst,
+                    Condition cond) {
+  // ABI stack constraint: "ldmxx base, {..sp..}" with base != sp is not restartable
+  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+  addrmod4(cond | B27 | am | L, base, dst);
+
+  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  if (cond == al && (dst & pc.bit()) != 0) {
+    // There is a slight chance that the ldm instruction was actually a call,
+    // in which case it would be wrong to return into the constant pool; we
+    // recognize this case by checking if the emission of the pool was blocked
+    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+    // the case, we emit a jump over the pool.
+    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+  }
+}
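+
+// Example (illustrative): ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()) is
+// the epilogue "ldmia sp!, {r4, fp, pc}"; since it loads pc with cond == al,
+// the constant pool check above runs immediately after it.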
+
+
+void Assembler::stm(BlockAddrMode am,
+                    Register base,
+                    RegList src,
+                    Condition cond) {
+  addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+  ASSERT(!dst.is(base) && !src.is(base));
+  emit(cond | P | base.code()*B16 | dst.code()*B12 |
+       B7 | B4 | src.code());
+}
+
+
+void Assembler::swpb(Register dst,
+                     Register src,
+                     Register base,
+                     Condition cond) {
+  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+  ASSERT(!dst.is(base) && !src.is(base));
+  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+       B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+void Assembler::stop(const char* msg) {
+#if !defined(__arm__)
+  // The simulator handles these special instructions and stops execution.
+  emit(15 << 28 | ((intptr_t) msg));
+#else
+  // Just issue a simple break instruction for now. Alternatively we could use
+  // the swi(0x9f0001) instruction on Linux.
+  bkpt(0);
+#endif
+}
+
+
+void Assembler::bkpt(uint32_t imm16) {  // v5 and above
+  ASSERT(is_uint16(imm16));
+  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
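+
+// Encoding check (illustrative): bkpt(0) emits 0xE1200070, the ARMv5
+// breakpoint with its 16-bit immediate split across bits 19-8 and 3-0.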
+
+
+void Assembler::swi(uint32_t imm24, Condition cond) {
+  ASSERT(is_uint24(imm24));
+  emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+void Assembler::cdp(Coprocessor coproc,
+                    int opcode_1,
+                    CRegister crd,
+                    CRegister crn,
+                    CRegister crm,
+                    int opcode_2,
+                    Condition cond) {
+  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+                     int opcode_1,
+                     CRegister crd,
+                     CRegister crn,
+                     CRegister crm,
+                     int opcode_2) {  // v5 and above
+  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mcr(Coprocessor coproc,
+                    int opcode_1,
+                    Register rd,
+                    CRegister crn,
+                    CRegister crm,
+                    int opcode_2,
+                    Condition cond) {
+  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+                     int opcode_1,
+                     Register rd,
+                     CRegister crn,
+                     CRegister crm,
+                     int opcode_2) {  // v5 and above
+  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mrc(Coprocessor coproc,
+                    int opcode_1,
+                    Register rd,
+                    CRegister crn,
+                    CRegister crm,
+                    int opcode_2,
+                    Condition cond) {
+  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+                     int opcode_1,
+                     Register rd,
+                     CRegister crn,
+                     CRegister crm,
+                     int opcode_2) {  // v5 and above
+  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+                    CRegister crd,
+                    const MemOperand& src,
+                    LFlag l,
+                    Condition cond) {
+  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+                    CRegister crd,
+                    Register rn,
+                    int option,
+                    LFlag l,
+                    Condition cond) {
+  // unindexed addressing
+  ASSERT(is_uint8(option));
+  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+       coproc*B8 | (option & 255));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+                     CRegister crd,
+                     const MemOperand& src,
+                     LFlag l) {  // v5 and above
+  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+                     CRegister crd,
+                     Register rn,
+                     int option,
+                     LFlag l) {  // v5 and above
+  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+                    CRegister crd,
+                    const MemOperand& dst,
+                    LFlag l,
+                    Condition cond) {
+  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+                    CRegister crd,
+                    Register rn,
+                    int option,
+                    LFlag l,
+                    Condition cond) {
+  // unindexed addressing
+  ASSERT(is_uint8(option));
+  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+       coproc*B8 | (option & 255));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+                     const MemOperand& dst,
+                     LFlag l) {  // v5 and above
+  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+                     Register rn,
+                     int option,
+                     LFlag l) {  // v5 and above
+  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Support for VFP.
+void Assembler::vmov(const DwVfpRegister dst,
+                     const Register src1,
+                     const Register src2,
+                     const Condition cond) {
+  // Dm = <Rt,Rt2>.
+  // Instruction details available in ARM DDI 0406A, A8-646.
+  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
+  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(!src1.is(pc) && !src2.is(pc));
+  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
+       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+}
+
+
+void Assembler::vmov(const Register dst1,
+                     const Register dst2,
+                     const DwVfpRegister src,
+                     const Condition cond) {
+  // <Rt,Rt2> = Dm.
+  // Instruction details available in ARM DDI 0406A, A8-646.
+  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
+  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(!dst1.is(pc) && !dst2.is(pc));
+  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
+       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+}
+
+
+void Assembler::vmov(const SwVfpRegister dst,
+                     const Register src,
+                     const Condition cond) {
+  // Sn = Rt.
+  // Instruction details available in ARM DDI 0406A, A8-642.
+  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
+  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(!src.is(pc));
+  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
+       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+}
+
+
+void Assembler::vmov(const Register dst,
+                     const SwVfpRegister src,
+                     const Condition cond) {
+  // Rt = Sn.
+  // Instruction details available in ARM DDI 0406A, A8-642.
+  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
+  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(!dst.is(pc));
+  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
+       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+}
+
+
+void Assembler::vcvt(const DwVfpRegister dst,
+                     const SwVfpRegister src,
+                     const Condition cond) {
+  // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
+  // Instruction details available in ARM DDI 0406A, A8-576.
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
+       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
+       (0x1 & src.code())*B5 | (src.code() >> 1));
+}
+
+
+void Assembler::vcvt(const SwVfpRegister dst,
+                     const DwVfpRegister src,
+                     const Condition cond) {
+  // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
+  // Instruction details available in ARM DDI 0406A, A8-576.
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
+       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
+       0x5*B9 | B8 | B7 | B6 | src.code());
+}
+
+
+void Assembler::vadd(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vadd(Dn, Dm) double precision floating point addition.
+  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
+  // Instruction details available in ARM DDI 0406A, A8-536.
+  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vsub(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
+  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
+  // Instruction details available in ARM DDI 0406A, A8-784.
+  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmul(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
+  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
+  // Instruction details available in ARM DDI 0406A, A8-784.
+  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
+       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vdiv(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vdiv(Dn, Dm) double precision floating point division.
+  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
+  // Instruction details available in ARM DDI 0406A, A8-584.
+  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
+       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vcmp(const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const SBit s,
+                     const Condition cond) {
+  // vcmp(Dd, Dm) double precision floating point comparison.
+  // Instruction details available in ARM DDI 0406A, A8-570.
+  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
+       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmrs(Register dst, Condition cond) {
+  // Instruction details available in ARM DDI 0406A, A8-652.
+  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
+  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
+       dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
+// Pseudo instructions
+void Assembler::lea(Register dst,
+                    const MemOperand& x,
+                    SBit s,
+                    Condition cond) {
+  int am = x.am_;
+  if (!x.rm_.is_valid()) {
+    // immediate offset
+    if ((am & P) == 0)  // post indexing
+      mov(dst, Operand(x.rn_), s, cond);
+    else if ((am & U) == 0)  // negative indexing
+      sub(dst, x.rn_, Operand(x.offset_), s, cond);
+    else
+      add(dst, x.rn_, Operand(x.offset_), s, cond);
+  } else {
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
+    // offset. The constructors make sure that both shift_imm_ and shift_op_
+    // are always initialized.
+    ASSERT(!x.rm_.is(pc));
+    if ((am & P) == 0)  // post indexing
+      mov(dst, Operand(x.rn_), s, cond);
+    else if ((am & U) == 0)  // negative indexing
+      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+    else
+      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+  }
+}
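+
+// Example (illustrative): lea(r0, MemOperand(fp, 8)) assembles to
+// "add r0, fp, #8", computing the slot address without touching memory.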
+
+
+bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
+  uint32_t dummy1;
+  uint32_t dummy2;
+  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
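+
+// Usage note (illustrative): a caller emitting a short instruction sequence
+// that must stay contiguous, e.g. a patchable call site, invokes
+// BlockConstPoolFor(n) first so CheckConstPool() cannot split the sequence.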
+
+
+// Debugging
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else if (buffer_size_ < 1*MB) {
+    desc.buffer_size = 2*buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1*MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // copy the data
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta,
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // None of our relocation types is pc-relative pointing outside the code
+  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
+  // need to relocate any emitted relocation entries.
+
+  // relocate pending relocation entries
+  for (int i = 0; i < num_prinfo_; i++) {
+    RelocInfo& rinfo = prinfo_[i];
+    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+           rinfo.rmode() != RelocInfo::POSITION);
+    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+      rinfo.set_pc(rinfo.pc() + pc_delta);
+    }
+  }
+}
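+
+// Note: the policy above doubles the buffer up to 1MB and then grows it
+// linearly, e.g. 4KB -> 8KB -> ... -> 512KB -> 1MB -> 2MB -> 3MB.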
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
+    // these modes do not need an entry in the constant pool
+  } else {
+    ASSERT(num_prinfo_ < kMaxNumPRInfo);
+    prinfo_[num_prinfo_++] = rinfo;
+    // Make sure the constant pool is not emitted in place of the next
+    // instruction for which we just recorded relocation info
+    BlockConstPoolBefore(pc_offset() + kInstrSize);
+  }
+  if (rinfo.rmode() != RelocInfo::NONE) {
+    // Don't record external references unless the heap will be serialized.
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+      if (!Serializer::enabled()) {
+        Serializer::TooLateToEnableNow();
+      }
+#endif
+      if (!Serializer::enabled() && !FLAG_debug_code) {
+        return;
+      }
+    }
+    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    reloc_info_writer.Write(&rinfo);
+  }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Calculate the offset of the next check. It will be overwritten
+  // when a const pool is generated or when const pools are being
+  // blocked for a specific range.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+  // There is nothing to do if there are no pending relocation info entries
+  if (num_prinfo_ == 0) return;
+
+  // We emit a constant pool at regular intervals of about kDistBetweenPools
+  // or when requested by parameter force_emit (e.g. after each function).
+  // We prefer not to emit a jump unless the max distance is reached or if we
+  // are running low on slots, which can happen if a lot of constants are being
+  // emitted (e.g. --debug-code and many static references).
+  int dist = pc_offset() - last_const_pool_end_;
+  if (!force_emit && dist < kMaxDistBetweenPools &&
+      (require_jump || dist < kDistBetweenPools) &&
+      // TODO(1236125): Cleanup the "magic" number below. We know that
+      // the code generation will test every kCheckConstIntervalInst.
+      // Thus we are safe as long as we generate less than 7 constant
+      // entries per instruction.
+      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+    return;
+  }
+
+  // If we did not return by now, we need to emit the constant pool soon.
+
+  // However, some small sequences of instructions must not be broken up by the
+  // insertion of a constant pool; such sequences are protected by setting
+  // no_const_pool_before_, which is checked here. Also, recursive calls to
+  // CheckConstPool are blocked by no_const_pool_before_.
+  if (pc_offset() < no_const_pool_before_) {
+    // Emission is currently blocked; make sure we try again as soon as possible
+    next_buffer_check_ = no_const_pool_before_;
+
+    // Something is wrong if emission is forced and blocked at the same time
+    ASSERT(!force_emit);
+    return;
+  }
+
+  int jump_instr = require_jump ? kInstrSize : 0;
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool and relocation information (include the jump over the pool and the
+  // constant pool marker).
+  int max_needed_space =
+      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+  // Block recursive calls to CheckConstPool
+  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+                       num_prinfo_*kInstrSize);
+  // Don't bother to check for the emit calls below.
+  next_buffer_check_ = no_const_pool_before_;
+
+  // Emit jump over constant pool if necessary
+  Label after_pool;
+  if (require_jump) b(&after_pool);
+
+  RecordComment("[ Constant Pool");
+
+  // Put down constant pool marker
+  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  emit(0x03000000 | num_prinfo_);
+
+  // Emit constant pool entries
+  for (int i = 0; i < num_prinfo_; i++) {
+    RelocInfo& rinfo = prinfo_[i];
+    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+           rinfo.rmode() != RelocInfo::POSITION &&
+           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+    Instr instr = instr_at(rinfo.pc());
+
+    // Instruction to patch must be a ldr/str [pc, #offset]
+    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+           (2*B25 | P | U | pc.code()*B16));
+    int delta = pc_ - rinfo.pc() - 8;
+    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
+    if (delta < 0) {
+      instr &= ~U;
+      delta = -delta;
+    }
+    ASSERT(is_uint12(delta));
+    instr_at_put(rinfo.pc(), instr + delta);
+    emit(rinfo.data());
+  }
+  num_prinfo_ = 0;
+  last_const_pool_end_ = pc_offset();
+
+  RecordComment("]");
+
+  if (after_pool.is_linked()) {
+    bind(&after_pool);
+  }
+
+  // Since a constant pool was just emitted, move the check offset forward by
+  // the standard interval.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
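+
+// Resulting layout for one pending entry with require_jump (illustrative):
+//   b after_pool
+//   <marker 0x03000001>
+//   <32-bit constant>
+// after_pool:
+// The originating "ldr rd, [pc, #0]" is back-patched with the actual
+// pc-relative offset of its constant.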
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
new file mode 100644
index 0000000..31e9487
--- /dev/null
+++ b/src/arm/assembler-thumb2.h
@@ -0,0 +1,1027 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
+#define V8_ARM_ASSEMBLER_THUMB2_H_
+#include <stdio.h>
+#include "assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Single word VFP register.
+struct SwVfpRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 32; }
+  bool is(SwVfpRegister reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+  // Supports d0 to d15; can later be extended to d31.
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(DwVfpRegister reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;
+
+
+// Coprocessor register
+struct CRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
+  bool is(CRegister creg) const  { return code_ == creg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+enum Coprocessor {
+  p0  = 0,
+  p1  = 1,
+  p2  = 2,
+  p3  = 3,
+  p4  = 4,
+  p5  = 5,
+  p6  = 6,
+  p7  = 7,
+  p8  = 8,
+  p9  = 9,
+  p10 = 10,
+  p11 = 11,
+  p12 = 12,
+  p13 = 13,
+  p14 = 14,
+  p15 = 15
+};
+
+
+// Condition field in instructions
+enum Condition {
+  eq =  0 << 28,  // Z set            equal.
+  ne =  1 << 28,  // Z clear          not equal.
+  nz =  1 << 28,  // Z clear          not zero.
+  cs =  2 << 28,  // C set            carry set.
+  hs =  2 << 28,  // C set            unsigned higher or same.
+  cc =  3 << 28,  // C clear          carry clear.
+  lo =  3 << 28,  // C clear          unsigned lower.
+  mi =  4 << 28,  // N set            negative.
+  pl =  5 << 28,  // N clear          positive or zero.
+  vs =  6 << 28,  // V set            overflow.
+  vc =  7 << 28,  // V clear          no overflow.
+  hi =  8 << 28,  // C set, Z clear   unsigned higher.
+  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
+  ge = 10 << 28,  // N == V           greater or equal.
+  lt = 11 << 28,  // N != V           less than.
+  gt = 12 << 28,  // Z clear, N == V  greater than.
+  le = 13 << 28,  // Z set or N != V  less than or equal.
+  al = 14 << 28   //                  always.
+};
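+
+// The condition occupies the top nibble of every instruction word, e.g.
+// al == 0xE0000000, so or'ing a Condition into an encoding fills bits 31-28.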
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case lo:
+      return hi;
+    case hi:
+      return lo;
+    case hs:
+      return ls;
+    case ls:
+      return hs;
+    case lt:
+      return gt;
+    case gt:
+      return lt;
+    case ge:
+      return le;
+    case le:
+      return ge;
+    default:
+      return cc;
+  }
+}
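+
+// Example (illustrative): rewriting "cmp a, b; b(lt, L)" with swapped
+// comparison operands uses ReverseCondition(lt) == gt. Contrast with
+// NegateCondition, which inverts the truth value (lt -> ge).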
+
+
+// Branch hints are not used on the ARM.  They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM.  Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+enum ShiftOp {
+  LSL = 0 << 5,
+  LSR = 1 << 5,
+  ASR = 2 << 5,
+  ROR = 3 << 5,
+  RRX = -1
+};
+
+
+// Condition code updating mode
+enum SBit {
+  SetCC   = 1 << 20,  // set condition code
+  LeaveCC = 0 << 20   // leave condition code unchanged
+};
+
+
+// Status register selection
+enum SRegister {
+  CPSR = 0 << 22,
+  SPSR = 1 << 22
+};
+
+
+// Status register fields
+enum SRegisterField {
+  CPSR_c = CPSR | 1 << 16,
+  CPSR_x = CPSR | 1 << 17,
+  CPSR_s = CPSR | 1 << 18,
+  CPSR_f = CPSR | 1 << 19,
+  SPSR_c = SPSR | 1 << 16,
+  SPSR_x = SPSR | 1 << 17,
+  SPSR_s = SPSR | 1 << 18,
+  SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode
+enum AddrMode {
+  // bit encoding P U W
+  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
+  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
+  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
+  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
+  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
+  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
+};
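+
+// Example (illustrative): a push is "str src, [sp, #-4]!", built with
+// MemOperand(sp, 4, NegPreIndex); the matching pop is "ldr dst, [sp], #+4",
+// built with MemOperand(sp, 4, PostIndex).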
+
+
+// Load/store multiple addressing mode
+enum BlockAddrMode {
+  // bit encoding P U W
+  da           = (0|0|0) << 21,  // decrement after
+  ia           = (0|4|0) << 21,  // increment after
+  db           = (8|0|0) << 21,  // decrement before
+  ib           = (8|4|0) << 21,  // increment before
+  da_w         = (0|0|1) << 21,  // decrement after with writeback to base
+  ia_w         = (0|4|1) << 21,  // increment after with writeback to base
+  db_w         = (8|0|1) << 21,  // decrement before with writeback to base
+  ib_w         = (8|4|1) << 21   // increment before with writeback to base
+};
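+
+// Example (illustrative): stm(db_w, sp, regs) is "stmdb sp!, {regs}" (push
+// several registers) and ldm(ia_w, sp, regs) is "ldmia sp!, {regs}" (pop).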
+
+
+// Coprocessor load/store operand size
+enum LFlag {
+  Long  = 1 << 22,  // long load/store coprocessor
+  Short = 0 << 22   // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+  // immediate
+  INLINE(explicit Operand(int32_t immediate,
+         RelocInfo::Mode rmode = RelocInfo::NONE));
+  INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
+  INLINE(explicit Operand(Object** opp));
+  INLINE(explicit Operand(Context** cpp));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // rm
+  INLINE(explicit Operand(Register rm));
+
+  // rm <shift_op> shift_imm
+  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+  // rm <shift_op> rs
+  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  Register rs_;
+  ShiftOp shift_op_;
+  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
+  int32_t imm32_;  // valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+};
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+  // [rn +/- offset]      Offset/NegOffset
+  // [rn +/- offset]!     PreIndex/NegPreIndex
+  // [rn], +/- offset     PostIndex/NegPostIndex
+  // offset is any signed 32-bit value; it is first loaded into register ip
+  // if it does not fit the addressing mode (12-bit unsigned offset plus sign
+  // bit)
+  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+  // [rn +/- rm]          Offset/NegOffset
+  // [rn +/- rm]!         PreIndex/NegPreIndex
+  // [rn], +/- rm         PostIndex/NegPostIndex
+  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+  // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
+  // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
+  // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
+  explicit MemOperand(Register rn, Register rm,
+                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+  Register rn_;  // base
+  Register rm_;  // register offset
+  int32_t offset_;  // valid if rm_ == no_reg
+  ShiftOp shift_op_;
+  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
+  AddrMode am_;  // bits P, U, and W
+
+  friend class Assembler;
+};
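+
+// Usage (illustrative):
+//   MemOperand(r0, 4)              // [r0 + 4]
+//   MemOperand(r0, 4, PostIndex)   // [r0], #+4
+//   MemOperand(r0, r1, LSL, 2)     // [r0 + (r1 << 2)]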
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
+
+  // Check whether a feature is supported by the target CPU.
+  static bool IsSupported(CpuFeature f) {
+    if (f == VFP3 && !FLAG_enable_vfp3) return false;
+    return (supported_ & (1u << f)) != 0;
+  }
+
+  // Check whether a feature is currently enabled.
+  static bool IsEnabled(CpuFeature f) {
+    return (enabled_ & (1u << f)) != 0;
+  }
+
+  // Enable a specified feature within a scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(CpuFeature f) {
+      ASSERT(CpuFeatures::IsSupported(f));
+      ASSERT(!Serializer::enabled() ||
+             (found_by_runtime_probing_ & (1u << f)) == 0);
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= 1u << f;
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+   private:
+    unsigned old_enabled_;
+#else
+   public:
+    explicit Scope(CpuFeature f) {}
+#endif
+  };
+
+ private:
+  static unsigned supported_;
+  static unsigned enabled_;
+  static unsigned found_by_runtime_probing_;
+};
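+
+// Usage (illustrative):
+//   if (CpuFeatures::IsSupported(VFP3)) {
+//     CpuFeatures::Scope scope(VFP3);
+//     // VFP instructions such as vadd() may be emitted here.
+//   }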
+
+
+typedef int32_t Instr;
+
+
+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+
+
+class Assembler : public Malloced {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still
+  // unbound. Manages the jump elimination optimization if the second
+  // parameter is true.
+  int branch_offset(Label* L, bool jump_elimination_allowed);
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
+  // Return the address in the constant pool of the code target address used by
+  // the branch/call instruction at pc.
+  INLINE(static Address target_address_address_at(Address pc));
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  INLINE(static Address target_address_at(Address pc));
+  INLINE(static void set_target_address_at(Address pc, Address target));
+
+  // This sets the branch destination (which is in the constant pool on ARM).
+  // This is for calls and branches within generated code.
+  inline static void set_target_at(Address constant_pool_entry, Address target);
+
+  // This sets the branch destination (which is in the constant pool on ARM).
+  // This is for calls and branches to runtime code.
+  inline static void set_external_target_at(Address constant_pool_entry,
+                                            Address target) {
+    set_target_at(constant_pool_entry, target);
+  }
+
+  // Here we are patching the address in the constant pool, not the actual call
+  // instruction.  The address in the constant pool is the same size as a
+  // pointer.
+  static const int kCallTargetSize = kPointerSize;
+  static const int kExternalTargetSize = kPointerSize;
+
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
+  // Distance between the instruction referring to the address of the call
+  // target (ldr pc, [target addr in const pool]) and the return address
+  static const int kCallTargetAddressOffset = kInstrSize;
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+  // Difference between address of current opcode and value read from pc
+  // register.
+  static const int kPcLoadDelta = 8;
+
+  static const int kJSReturnSequenceLength = 4;
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2 (>= 4).
+  void Align(int m);
+
+  // Branch instructions
+  void b(int branch_offset, Condition cond = al);
+  void bl(int branch_offset, Condition cond = al);
+  void blx(int branch_offset);  // v5 and above
+  void blx(Register target, Condition cond = al);  // v5 and above
+  void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
+
+  // Convenience branch instructions using labels
+  void b(Label* L, Condition cond = al)  {
+    b(branch_offset(L, cond == al), cond);
+  }
+  void b(Condition cond, Label* L)  { b(branch_offset(L, cond == al), cond); }
+  void bl(Label* L, Condition cond = al)  { bl(branch_offset(L, false), cond); }
+  void bl(Condition cond, Label* L)  { bl(branch_offset(L, false), cond); }
+  void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
+
+  // Data-processing instructions
+  void and_(Register dst, Register src1, const Operand& src2,
+            SBit s = LeaveCC, Condition cond = al);
+
+  void eor(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void sub(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+  void sub(Register dst, Register src1, Register src2,
+           SBit s = LeaveCC, Condition cond = al) {
+    sub(dst, src1, Operand(src2), s, cond);
+  }
+
+  void rsb(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void add(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void adc(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void sbc(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void rsc(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void tst(Register src1, const Operand& src2, Condition cond = al);
+  void tst(Register src1, Register src2, Condition cond = al) {
+    tst(src1, Operand(src2), cond);
+  }
+
+  void teq(Register src1, const Operand& src2, Condition cond = al);
+
+  void cmp(Register src1, const Operand& src2, Condition cond = al);
+  void cmp(Register src1, Register src2, Condition cond = al) {
+    cmp(src1, Operand(src2), cond);
+  }
+
+  void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+  void orr(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+  void orr(Register dst, Register src1, Register src2,
+           SBit s = LeaveCC, Condition cond = al) {
+    orr(dst, src1, Operand(src2), s, cond);
+  }
+
+  void mov(Register dst, const Operand& src,
+           SBit s = LeaveCC, Condition cond = al);
+  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
+    mov(dst, Operand(src), s, cond);
+  }
+
+  void bic(Register dst, Register src1, const Operand& src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void mvn(Register dst, const Operand& src,
+           SBit s = LeaveCC, Condition cond = al);
+
+  // Multiply instructions
+
+  void mla(Register dst, Register src1, Register src2, Register srcA,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void mul(Register dst, Register src1, Register src2,
+           SBit s = LeaveCC, Condition cond = al);
+
+  void smlal(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  void smull(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  void umlal(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  void umull(Register dstL, Register dstH, Register src1, Register src2,
+             SBit s = LeaveCC, Condition cond = al);
+
+  // Miscellaneous arithmetic instructions
+
+  void clz(Register dst, Register src, Condition cond = al);  // v5 and above
+
+  // Status register access instructions
+
+  void mrs(Register dst, SRegister s, Condition cond = al);
+  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+  // Load/Store instructions
+  void ldr(Register dst, const MemOperand& src, Condition cond = al);
+  void str(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+  void strb(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+  void strh(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+  // Load/Store multiple instructions
+  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+  // Semaphore instructions
+  void swp(Register dst, Register src, Register base, Condition cond = al);
+  void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+  // Exception-generating instructions and debugging support
+  void stop(const char* msg);
+
+  void bkpt(uint32_t imm16);  // v5 and above
+  void swi(uint32_t imm24, Condition cond = al);
+
+  // Coprocessor instructions
+
+  void cdp(Coprocessor coproc, int opcode_1,
+           CRegister crd, CRegister crn, CRegister crm,
+           int opcode_2, Condition cond = al);
+
+  void cdp2(Coprocessor coproc, int opcode_1,
+            CRegister crd, CRegister crn, CRegister crm,
+            int opcode_2);  // v5 and above
+
+  void mcr(Coprocessor coproc, int opcode_1,
+           Register rd, CRegister crn, CRegister crm,
+           int opcode_2 = 0, Condition cond = al);
+
+  void mcr2(Coprocessor coproc, int opcode_1,
+            Register rd, CRegister crn, CRegister crm,
+            int opcode_2 = 0);  // v5 and above
+
+  void mrc(Coprocessor coproc, int opcode_1,
+           Register rd, CRegister crn, CRegister crm,
+           int opcode_2 = 0, Condition cond = al);
+
+  void mrc2(Coprocessor coproc, int opcode_1,
+            Register rd, CRegister crn, CRegister crm,
+            int opcode_2 = 0);  // v5 and above
+
+  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+           LFlag l = Short, Condition cond = al);
+  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+           LFlag l = Short, Condition cond = al);
+
+  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+            LFlag l = Short);  // v5 and above
+  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+            LFlag l = Short);  // v5 and above
+
+  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+           LFlag l = Short, Condition cond = al);
+  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+           LFlag l = Short, Condition cond = al);
+
+  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+            LFlag l = Short);  // v5 and above
+  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+            LFlag l = Short);  // v5 and above
+
+  // Support for VFP.
+  // All these APIs support S0 to S31 and D0 to D15.
+  // Currently these APIs do not support the extended D registers, i.e., D16
+  // to D31; however, simple modifications would allow them to.
+
+  void vmov(const DwVfpRegister dst,
+            const Register src1,
+            const Register src2,
+            const Condition cond = al);
+  void vmov(const Register dst1,
+            const Register dst2,
+            const DwVfpRegister src,
+            const Condition cond = al);
+  void vmov(const SwVfpRegister dst,
+            const Register src,
+            const Condition cond = al);
+  void vmov(const Register dst,
+            const SwVfpRegister src,
+            const Condition cond = al);
+  void vcvt(const DwVfpRegister dst,
+            const SwVfpRegister src,
+            const Condition cond = al);
+  void vcvt(const SwVfpRegister dst,
+            const DwVfpRegister src,
+            const Condition cond = al);
+
+  void vadd(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vsub(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vmul(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vdiv(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vcmp(const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const SBit s = LeaveCC,
+            const Condition cond = al);
+  void vmrs(const Register dst,
+            const Condition cond = al);
+
+  // Pseudo instructions
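+  // nop is encoded as "mov r0, r0", which has no observable effect.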
+  void nop()  { mov(r0, Operand(r0)); }
+
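+  // push stores with pre-decrement ("str src, [sp, #-4]!"); pop loads with
+  // post-increment ("ldr dst, [sp], #4").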
+  void push(Register src, Condition cond = al) {
+    str(src, MemOperand(sp, 4, NegPreIndex), cond);
+  }
+
+  void pop(Register dst, Condition cond = al) {
+    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
+  }
+
+  void pop() {
+    add(sp, sp, Operand(kPointerSize));
+  }
+
+  // Load effective address of memory operand x into register dst
+  void lea(Register dst, const MemOperand& x,
+           SBit s = LeaveCC, Condition cond = al);
+
+  // Jump unconditionally to given label.
+  void jmp(Label* L) { b(L, al); }
+
+  // Returns the number of instructions generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
+
+  // Check whether an immediate fits an addressing mode 1 instruction.
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+  // Postpone the generation of the constant pool for the specified number of
+  // instructions.
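+  // This keeps sequences that must not be split by a constant pool (for
+  // example, patchable code sequences) contiguous.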
+  void BlockConstPoolFor(int instructions);
+
+  // Debugging
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  int pc_offset() const { return pc_ - buffer_; }
+  int current_position() const { return current_position_; }
+  int current_statement_position() const {
+    return current_statement_position_;
+  }
+
+ protected:
+  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Read/patch instructions
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
+  // Decode branch instruction at pos and return branch target pos
+  int target_at(int pos);
+
+  // Patch branch instruction at pos to branch to given branch target pos
+  void target_at_put(int pos, int target_pos);
+
+  // Check if it is time to emit a constant pool for pending reloc info
+  // entries.
+  void CheckConstPool(bool force_emit, bool require_jump);
+
+  // Block the emission of the constant pool before pc_offset
+  void BlockConstPoolBefore(int pc_offset) {
+    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+  }
+
+ private:
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes
+  static const int kBufferCheckInterval = 1*KB/2;
+  int next_buffer_check_;  // pc offset of next buffer check
+
+  // Code generation
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+  byte* pc_;  // the program counter; moves forward
+
+  // Constant pool generation
+  // Pools are emitted in the instruction stream, preferably after unconditional
+  // jumps or after returns from functions (in dead code locations).
+  // If a long code sequence does not contain unconditional jumps, it is
+  // necessary to emit the constant pool before the pool gets too far from the
+  // location it is accessed from. In this case, we emit a jump over the emitted
+  // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated with the constant pool entry.
+
+  // Repeated checking whether the constant pool should be emitted is rather
+  // expensive. By default we only check again once a number of instructions
+  // has been generated. That also means that the sizing of the buffers is not
+  // an exact science, and that we rely on some slop to not overrun buffers.
+  static const int kCheckConstIntervalInst = 32;
+  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+  // Pools are emitted after function return and in dead code at (more or
+  // less) regular intervals of kDistBetweenPools bytes.
+  static const int kDistBetweenPools = 1*KB;
+
+  // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB thereby defining a maximum distance between the instruction
+  // and the accessed constant. We satisfy this constraint by limiting the
+  // distance between pools.
+  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+
+  // Emission of the constant pool may be blocked in some code sequences
+  int no_const_pool_before_;  // block emission before this pc offset
+
+  // Keep track of the last emitted pool to guarantee a maximal distance
+  int last_const_pool_end_;  // pc offset following the last constant pool
+
+  // Relocation info generation
+  // Each relocation is encoded as a variable size value
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+  // Relocation info records are also used during code generation as temporary
+  // containers for constants and code target addresses until they are emitted
+  // to the constant pool. These pending relocation info records are temporarily
+  // stored in a separate buffer until a constant pool is emitted.
+  // If every instruction in a long sequence is accessing the pool, we need one
+  // pending relocation entry per instruction.
+  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
+  int num_prinfo_;  // number of pending reloc info entries in the buffer
+
+  // The bound position; before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // Source position information.
+  int current_position_;
+  int current_statement_position_;
+  int written_position_;
+  int written_statement_position_;
+
+  // Code emission
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+
+  // Instruction generation
+  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+  void addrmod2(Instr instr, Register rd, const MemOperand& x);
+  void addrmod3(Instr instr, Register rd, const MemOperand& x);
+  void addrmod4(Instr instr, Register rn, RegList rl);
+  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+  // Labels
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+  void next(Label* L);
+
+  // Record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class RegExpMacroAssemblerARM;
+  friend class RelocInfo;
+  friend class CodePatcher;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_ASSEMBLER_THUMB2_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index ea3df6c..89d974c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1769,9 +1769,7 @@
 
   primitive.Bind();
   frame_->EmitPush(r0);
-  Result arg_count(r0);
-  __ mov(r0, Operand(0));
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
 
   jsobject.Bind();
   // Get the set of properties (as a FixedArray or Map).
@@ -1910,9 +1908,7 @@
   __ ldr(r0, frame_->ElementAt(4));  // push enumerable
   frame_->EmitPush(r0);
   frame_->EmitPush(r3);  // push entry
-  Result arg_count_reg(r0);
-  __ mov(r0, Operand(1));
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
   __ mov(r3, Operand(r0));
 
   // If the property has been removed while iterating, we just skip it.
@@ -3660,9 +3656,7 @@
     if (property != NULL) {
       LoadAndSpill(property->obj());
       LoadAndSpill(property->key());
-      Result arg_count(r0);
-      __ mov(r0, Operand(1));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
 
     } else if (variable != NULL) {
       Slot* slot = variable->slot();
@@ -3670,9 +3664,7 @@
         LoadGlobal();
         __ mov(r0, Operand(variable->name()));
         frame_->EmitPush(r0);
-        Result arg_count(r0);
-        __ mov(r0, Operand(1));  // not counting receiver
-        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
 
       } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
         // lookup the context holding the named variable
@@ -3684,9 +3676,7 @@
         frame_->EmitPush(r0);
         __ mov(r0, Operand(variable->name()));
         frame_->EmitPush(r0);
-        Result arg_count(r0);
-        __ mov(r0, Operand(1));  // not counting receiver
-        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
 
       } else {
         // Default: Result of deleting non-global, not dynamically
@@ -3736,9 +3726,7 @@
         smi_label.Branch(eq);
 
         frame_->EmitPush(r0);
-        Result arg_count(r0);
-        __ mov(r0, Operand(0));  // not counting receiver
-        frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+        frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
 
         continue_label.Jump();
         smi_label.Bind();
@@ -3760,9 +3748,7 @@
         __ tst(r0, Operand(kSmiTagMask));
         continue_label.Branch(eq);
         frame_->EmitPush(r0);
-        Result arg_count(r0);
-        __ mov(r0, Operand(0));  // not counting receiver
-        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
         continue_label.Bind();
         break;
       }
@@ -3847,9 +3833,7 @@
     {
       // Convert the operand to a number.
       frame_->EmitPush(r0);
-      Result arg_count(r0);
-      __ mov(r0, Operand(0));
-      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
     }
     if (is_postfix) {
       // Postfix: store to result (on the stack).
@@ -4235,9 +4219,7 @@
     case Token::IN: {
       LoadAndSpill(left);
       LoadAndSpill(right);
-      Result arg_count(r0);
-      __ mov(r0, Operand(1));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
       frame_->EmitPush(r0);
       break;
     }
@@ -5079,10 +5061,10 @@
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions to implement double precision comparison.
-    __ fmdrr(d6, r0, r1);
-    __ fmdrr(d7, r2, r3);
+    __ vmov(d6, r0, r1);
+    __ vmov(d7, r2, r3);
 
-    __ fcmp(d6, d7);
+    __ vcmp(d6, d7);
     __ vmrs(pc);
     __ mov(r0, Operand(0), LeaveCC, eq);
     __ mov(r0, Operand(1), LeaveCC, lt);
@@ -5145,7 +5127,6 @@
 
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
-  __ mov(r0, Operand(arg_count));
   __ InvokeBuiltin(native, CALL_JS);
   __ cmp(r0, Operand(0));
   __ pop(pc);
@@ -5244,7 +5225,6 @@
 
     // Only first argument is a string.
     __ bind(&string1);
-    __ mov(r0, Operand(2));  // Set number of arguments.
     __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
 
     // First argument was not a string, test second.
@@ -5256,13 +5236,11 @@
 
     // Only second argument is a string.
     __ b(&not_strings);
-    __ mov(r0, Operand(2));  // Set number of arguments.
     __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
 
     __ bind(&not_strings);
   }
 
-  __ mov(r0, Operand(1));  // Set number of arguments.
   __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
 
   // We branch here if at least one of r0 and r1 is not a Smi.
@@ -5353,22 +5331,22 @@
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions to implement
     // double precision, add, subtract, multiply, divide.
-    __ fmdrr(d6, r0, r1);
-    __ fmdrr(d7, r2, r3);
+    __ vmov(d6, r0, r1);
+    __ vmov(d7, r2, r3);
 
     if (Token::MUL == operation) {
-      __ fmuld(d5, d6, d7);
+      __ vmul(d5, d6, d7);
     } else if (Token::DIV == operation) {
-      __ fdivd(d5, d6, d7);
+      __ vdiv(d5, d6, d7);
     } else if (Token::ADD == operation) {
-      __ faddd(d5, d6, d7);
+      __ vadd(d5, d6, d7);
     } else if (Token::SUB == operation) {
-      __ fsubd(d5, d6, d7);
+      __ vsub(d5, d6, d7);
     } else {
       UNREACHABLE();
     }
 
-    __ fmrrd(r0, r1, d5);
+    __ vmov(r0, r1, d5);
 
     __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
     __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
@@ -5457,9 +5435,9 @@
     // ARMv7 VFP3 instructions implementing double precision to integer
     // conversion using round to zero.
     __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    __ fmdrr(d7, scratch2, scratch);
-    __ ftosid(s15, d7);
-    __ fmrs(dest, s15);
+    __ vmov(d7, scratch2, scratch);
+    __ vcvt(s15, d7);
+    __ vmov(dest, s15);
   } else {
     // Get the top bits of the mantissa.
     __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
@@ -5598,7 +5576,6 @@
   __ bind(&slow);
   __ push(r1);  // restore stack
   __ push(r0);
-  __ mov(r0, Operand(1));  // 1 argument (not counting receiver).
   switch (op_) {
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5703,6 +5680,29 @@
 }
 
 
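+// Builds a stub name of the form "GenericBinaryOpStub_<op>_<overwrite>",
+// e.g. "GenericBinaryOpStub_ADD_OverwriteRight", with "_ConstantRhs"
+// appended when the stub is specialized on a constant right-hand side.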
+const char* GenericBinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int len = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, len),
+               "GenericBinaryOpStub_%s_%s%s",
+               op_name,
+               overwrite_name,
+               specialized_on_rhs_ ? "_ConstantRhs" : "");
+  return name_;
+}
+
+
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y
@@ -5980,7 +5980,6 @@
   // Enter runtime system.
   __ bind(&slow);
   __ push(r0);
-  __ mov(r0, Operand(0));  // Set number of arguments.
   __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
 
   __ bind(&not_smi);
@@ -6456,7 +6455,6 @@
 
   // Slow-case.  Tail call builtin.
   __ bind(&slow);
-  __ mov(r0, Operand(1));  // Arg count without receiver.
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
 }
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index ba7f936..e9f11e9 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -455,13 +455,15 @@
       : op_(op),
         mode_(mode),
         constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+        name_(NULL) { }
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
   int constant_rhs_;
   bool specialized_on_rhs_;
+  char* name_;
 
   static const int kMaxKnownRhs = 0x40000000;
 
@@ -506,22 +508,7 @@
     return key;
   }
 
-  const char* GetName() {
-    switch (op_) {
-      case Token::ADD: return "GenericBinaryOpStub_ADD";
-      case Token::SUB: return "GenericBinaryOpStub_SUB";
-      case Token::MUL: return "GenericBinaryOpStub_MUL";
-      case Token::DIV: return "GenericBinaryOpStub_DIV";
-      case Token::MOD: return "GenericBinaryOpStub_MOD";
-      case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-      case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-      case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-      case Token::SAR: return "GenericBinaryOpStub_SAR";
-      case Token::SHL: return "GenericBinaryOpStub_SHL";
-      case Token::SHR: return "GenericBinaryOpStub_SHR";
-      default:         return "GenericBinaryOpStub";
-    }
-  }
+  const char* GetName();
 
 #ifdef DEBUG
   void Print() {
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 2f9e78f..afed0fa 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -897,15 +897,14 @@
 
 
 // void Decoder::DecodeTypeVFP(Instr* instr)
-// Implements the following VFP instructions:
-// fmsr: Sn = Rt
-// fmrs: Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
 // vcmp(Dd, Dm)
 // VMRS
 void Decoder::DecodeTypeVFP(Instr* instr) {
@@ -997,8 +996,8 @@
 
 
 // Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
 void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 56aacdd..42b3bf3 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -521,21 +521,6 @@
 }
 
 
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-  Expression* expr = stmt->expression();
-  // Complete the statement based on the type of the subexpression.
-  if (expr->AsLiteral() != NULL) {
-    __ mov(r0, Operand(expr->AsLiteral()->handle()));
-  } else {
-    ASSERT_EQ(Expression::kValue, expr->context());
-    Visit(expr);
-    __ pop(r0);
-  }
-  EmitReturnSequence(stmt->statement_pos());
-}
-
-
 void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
@@ -556,18 +541,24 @@
 
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  Expression* rewrite = expr->var()->rewrite();
+  EmitVariableLoad(expr->var(), expr->context());
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+                                         Expression::Context context) {
+  Expression* rewrite = var->rewrite();
   if (rewrite == NULL) {
-    ASSERT(expr->var()->is_global());
+    ASSERT(var->is_global());
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
     // object on the stack.
     __ ldr(ip, CodeGenerator::GlobalObject());
     __ push(ip);
-    __ mov(r2, Operand(expr->name()));
+    __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    DropAndMove(expr->context(), r0);
+    DropAndMove(context, r0);
   } else if (rewrite->AsSlot() != NULL) {
     Slot* slot = rewrite->AsSlot();
     if (FLAG_debug_code) {
@@ -588,7 +579,7 @@
           UNREACHABLE();
       }
     }
-    Move(expr->context(), slot, r0);
+    Move(context, slot, r0);
   } else {
     // A variable has been rewritten into an explicit access to
     // an object property.
@@ -623,7 +614,7 @@
     __ Call(ic, RelocInfo::CODE_TARGET);
 
     // Drop key and object left on the stack by IC, and push the result.
-    DropAndMove(expr->context(), r0, 2);
+    DropAndMove(context, r0, 2);
   }
 }
 
@@ -657,32 +648,15 @@
 
 void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Label boilerplate_exists;
   __ ldr(r2, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
-  // r2 = literal array (0).
   __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
-  int literal_offset =
-      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
-  __ ldr(r0, FieldMemOperand(r2, literal_offset));
-  // Check whether we need to materialize the object literal boilerplate.
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, Operand(ip));
-  __ b(ne, &boilerplate_exists);
-  // Create boilerplate if it does not exist.
-  // r1 = literal index (1).
   __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
-  // r0 = constant properties (2).
   __ mov(r0, Operand(expr->constant_properties()));
   __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
-  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ bind(&boilerplate_exists);
-  // r0 contains boilerplate.
-  // Clone boilerplate.
-  __ push(r0);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
   }
 
   // If result_saved == true: The result is saved on top of the
@@ -783,32 +757,15 @@
 
 void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
-  Label make_clone;
-
-  // Fetch the function's literals array.
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
-  // Check if the literal's boilerplate has been instantiated.
-  int offset =
-      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
-  __ ldr(r0, FieldMemOperand(r3, offset));
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
-  __ b(&make_clone, ne);
-
-  // Instantiate the boilerplate.
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r1, Operand(expr->literals()));
   __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
-  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
-  __ bind(&make_clone);
-  // Clone the boilerplate.
-  __ push(r0);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
@@ -880,10 +837,38 @@
 }
 
 
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
+                                              Expression::Context context) {
+  Literal* key = prop->key()->AsLiteral();
+  __ mov(r2, Operand(key->handle()));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  Move(context, r0);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  Move(context, r0);
+}
+
+
+void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
+                                                 Expression::Context context) {
+  __ pop(r0);
+  __ pop(r1);
+  GenericBinaryOpStub stub(op, NO_OVERWRITE);
+  __ CallStub(&stub);
+  Move(context, r0);
+}
+
+
 void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
-
+  ASSERT(var->is_global() || var->slot() != NULL);
   if (var->is_global()) {
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in r0, variable name in
@@ -996,35 +981,6 @@
         UNREACHABLE();
         break;
     }
-  } else {
-    Property* property = var->rewrite()->AsProperty();
-    ASSERT_NOT_NULL(property);
-
-    // Load object and key onto the stack.
-    Slot* object_slot = property->obj()->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
-    Move(Expression::kValue, object_slot, r0);
-
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    Move(Expression::kValue, key_literal);
-
-    // Value to store was pushed before object and key on the stack.
-    __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-
-    // Arguments to ic is value in r0, object and key on stack.
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-
-    if (expr->context() == Expression::kEffect) {
-      __ add(sp, sp, Operand(3 * kPointerSize));
-    } else if (expr->context() == Expression::kValue) {
-      // Value is still on the stack in esp[2 * kPointerSize]
-      __ add(sp, sp, Operand(2 * kPointerSize));
-    } else {
-      __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-      DropAndMove(expr->context(), r0, 3);
-    }
   }
 }
 
@@ -1726,7 +1682,63 @@
 }
 
 
-#undef __
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  Move(expr->context(), r0);
+}
 
 
+Register FastCodeGenerator::result_register() { return r0; }
+
+
+Register FastCodeGenerator::context_register() { return cp; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FastCodeGenerator::EnterFinallyBlock() {
+  ASSERT(!result_register().is(r1));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook the return address in the link register before pushing it: store it
+  // as a smi-encoded offset from the code object so it stays valid if the
+  // code is moved by the GC.
+  __ sub(r1, lr, Operand(masm_->CodeObject()));
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  ASSERT_EQ(0, kSmiTag);
+  __ add(r1, r1, Operand(r1));  // Convert to smi.
+  __ push(r1);
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+  ASSERT(!result_register().is(r1));
+  // Restore result register from stack.
+  __ pop(r1);
+  // Uncook return address and return.
+  __ pop(result_register());
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ mov(r1, Operand(r1, ASR, 1));  // Un-smi-tag value.
+  __ add(pc, r1, Operand(masm_->CodeObject()));
+}
+
+
+void FastCodeGenerator::ThrowException() {
+  __ push(result_register());
+  __ CallRuntime(Runtime::kThrow, 1);
+}
+
+
+#undef __
+
 } }  // namespace v8::internal
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index b0fa13a..0cb7f12 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -28,7 +28,11 @@
 #include "v8.h"
 
 #include "frames-inl.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2-inl.h"
+#else
 #include "arm/assembler-arm-inl.h"
+#endif
 
 
 namespace v8 {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index aa6570c..876eec1 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -162,6 +162,21 @@
 }
 
 
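+// Discard the topmost stack_elements values from the stack by bumping sp.
+// Emits nothing when stack_elements is zero.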
+void MacroAssembler::Drop(int stack_elements, Condition cond) {
+  if (stack_elements > 0) {
+    add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
+  }
+}
+
+
+void MacroAssembler::Call(Label* target) {
+  bl(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  mov(dst, Operand(value));
+}
 
 
 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
@@ -628,6 +643,15 @@
 }
 
 
+void MacroAssembler::PopTryHandler() {
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
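+  // The next-handler link is the first word of the handler, so the pop below
+  // yields the address to restore into Top::k_handler_address.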
+  pop(r1);
+  mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+  str(r1, MemOperand(ip));
+}
+
+
 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                    JSObject* holder, Register holder_reg,
                                    Register scratch,
@@ -994,9 +1018,9 @@
                                                        Register outLowReg) {
   // ARMv7 VFP3 instructions to implement integer to double conversion.
   mov(r7, Operand(inReg, ASR, kSmiTagSize));
-  fmsr(s15, r7);
-  fsitod(d7, s15);
-  fmrrd(outLowReg, outHighReg, d7);
+  vmov(s15, r7);
+  vcvt(d7, s15);
+  vmov(outLowReg, outHighReg, d7);
 }
 
 
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 0974329..88bfa9c 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -64,6 +64,9 @@
   void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
   void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
   void Ret(Condition cond = al);
+  void Drop(int stack_elements, Condition cond = al);
+  void Call(Label* target);
+  void Move(Register dst, Handle<Object> value);
   // Jumps to the label at the index given by the Smi in "index".
   void SmiJumpTable(Register index, Vector<Label*> targets);
   // Load an object from the root table.
@@ -148,6 +151,9 @@
   // On exit, r0 contains TOS (code slot).
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  // Must preserve the result register.
+  void PopTryHandler();
 
   // ---------------------------------------------------------------------------
   // Inline caching support
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 9dc417b..f392772 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1893,14 +1893,14 @@
 
 // void Simulator::DecodeTypeVFP(Instr* instr)
 // The following ARMv7 VFP instructions are currently supported.
-// fmsr :Sn = Rt
-// fmrs :Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
 // vcmp(Dd, Dm)
 // VMRS
 void Simulator::DecodeTypeVFP(Instr* instr) {
@@ -2020,8 +2020,8 @@
 
 // void Simulator::DecodeType6CoprocessorIns(Instr* instr)
 // Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index efccaf4..7c5ad0e 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -446,7 +446,7 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                         JSObject* holder,
                                         Register receiver,
                                         Register name_reg,
@@ -454,7 +454,8 @@
                                         Register scratch2,
                                         AccessorInfo* callback,
                                         String* name,
-                                        Label* miss) {
+                                        Label* miss,
+                                        Failure** failure) {
   // Check that the receiver isn't a smi.
   __ tst(receiver, Operand(kSmiTagMask));
   __ b(eq, miss);
@@ -476,6 +477,8 @@
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
   __ TailCallRuntime(load_callback_property, 5, 1);
+
+  return true;
 }
 
 
@@ -1003,10 +1006,10 @@
 }
 
 
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback,
-                                              String* name) {
+                                              AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1015,7 +1018,11 @@
   Label miss;
 
   __ ldr(r0, MemOperand(sp, 0));
-  GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1168,7 +1175,11 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 47ecb96..132c8ae 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -243,11 +243,8 @@
 
 void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                  InvokeJSFlags flags,
-                                 Result* arg_count_register,
                                  int arg_count) {
-  ASSERT(arg_count_register->reg().is(r0));
   PrepareForCall(arg_count, arg_count);
-  arg_count_register->Unuse();
   __ InvokeBuiltin(id, flags);
 }
 
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 457478d..d523000 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -305,7 +305,6 @@
   // removes from) the stack.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeJSFlags flag,
-                     Result* arg_count_register,
                      int arg_count);
 
   // Call into an IC stub given the number of arguments it removes
diff --git a/src/assembler.cc b/src/assembler.cc
index 9c9ddcd..2d16250 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -573,6 +573,16 @@
 }
 
 
+ExternalReference ExternalReference::keyed_lookup_cache_keys() {
+  return ExternalReference(KeyedLookupCache::keys_address());
+}
+
+
+ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
+  return ExternalReference(KeyedLookupCache::field_offsets_address());
+}
+
+
 ExternalReference ExternalReference::the_hole_value_location() {
   return ExternalReference(Factory::the_hole_value().location());
 }
diff --git a/src/assembler.h b/src/assembler.h
index aecd4cd..87cde9b 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -401,6 +401,10 @@
   static ExternalReference builtin_passed_function();
   static ExternalReference random_positive_smi_function();
 
+  // Static data in the keyed lookup cache.
+  static ExternalReference keyed_lookup_cache_keys();
+  static ExternalReference keyed_lookup_cache_field_offsets();
+
   // Static variable Factory::the_hole_value.location()
   static ExternalReference the_hole_value_location();
 
diff --git a/src/ast.h b/src/ast.h
index c27d558..00f3629 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1241,6 +1241,8 @@
   Expression* target() const { return target_; }
   Expression* value() const { return value_; }
   int position() { return pos_; }
+  // This check relies on the definition order of tokens in token.h.
+  bool is_compound() const { return op() > Token::ASSIGN; }
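+  // For example, "a += b" (Token::ASSIGN_ADD) is compound, while "a = b"
+  // (Token::ASSIGN) is not.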
 
   // An initialization block is a series of statments of the form
   // x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index deda96f..6ae3125 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -95,6 +95,8 @@
 static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
 // This is for delete, not delete[].
 static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
+// This is for delete[], not delete.
+static List<char*>* delete_these_arrays_on_tear_down = NULL;
 
 
 NativesExternalStringResource::NativesExternalStringResource(const char* source)
@@ -150,17 +152,41 @@
 }
 
 
+char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
+  char* memory = new char[bytes];
+  if (memory != NULL) {
+    if (delete_these_arrays_on_tear_down == NULL) {
+      delete_these_arrays_on_tear_down = new List<char*>(2);
+    }
+    delete_these_arrays_on_tear_down->Add(memory);
+  }
+  return memory;
+}
+
+
 void Bootstrapper::TearDown() {
   if (delete_these_non_arrays_on_tear_down != NULL) {
     int len = delete_these_non_arrays_on_tear_down->length();
     ASSERT(len < 20);  // Don't use this mechanism for unbounded allocations.
     for (int i = 0; i < len; i++) {
       delete delete_these_non_arrays_on_tear_down->at(i);
+      delete_these_non_arrays_on_tear_down->at(i) = NULL;
     }
     delete delete_these_non_arrays_on_tear_down;
     delete_these_non_arrays_on_tear_down = NULL;
   }
 
+  if (delete_these_arrays_on_tear_down != NULL) {
+    int len = delete_these_arrays_on_tear_down->length();
+    ASSERT(len < 1000);  // Don't use this mechanism for unbounded allocations.
+    for (int i = 0; i < len; i++) {
+      delete[] delete_these_arrays_on_tear_down->at(i);
+      delete_these_arrays_on_tear_down->at(i) = NULL;
+    }
+    delete delete_these_arrays_on_tear_down;
+    delete_these_arrays_on_tear_down = NULL;
+  }
+
   natives_cache.Initialize(false);  // Yes, symmetrical
   extensions_cache.Initialize(false);
 }
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 07d2747..7cd3a2b 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -74,6 +74,10 @@
   static char* ArchiveState(char* to);
   static char* RestoreState(char* from);
   static void FreeThreadResources();
+
+  // This will allocate a char array that is deleted when V8 is shut down.
+  // It should only be used for strictly finite allocations.
+  static char* AllocateAutoDeletedArray(int bytes);
 };
 
 
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index dbc39ff..09581aa 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -35,82 +35,117 @@
 namespace v8 {
 namespace internal {
 
-Handle<Code> CodeStub::GetCode() {
-  bool custom_cache = has_custom_cache();
-
-  int index = 0;
-  uint32_t key = 0;
-  if (custom_cache) {
-    Code* cached;
-    if (GetCustomCache(&cached)) {
-      return Handle<Code>(cached);
-    } else {
-      index = NumberDictionary::kNotFound;
-    }
-  } else {
-    key = GetKey();
-    index = Heap::code_stubs()->FindEntry(key);
-    if (index != NumberDictionary::kNotFound)
-      return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+bool CodeStub::FindCodeInCache(Code** code_out) {
+  if (has_custom_cache()) return GetCustomCache(code_out);
+  int index = Heap::code_stubs()->FindEntry(GetKey());
+  if (index != NumberDictionary::kNotFound) {
+    *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
+    return true;
   }
+  return false;
+}
 
-  Code* result;
-  {
+
+void CodeStub::GenerateCode(MacroAssembler* masm) {
+  // Update the static counter each time a new code stub is generated.
+  Counters::code_stubs.Increment();
+  // Nested stubs are not allowed for leaves.
+  masm->set_allow_stub_calls(AllowsStubCalls());
+  // Generate the code for the stub.
+  masm->set_generating_stub(true);
+  Generate(masm);
+}
+
+
+void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
+  code->set_major_key(MajorKey());
+
+  // Add unresolved entries in the code to the fixup list.
+  Bootstrapper::AddFixup(code, masm);
+
+  LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+  Counters::total_stubs_code_size.Increment(code->instruction_size());
+
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_code_stubs) {
+#ifdef DEBUG
+    Print();
+#endif
+    code->Disassemble(GetName());
+    PrintF("\n");
+  }
+#endif
+}
+
+
+Handle<Code> CodeStub::GetCode() {
+  Code* code;
+  if (!FindCodeInCache(&code)) {
     v8::HandleScope scope;
 
-    // Update the static counter each time a new code stub is generated.
-    Counters::code_stubs.Increment();
-
     // Generate the new code.
     MacroAssembler masm(NULL, 256);
-
-    // Nested stubs are not allowed for leafs.
-    masm.set_allow_stub_calls(AllowsStubCalls());
-
-    // Generate the code for the stub.
-    masm.set_generating_stub(true);
-    Generate(&masm);
+    GenerateCode(&masm);
 
     // Create the code object.
     CodeDesc desc;
     masm.GetCode(&desc);
 
-    // Copy the generated code into a heap object, and store the major key.
+    // Copy the generated code into a heap object.
     Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
-    Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
-    code->set_major_key(MajorKey());
+    Handle<Code> new_object =
+        Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+    RecordCodeGeneration(*new_object, &masm);
 
-    // Add unresolved entries in the code to the fixup list.
-    Bootstrapper::AddFixup(*code, &masm);
-
-    LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
-    Counters::total_stubs_code_size.Increment(code->instruction_size());
-
-#ifdef ENABLE_DISASSEMBLER
-    if (FLAG_print_code_stubs) {
-#ifdef DEBUG
-      Print();
-#endif
-      code->Disassemble(GetName());
-      PrintF("\n");
-    }
-#endif
-
-    if (custom_cache) {
-      SetCustomCache(*code);
+    if (has_custom_cache()) {
+      SetCustomCache(*new_object);
     } else {
       // Update the dictionary and the root in Heap.
       Handle<NumberDictionary> dict =
           Factory::DictionaryAtNumberPut(
               Handle<NumberDictionary>(Heap::code_stubs()),
-              key,
-              code);
+              GetKey(),
+              new_object);
       Heap::public_set_code_stubs(*dict);
     }
-    result = *code;
+    code = *new_object;
   }
 
-  return Handle<Code>(result);
+  return Handle<Code>(code);
+}
+
+
+Object* CodeStub::TryGetCode() {
+  Code* code;
+  if (!FindCodeInCache(&code)) {
+    // Generate the new code.
+    MacroAssembler masm(NULL, 256);
+    GenerateCode(&masm);
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Try to copy the generated code into a heap object.
+    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Object* new_object =
+        Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+    if (new_object->IsFailure()) return new_object;
+    code = Code::cast(new_object);
+    RecordCodeGeneration(code, &masm);
+
+    if (has_custom_cache()) {
+      SetCustomCache(code);
+    } else {
+      // Try to update the code cache but do not fail if unable.
+      new_object = Heap::code_stubs()->AtNumberPut(GetKey(), code);
+      if (!new_object->IsFailure()) {
+        Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
+      }
+    }
+  }
+
+  return code;
 }
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 25a2d0f..9012fe7 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -83,6 +83,11 @@
   // Retrieve the code for the stub. Generate the code if needed.
   Handle<Code> GetCode();
 
+  // Retrieve the code for the stub if already generated.  Do not generate
+  // the code if it is not already generated; instead return a retry-after-GC
+  // failure object.
+  Object* TryGetCode();
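+  // Callers must check the result, e.g.:
+  //   Object* code = stub.TryGetCode();
+  //   if (code->IsFailure()) return code;  // Retry after GC.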
+
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
   };
@@ -104,9 +109,20 @@
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
+  // Lookup the code in the (possibly custom) cache.
+  bool FindCodeInCache(Code** code_out);
+
+  // Nonvirtual wrapper around the stub-specific Generate function.  Call
+  // this function to set up the macro assembler and generate the code.
+  void GenerateCode(MacroAssembler* masm);
+
   // Generates the assembler code for the stub.
   virtual void Generate(MacroAssembler* masm) = 0;
 
+  // Perform bookkeeping required after code generation when stub code is
+  // initially generated.
+  void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;
diff --git a/src/compiler.cc b/src/compiler.cc
index b5fbba4..2c055a3 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -56,6 +56,8 @@
  private:
   // Visit an expression in a given expression context.
   void ProcessExpression(Expression* expr, Expression::Context context) {
+    ASSERT(expr->context() == Expression::kUninitialized ||
+           expr->context() == context);
     Expression::Context saved = context_;
     context_ = context;
     Visit(expr);
@@ -596,7 +598,7 @@
       Slot* slot = scope->parameter(i)->slot();
       if (slot != NULL && slot->type() == Slot::CONTEXT) {
         if (FLAG_trace_bailout) {
-          PrintF("function has context-allocated parameters");
+          PrintF("Function has context-allocated parameters.\n");
         }
         return NORMAL;
       }
@@ -688,12 +690,10 @@
 
 
 void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
-  BAILOUT("ContinueStatement");
 }
 
 
 void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
-  BAILOUT("BreakStatement");
 }
 
 
@@ -703,12 +703,12 @@
 
 
 void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  BAILOUT("WithEnterStatement");
+  ProcessExpression(stmt->expression(), Expression::kValue);
 }
 
 
 void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
-  BAILOUT("WithExitStatement");
+  // Supported.
 }
 
 
@@ -736,21 +736,7 @@
 
 
 void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
-  // We do not handle loops with breaks or continue statements in their
-  // body.  We will bailout when we hit those statements in the body.
-  if (stmt->init() != NULL) {
-    Visit(stmt->init());
-    CHECK_BAILOUT;
-  }
-  if (stmt->cond() != NULL) {
-    ProcessExpression(stmt->cond(), Expression::kTest);
-    CHECK_BAILOUT;
-  }
-  Visit(stmt->body());
-  if (stmt->next() != NULL) {
-    CHECK_BAILOUT;
-    Visit(stmt->next());
-  }
+  BAILOUT("ForStatement");
 }
 
 
@@ -765,7 +751,9 @@
 
 
 void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  BAILOUT("TryFinallyStatement");
+  Visit(stmt->try_block());
+  CHECK_BAILOUT;
+  Visit(stmt->finally_block());
 }
 
 
@@ -897,34 +885,22 @@
   // non-context (stack-allocated) locals, and global variables.
   Token::Value op = expr->op();
   if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-  if (op != Token::ASSIGN && op != Token::INIT_VAR) {
-    BAILOUT("compound assignment");
-  }
 
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   Property* prop = expr->target()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
   if (var != NULL) {
     // All global variables are supported.
     if (!var->is_global()) {
-      if (var->slot() == NULL) {
-        Property* property = var->AsProperty();
-        if (property == NULL) {
-          BAILOUT("non-global/non-slot/non-property assignment");
-        }
-        if (property->obj()->AsSlot() == NULL) {
-          BAILOUT("variable rewritten to property non slot object assignment");
-        }
-        if (property->key()->AsLiteral() == NULL) {
-          BAILOUT("variable rewritten to property non literal key assignment");
-        }
-      } else {
-        Slot::Type type = var->slot()->type();
-        if (type == Slot::LOOKUP) {
-          BAILOUT("Lookup slot");
-        }
+      ASSERT(var->slot() != NULL);
+      Slot::Type type = var->slot()->type();
+      if (type == Slot::LOOKUP) {
+        BAILOUT("Lookup slot");
       }
     }
   } else if (prop != NULL) {
+    ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+           prop->obj()->context() == Expression::kValue);
     ProcessExpression(prop->obj(), Expression::kValue);
     CHECK_BAILOUT;
     // We will only visit the key during code generation for keyed property
@@ -935,6 +911,8 @@
     if (lit == NULL ||
         !lit->handle()->IsSymbol() ||
         String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
+      ASSERT(prop->key()->context() == Expression::kUninitialized ||
+             prop->key()->context() == Expression::kValue);
       ProcessExpression(prop->key(), Expression::kValue);
       CHECK_BAILOUT;
     }
@@ -1130,7 +1108,7 @@
 
 
 void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
-  BAILOUT("ThisFunction");
+  // ThisFunction is supported.
 }
 
 #undef BAILOUT
diff --git a/src/execution.cc b/src/execution.cc
index 2f646a5..8a50864 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -30,6 +30,7 @@
 #include "v8.h"
 
 #include "api.h"
+#include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
 #include "simulator.h"
@@ -607,6 +608,11 @@
     return Heap::undefined_value();
   }
 
+  // Ignore debug break during bootstrapping.
+  if (Bootstrapper::IsActive()) {
+    return Heap::undefined_value();
+  }
+
   {
     JavaScriptFrameIterator it;
     ASSERT(!it.done());
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 20de808..b15a673 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
 
 Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
                                          Handle<Script> script,
@@ -232,8 +232,10 @@
 
 void FastCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
+  Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
   VisitStatements(stmt->statements());
+  __ bind(nested_statement.break_target());
 }
 
 
@@ -278,22 +280,88 @@
 
 
 void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ ContinueStatement");
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (!current->IsContinueTarget(stmt->target())) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  Iteration* loop = current->AsIteration();
+  __ jmp(loop->continue_target());
 }
 
 
 void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ BreakStatement");
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (!current->IsBreakTarget(stmt->target())) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  Breakable* target = current->AsBreakable();
+  __ jmp(target->break_target());
 }
 
 
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ Move(result_register(), expr->AsLiteral()->handle());
+  } else {
+    ASSERT_EQ(Expression::kValue, expr->context());
+    Visit(expr);
+    __ pop(result_register());
+  }
+
+  // Exit all nested statements.
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (current != NULL) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  EmitReturnSequence(stmt->statement_pos());
+}
+
+
 void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  SetStatementPosition(stmt);
+
+  Visit(stmt->expression());
+  if (stmt->is_catch_block()) {
+    __ CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    __ CallRuntime(Runtime::kPushContext, 1);
+  }
+  // Both runtime calls return the new context in both the context and the
+  // result registers.
+
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 }
 
 
 void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ WithExitStatement");
+  SetStatementPosition(stmt);
+
+  // Pop context.
+  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 }
 
 
@@ -304,8 +372,10 @@
 
 void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   Comment cmnt(masm_, "[ DoWhileStatement");
+  Label body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
   increment_loop_depth();
-  Label body, exit, stack_limit_hit, stack_check_success;
 
   __ bind(&body);
   Visit(stmt->body());
@@ -316,10 +386,11 @@
 
   // We are not in an expression context because we have been compiling
   // statements.  Set up a test expression context for the condition.
+  __ bind(loop_statement.continue_target());
   ASSERT_EQ(NULL, true_label_);
   ASSERT_EQ(NULL, false_label_);
   true_label_ = &body;
-  false_label_ = &exit;
+  false_label_ = loop_statement.break_target();
   ASSERT(stmt->cond()->context() == Expression::kTest);
   Visit(stmt->cond());
   true_label_ = NULL;
@@ -330,7 +401,7 @@
   __ CallStub(&stack_stub);
   __ jmp(&stack_check_success);
 
-  __ bind(&exit);
+  __ bind(loop_statement.break_target());
 
   decrement_loop_depth();
 }
@@ -338,16 +409,18 @@
 
 void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   Comment cmnt(masm_, "[ WhileStatement");
+  Label body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
   increment_loop_depth();
-  Label test, body, exit, stack_limit_hit, stack_check_success;
 
   // Emit the test at the bottom of the loop.
-  __ jmp(&test);
+  __ jmp(loop_statement.continue_target());
 
   __ bind(&body);
   Visit(stmt->body());
 
-  __ bind(&test);
+  __ bind(loop_statement.continue_target());
   // Check stack before looping.
   __ StackLimitCheck(&stack_limit_hit);
   __ bind(&stack_check_success);
@@ -357,7 +430,7 @@
   ASSERT_EQ(NULL, true_label_);
   ASSERT_EQ(NULL, false_label_);
   true_label_ = &body;
-  false_label_ = &exit;
+  false_label_ = loop_statement.break_target();
   ASSERT(stmt->cond()->context() == Expression::kTest);
   Visit(stmt->cond());
   true_label_ = NULL;
@@ -368,55 +441,13 @@
   __ CallStub(&stack_stub);
   __ jmp(&stack_check_success);
 
-  __ bind(&exit);
-
+  __ bind(loop_statement.break_target());
   decrement_loop_depth();
 }
 
 
 void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
-  Comment cmnt(masm_, "[ ForStatement");
-  Label test, body, exit, stack_limit_hit, stack_check_success;
-  if (stmt->init() != NULL) Visit(stmt->init());
-
-  increment_loop_depth();
-  // Emit the test at the bottom of the loop (even if empty).
-  __ jmp(&test);
-  __ bind(&body);
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
-
-  if (stmt->next() != NULL) Visit(stmt->next());
-
-  __ bind(&test);
-
-  if (stmt->cond() == NULL) {
-    // For an empty test jump to the top of the loop.
-    __ jmp(&body);
-  } else {
-    // We are not in an expression context because we have been compiling
-    // statements.  Set up a test expression context for the condition.
-    ASSERT_EQ(NULL, true_label_);
-    ASSERT_EQ(NULL, false_label_);
-
-    true_label_ = &body;
-    false_label_ = &exit;
-    ASSERT(stmt->cond()->context() == Expression::kTest);
-    Visit(stmt->cond());
-    true_label_ = NULL;
-    false_label_ = NULL;
-  }
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(&exit);
-  decrement_loop_depth();
+  UNREACHABLE();
 }
 
 
@@ -431,7 +462,63 @@
 
 
 void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  UNREACHABLE();
+  // Try finally is compiled by setting up a try-handler on the stack while
+  // executing the try body, and removing it again afterwards.
+  //
+  // The try-finally construct can enter the finally block in three ways:
+  // 1. By exiting the try-block normally. This removes the try-handler and
+  //    calls the finally block code before continuing.
+  // 2. By exiting the try-block with a function-local control flow transfer
+  //    (break/continue/return). The site of the break/continue/return
+  //    removes the try handler and calls the finally block code before
+  //    continuing its outward control transfer.
+  // 3. By exiting the try-block with a thrown exception.
+  //    This can happen in nested function calls. It traverses the try-handler
+  //    chain and consumes the try-handler entry before jumping to the
+  //    handler code. The handler code then calls the finally-block before
+  //    rethrowing the exception.
+  //
+  // The finally block must assume a return address on top of the stack
+  // (or in the link register on ARM chips) and a value (return value or
+  // exception) in the result register (rax/eax/r0), both of which must
+  // be preserved. The return address isn't GC-safe, so it should be
+  // cooked before GC.
+  Label finally_entry;
+  Label try_handler_setup;
+
+  // Set up the try-handler chain and run the try-block code. A call is
+  // used so that the address of the handler code (the instruction right
+  // after the call) is pushed on the stack.
+  __ Call(&try_handler_setup);
+  // Try handler code. Return address of call is pushed on handler stack.
+  {
+    // This code is only executed during stack-handler traversal when an
+    // exception is thrown. The exception is in the result register, which
+    // is retained by the finally block.
+    // Call the finally block and then rethrow the exception.
+    __ Call(&finally_entry);
+    ThrowException();
+  }
+
+  __ bind(&finally_entry);
+  {
+    // Finally block implementation.
+    EnterFinallyBlock();
+    Finally finally_block(this);
+    Visit(stmt->finally_block());
+    ExitFinallyBlock();  // Return to the calling code.
+  }
+
+  __ bind(&try_handler_setup);
+  {
+    // Set up the try handler (push it and link it into the handler chain).
+    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+    TryFinally try_block(this, &finally_entry);
+    VisitStatements(stmt->try_block()->statements());
+    __ PopTryHandler();
+  }
+  // Execute the finally block on the way out.
+  __ Call(&finally_entry);
 }
 
 
@@ -500,40 +587,79 @@
 
 void FastCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
 
   // Record source code position of the (possible) IC call.
   SetSourcePosition(expr->position());
 
-  Expression* rhs = expr->value();
-  // Left-hand side can only be a property, a global or a (parameter or
-  // local) slot.
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
   Property* prop = expr->target()->AsProperty();
-  if (var != NULL) {
-    Visit(rhs);
-    ASSERT_EQ(Expression::kValue, rhs->context());
-    EmitVariableAssignment(expr);
-  } else if (prop != NULL) {
-    // Assignment to a property.
-    Visit(prop->obj());
-    ASSERT_EQ(Expression::kValue, prop->obj()->context());
-    // Use the expression context of the key subexpression to detect whether
-    // we have decided to us a named or keyed IC.
-    if (prop->key()->context() == Expression::kUninitialized) {
-      ASSERT(prop->key()->AsLiteral() != NULL);
-      Visit(rhs);
-      ASSERT_EQ(Expression::kValue, rhs->context());
-      EmitNamedPropertyAssignment(expr);
-    } else {
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type = (prop->key()->context() == Expression::kUninitialized)
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
+      break;
+    case KEYED_PROPERTY:
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
       Visit(prop->key());
       ASSERT_EQ(Expression::kValue, prop->key()->context());
-      Visit(rhs);
-      ASSERT_EQ(Expression::kValue, rhs->context());
-      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+
+  // If we have a compound assignment: Get the value of the LHS expression
+  // and store it on top of the stack.
+  // Note: Relies on kValue context being 'stack'.
+  if (expr->is_compound()) {
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop, Expression::kValue);
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(Expression::kValue);
+        break;
     }
-  } else {
-    UNREACHABLE();
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  ASSERT_EQ(Expression::kValue, rhs->context());
+  Visit(rhs);
+
+  // If we have a compound assignment: Apply the operator.
+  if (expr->is_compound()) {
+    EmitCompoundAssignmentOp(expr->binary_op(), Expression::kValue);
+  }
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
   }
 }
 
@@ -548,8 +674,20 @@
 }
 
 
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  UNREACHABLE();
+int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
+  // The macros used here must preserve the result register.
+  __ Drop(stack_depth);
+  __ PopTryHandler();
+  __ Call(finally_entry_);
+  return 0;
+}
+
+
+int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
+  // The macros used here must preserve the result register.
+  __ Drop(stack_depth);
+  __ PopTryHandler();
+  return 0;
 }
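
The break, continue, and return visitors above share one unwinding idiom:
walk the nesting_stack_ chain, let every enclosing construct account for the
stack words it owns via Exit(), then emit a single Drop. A self-contained
sketch of just the depth accounting (the hypothetical Nested type stands in
for NestedStatement; the real Exit() for try blocks also emits pop/call code):

    #include <cstdio>

    // Hypothetical stand-in for FastCodeGenerator::NestedStatement.
    struct Nested {
      Nested* outer;
      int owned_slots;  // stack words this construct keeps live
      bool is_target;   // does the break/continue stop here?
      int Exit(int stack_depth) const { return stack_depth + owned_slots; }
    };

    int main() {
      Nested loop  = { nullptr, 0, true  };  // the targeted loop
      Nested fin   = { &loop,   2, false };  // a finally block, 2 slots
      Nested block = { &fin,    0, false };  // innermost block

      // Same loop shape as VisitBreakStatement above.
      int stack_depth = 0;
      const Nested* current = &block;
      while (!current->is_target) {
        stack_depth = current->Exit(stack_depth);
        current = current->outer;
      }
      std::printf("drop %d slots, then jump\n", stack_depth);  // 2 slots
      return 0;
    }
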
 
 
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index 9b262a7..1ce759d 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -35,6 +35,8 @@
 namespace v8 {
 namespace internal {
 
+// -----------------------------------------------------------------------------
+// Fast code generator.
 
 class FastCodeGenerator: public AstVisitor {
  public:
@@ -43,6 +45,7 @@
         function_(NULL),
         script_(script),
         is_eval_(is_eval),
+        nesting_stack_(NULL),
         loop_depth_(0),
         true_label_(NULL),
         false_label_(NULL) {
@@ -55,6 +58,159 @@
   void Generate(FunctionLiteral* fun);
 
  private:
+  class Breakable;
+  class Iteration;
+  class TryCatch;
+  class TryFinally;
+  class Finally;
+  class ForIn;
+
+  class NestedStatement BASE_EMBEDDED {
+   public:
+    explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
+      // Link into codegen's nesting stack.
+      previous_ = codegen->nesting_stack_;
+      codegen->nesting_stack_ = this;
+    }
+    virtual ~NestedStatement() {
+      // Unlink from codegen's nesting stack.
+      ASSERT_EQ(this, codegen_->nesting_stack_);
+      codegen_->nesting_stack_ = previous_;
+    }
+
+    virtual Breakable* AsBreakable() { return NULL; }
+    virtual Iteration* AsIteration() { return NULL; }
+    virtual TryCatch* AsTryCatch() { return NULL; }
+    virtual TryFinally* AsTryFinally() { return NULL; }
+    virtual Finally* AsFinally() { return NULL; }
+    virtual ForIn* AsForIn() { return NULL; }
+
+    virtual bool IsContinueTarget(Statement* target) { return false; }
+    virtual bool IsBreakTarget(Statement* target) { return false; }
+
+    // Generate code to leave the nested statement. This includes
+    // cleaning up any stack elements in use and restoring the
+    // stack to the expectations of the surrounding statements.
+    // Takes a number of stack elements currently on top of the
+    // nested statement's stack, and returns a number of stack
+    // elements left on top of the surrounding statement's stack.
+    // The generated code must preserve the result register (which
+    // contains the value in case of a return).
+    virtual int Exit(int stack_depth) {
+      // Default implementation for the case where there is
+      // nothing to clean up.
+      return stack_depth;
+    }
+    NestedStatement* outer() { return previous_; }
+   protected:
+    MacroAssembler* masm() { return codegen_->masm(); }
+   private:
+    FastCodeGenerator* codegen_;
+    NestedStatement* previous_;
+    DISALLOW_COPY_AND_ASSIGN(NestedStatement);
+  };
+
+  class Breakable : public NestedStatement {
+   public:
+    Breakable(FastCodeGenerator* codegen,
+              BreakableStatement* break_target)
+        : NestedStatement(codegen),
+          target_(break_target) {}
+    virtual ~Breakable() {}
+    virtual Breakable* AsBreakable() { return this; }
+    virtual bool IsBreakTarget(Statement* statement) {
+      return target_ == statement;
+    }
+    BreakableStatement* statement() { return target_; }
+    Label* break_target() { return &break_target_label_; }
+   private:
+    BreakableStatement* target_;
+    Label break_target_label_;
+    DISALLOW_COPY_AND_ASSIGN(Breakable);
+  };
+
+  class Iteration : public Breakable {
+   public:
+    Iteration(FastCodeGenerator* codegen,
+              IterationStatement* iteration_statement)
+        : Breakable(codegen, iteration_statement) {}
+    virtual ~Iteration() {}
+    virtual Iteration* AsIteration() { return this; }
+    virtual bool IsContinueTarget(Statement* statement) {
+      return this->statement() == statement;
+    }
+    Label* continue_target() { return &continue_target_label_; }
+   private:
+    Label continue_target_label_;
+    DISALLOW_COPY_AND_ASSIGN(Iteration);
+  };
+
+  // The environment inside the try block of a try/catch statement.
+  class TryCatch : public NestedStatement {
+   public:
+    explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
+        : NestedStatement(codegen), catch_entry_(catch_entry) { }
+    virtual ~TryCatch() {}
+    virtual TryCatch* AsTryCatch() { return this; }
+    Label* catch_entry() { return catch_entry_; }
+    virtual int Exit(int stack_depth);
+   private:
+    Label* catch_entry_;
+    DISALLOW_COPY_AND_ASSIGN(TryCatch);
+  };
+
+  // The environment inside the try block of a try/finally statement.
+  class TryFinally : public NestedStatement {
+   public:
+    explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
+        : NestedStatement(codegen), finally_entry_(finally_entry) { }
+    virtual ~TryFinally() {}
+    virtual TryFinally* AsTryFinally() { return this; }
+    Label* finally_entry() { return finally_entry_; }
+    virtual int Exit(int stack_depth);
+   private:
+    Label* finally_entry_;
+    DISALLOW_COPY_AND_ASSIGN(TryFinally);
+  };
+
+  // A Finally nested statement represents being inside a finally block.
+  // Abnormal termination of the finally block needs to clean up
+  // the block's parameters from the stack.
+  class Finally : public NestedStatement {
+   public:
+    explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
+    virtual ~Finally() {}
+    virtual Finally* AsFinally() { return this; }
+    virtual int Exit(int stack_depth) {
+      return stack_depth + kFinallyStackElementCount;
+    }
+   private:
+    // Number of extra stack slots occupied during a finally block.
+    static const int kFinallyStackElementCount = 2;
+    DISALLOW_COPY_AND_ASSIGN(Finally);
+  };
+
+  // A ForIn nested statement represents being inside a for-in loop.
+  // Abnormal termination of the for-in block needs to clean up
+  // the block's temporary storage from the stack.
+  class ForIn : public Iteration {
+   public:
+    ForIn(FastCodeGenerator* codegen,
+          ForInStatement* statement)
+        : Iteration(codegen, statement) { }
+    virtual ~ForIn() {}
+    virtual ForIn* AsForIn() { return this; }
+    virtual int Exit(int stack_depth) {
+      return stack_depth + kForInStackElementCount;
+    }
+   private:
+    // TODO(lrn): Check that this value is correct when implementing
+    // for-in.
+    static const int kForInStackElementCount = 5;
+    DISALLOW_COPY_AND_ASSIGN(ForIn);
+  };
+
+
   int SlotOffset(Slot* slot);
   void Move(Expression::Context destination, Register source);
   void Move(Expression::Context destination, Slot* source, Register scratch);
@@ -86,8 +242,23 @@
   void EmitCallWithStub(Call* expr);
   void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
 
+  // Platform-specific code for loading variables.
+  void EmitVariableLoad(Variable* expr, Expression::Context context);
+
   // Platform-specific support for compiling assignments.
 
+  // Load a value from a named property and push the result on the stack.
+  // The receiver is left on the stack by the IC.
+  void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
+
+  // Load a value from a keyed property and push the result on the stack.
+  // The receiver and the key are left on the stack by the IC.
+  void EmitKeyedPropertyLoad(Expression::Context context);
+
+  // Apply the compound assignment operator. Expects both operands on top
+  // of the stack.
+  void EmitCompoundAssignmentOp(Token::Value op, Expression::Context context);
+
   // Complete a variable assignment.  The right-hand-side value is expected
   // on top of the stack.
   void EmitVariableAssignment(Assignment* expr);
@@ -105,6 +276,12 @@
   void SetStatementPosition(Statement* stmt);
   void SetSourcePosition(int pos);
 
+  // Non-local control flow support.
+  void EnterFinallyBlock();
+  void ExitFinallyBlock();
+  void ThrowException();
+
+  // Loop nesting counter.
   int loop_depth() { return loop_depth_; }
   void increment_loop_depth() { loop_depth_++; }
   void decrement_loop_depth() {
@@ -112,11 +289,22 @@
     loop_depth_--;
   }
 
+  MacroAssembler* masm() { return masm_; }
+  static Register result_register();
+  static Register context_register();
+
+  // Set fields in the stack frame. Offsets are the frame pointer relative
+  // offsets defined in, e.g., StandardFrameConstants.
+  void StoreToFrameField(int frame_offset, Register value);
+
+  // Load a value from the current context. Indices are defined as an enum
+  // in v8::internal::Context.
+  void LoadContextField(Register dst, int context_index);
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
-
   // Handles the shortcutted logical binary operations in VisitBinaryOperation.
   void EmitLogicalOperation(BinaryOperation* expr);
 
@@ -125,11 +313,14 @@
   Handle<Script> script_;
   bool is_eval_;
   Label return_label_;
+  NestedStatement* nesting_stack_;
   int loop_depth_;
 
   Label* true_label_;
   Label* false_label_;
 
+  friend class NestedStatement;
+
   DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
 };
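
Note how NestedStatement's constructor and destructor make the nesting stack
self-maintaining: declaring a Breakable/Iteration/TryFinally local in a visit
function links it in for exactly the dynamic extent of that visit. A reduced
sketch of the same RAII discipline (hypothetical Scope/CodeGen names):

    struct CodeGen;

    struct Scope {
      explicit Scope(CodeGen* cg);
      ~Scope();
      CodeGen* cg_;
      Scope* previous_;
    };

    struct CodeGen {
      Scope* nesting_stack_ = nullptr;
    };

    Scope::Scope(CodeGen* cg) : cg_(cg), previous_(cg->nesting_stack_) {
      cg->nesting_stack_ = this;        // link on construction
    }

    Scope::~Scope() {
      cg_->nesting_stack_ = previous_;  // unlink on destruction, LIFO order
    }

    void VisitLoop(CodeGen* cg) {
      Scope loop(cg);  // nesting_stack_ == &loop while the body is visited
      // ... visit the loop body ...
    }                  // automatically restored here

    int main() {
      CodeGen cg;
      VisitLoop(&cg);
      return cg.nesting_stack_ == nullptr ? 0 : 1;  // stack fully unwound
    }
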
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index f3b2b0c..e4bb925 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -168,6 +168,12 @@
       if (first_deallocated()) {
         first_deallocated()->set_next(head());
       }
+      // Check that we are not passing a finalized external string to
+      // the callback.
+      ASSERT(!object_->IsExternalAsciiString() ||
+             ExternalAsciiString::cast(object_)->resource() != NULL);
+      ASSERT(!object_->IsExternalTwoByteString() ||
+             ExternalTwoByteString::cast(object_)->resource() != NULL);
       // Leaving V8.
       VMState state(EXTERNAL);
       func(object, par);
@@ -436,15 +442,15 @@
   *stats->near_death_global_handle_count = 0;
   *stats->destroyed_global_handle_count = 0;
   for (Node* current = head_; current != NULL; current = current->next()) {
-    *stats->global_handle_count++;
+    *stats->global_handle_count += 1;
     if (current->state_ == Node::WEAK) {
-      *stats->weak_global_handle_count++;
+      *stats->weak_global_handle_count += 1;
     } else if (current->state_ == Node::PENDING) {
-      *stats->pending_global_handle_count++;
+      *stats->pending_global_handle_count += 1;
     } else if (current->state_ == Node::NEAR_DEATH) {
-      *stats->near_death_global_handle_count++;
+      *stats->near_death_global_handle_count += 1;
     } else if (current->state_ == Node::DESTROYED) {
-      *stats->destroyed_global_handle_count++;
+      *stats->destroyed_global_handle_count += 1;
     }
   }
 }
@@ -507,5 +513,4 @@
   object_groups->Clear();
 }
 
-
 } }  // namespace v8::internal
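
The counting change above fixes an operator-precedence bug: the struct fields
are pointers, and *p++ parses as *(p++), so the old code advanced the pointer
and threw away the load instead of bumping the counter. A minimal
illustration:

    #include <cassert>

    int main() {
      int counters[2] = {0, 0};
      int* p = counters;

      *p++;             // *(p++): reads counters[0], then advances p
      assert(counters[0] == 0);   // the counter was never incremented

      p = counters;
      *p += 1;          // what the fixed code does
      assert(counters[0] == 1);
      return 0;
    }
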
diff --git a/src/globals.h b/src/globals.h
index ad0539f..462ff74 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -294,7 +294,7 @@
 
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 
-enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
+enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
 
 
 // A CodeDesc describes a buffer holding instructions and relocation
diff --git a/src/heap-inl.h b/src/heap-inl.h
index eccd5ee..992d89a 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -109,6 +109,19 @@
 }
 
 
+void Heap::FinalizeExternalString(String* string) {
+  ASSERT(string->IsExternalString());
+  v8::String::ExternalStringResourceBase** resource_addr =
+      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+          reinterpret_cast<byte*>(string) +
+          ExternalString::kResourceOffset -
+          kHeapObjectTag);
+  delete *resource_addr;
+  // Clear the resource pointer in the string.
+  *resource_addr = NULL;
+}
+
+
 Object* Heap::AllocateRawMap() {
 #ifdef DEBUG
   Counters::objs_since_last_full.Increment();
@@ -321,6 +334,56 @@
 #endif
 
 
+void ExternalStringTable::AddString(String* string) {
+  ASSERT(string->IsExternalString());
+  if (Heap::InNewSpace(string)) {
+    new_space_strings_.Add(string);
+  } else {
+    old_space_strings_.Add(string);
+  }
+}
+
+
+void ExternalStringTable::Iterate(ObjectVisitor* v) {
+  if (!new_space_strings_.is_empty()) {
+    Object** start = &new_space_strings_[0];
+    v->VisitPointers(start, start + new_space_strings_.length());
+  }
+  if (!old_space_strings_.is_empty()) {
+    Object** start = &old_space_strings_[0];
+    v->VisitPointers(start, start + old_space_strings_.length());
+  }
+}
+
+
+// Verify() is inline to avoid ifdef-s around its calls in release
+// mode.
+void ExternalStringTable::Verify() {
+#ifdef DEBUG
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    ASSERT(Heap::InNewSpace(new_space_strings_[i]));
+    ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
+  }
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+    ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
+  }
+#endif
+}
+
+
+void ExternalStringTable::AddOldString(String* string) {
+  ASSERT(string->IsExternalString());
+  ASSERT(!Heap::InNewSpace(string));
+  old_space_strings_.Add(string);
+}
+
+
+void ExternalStringTable::ShrinkNewStrings(int position) {
+  new_space_strings_.Rewind(position);
+  Verify();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_INL_H_
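
FinalizeExternalString above operates on a tagged pointer: V8 heap references
carry a low tag bit (kHeapObjectTag), so reaching a raw field means
subtracting the tag before adding the field offset. Schematically, with
illustrative constants rather than the real object layout:

    #include <cstdint>

    int main() {
      const intptr_t kHeapObjectTag = 1;    // low bit set on heap references
      const intptr_t kResourceOffset = 12;  // hypothetical field offset

      intptr_t object_start = 0x1000;                   // raw object address
      intptr_t tagged = object_start + kHeapObjectTag;  // what V8 passes around

      // Same arithmetic as FinalizeExternalString above.
      intptr_t field = tagged + kResourceOffset - kHeapObjectTag;
      return field == object_start + kResourceOffset ? 0 : 1;
    }
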
diff --git a/src/heap.cc b/src/heap.cc
index 4e4cd1c..b9aa95c 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -733,7 +733,7 @@
 
   ScavengeVisitor scavenge_visitor;
   // Copy roots.
-  IterateRoots(&scavenge_visitor, VISIT_ALL);
+  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
   // Copy objects reachable from the old generation.  By definition,
   // there are no intergenerational pointers in code or data spaces.
@@ -753,6 +753,63 @@
     }
   }
 
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  ScavengeExternalStringTable();
+  ASSERT(new_space_front == new_space_.top());
+
+  // Set age mark.
+  new_space_.set_age_mark(new_space_.top());
+
+  // Update how much has survived scavenge.
+  survived_since_last_expansion_ +=
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
+  LOG(ResourceEvent("scavenge", "end"));
+
+  gc_state_ = NOT_IN_GC;
+}
+
+
+void Heap::ScavengeExternalStringTable() {
+  ExternalStringTable::Verify();
+
+  if (ExternalStringTable::new_space_strings_.is_empty()) return;
+
+  Object** start = &ExternalStringTable::new_space_strings_[0];
+  Object** end = start + ExternalStringTable::new_space_strings_.length();
+  Object** last = start;
+
+  for (Object** p = start; p < end; ++p) {
+    ASSERT(Heap::InFromSpace(*p));
+    MapWord first_word = HeapObject::cast(*p)->map_word();
+
+    if (!first_word.IsForwardingAddress()) {
+      // An unreachable external string can be finalized.
+      FinalizeExternalString(String::cast(*p));
+      continue;
+    }
+
+    // String is still reachable.
+    String* target = String::cast(first_word.ToForwardingAddress());
+    ASSERT(target->IsExternalString());
+
+    if (Heap::InNewSpace(target)) {
+      // String is still in new space.  Update the table entry.
+      *last = target;
+      ++last;
+    } else {
+      // String got promoted.  Move it to the old string list.
+      ExternalStringTable::AddOldString(target);
+    }
+  }
+
+  ExternalStringTable::ShrinkNewStrings(last - start);
+}
+
+
+Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
+                         Address new_space_front) {
   do {
     ASSERT(new_space_front <= new_space_.top());
 
@@ -761,7 +818,7 @@
     // queue is empty.
     while (new_space_front < new_space_.top()) {
       HeapObject* object = HeapObject::FromAddress(new_space_front);
-      object->Iterate(&scavenge_visitor);
+      object->Iterate(scavenge_visitor);
       new_space_front += object->Size();
     }
 
@@ -783,7 +840,7 @@
       RecordCopiedObject(target);
 #endif
       // Visit the newly copied object for pointers to new space.
-      target->Iterate(&scavenge_visitor);
+      target->Iterate(scavenge_visitor);
       UpdateRSet(target);
     }
 
@@ -791,16 +848,7 @@
     // (there are currently no more unswept promoted objects).
   } while (new_space_front < new_space_.top());
 
-  // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
-
-  // Update how much has survived scavenge.
-  survived_since_last_expansion_ +=
-      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
-
-  LOG(ResourceEvent("scavenge", "end"));
-
-  gc_state_ = NOT_IN_GC;
+  return new_space_front;
 }
 
 
@@ -3175,6 +3223,11 @@
   IterateStrongRoots(v, mode);
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
   v->Synchronize("symbol_table");
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
+    // Scavenge collections have special processing for this.
+    ExternalStringTable::Iterate(v);
+  }
+  v->Synchronize("external_string_table");
 }
 
 
@@ -3203,11 +3256,12 @@
   HandleScopeImplementer::Iterate(v);
   v->Synchronize("handlescope");
 
-  // Iterate over the builtin code objects and code stubs in the heap. Note
-  // that it is not strictly necessary to iterate over code objects on
-  // scavenge collections.  We still do it here because this same function
-  // is used by the mark-sweep collector and the deserializer.
-  Builtins::IterateBuiltins(v);
+  // Iterate over the builtin code objects and code stubs in the
+  // heap. Note that it is not necessary to iterate over code objects
+  // on scavenge collections.
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
+    Builtins::IterateBuiltins(v);
+  }
   v->Synchronize("builtins");
 
   // Iterate over global handles.
@@ -3424,6 +3478,8 @@
 void Heap::TearDown() {
   GlobalHandles::TearDown();
 
+  ExternalStringTable::TearDown();
+
   new_space_.TearDown();
 
   if (old_pointer_space_ != NULL) {
@@ -3839,8 +3895,8 @@
 
 // Triggers a depth-first traversal of reachable objects from roots
 // and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject() {
-  search_target = NULL;
+void Heap::TracePathToObject(Object* target) {
+  search_target = target;
   search_for_any_global = false;
 
   MarkRootVisitor root_visitor;
@@ -3907,8 +3963,8 @@
 int KeyedLookupCache::Hash(Map* map, String* name) {
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
-  return (addr_hash ^ name->Hash()) % kLength;
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
+  return (addr_hash ^ name->Hash()) & kCapacityMask;
 }
 
 
@@ -3991,4 +4047,35 @@
 }
 
 
+void ExternalStringTable::CleanUp() {
+  int last = 0;
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
+    if (Heap::InNewSpace(new_space_strings_[i])) {
+      new_space_strings_[last++] = new_space_strings_[i];
+    } else {
+      old_space_strings_.Add(new_space_strings_[i]);
+    }
+  }
+  new_space_strings_.Rewind(last);
+  last = 0;
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
+    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+    old_space_strings_[last++] = old_space_strings_[i];
+  }
+  old_space_strings_.Rewind(last);
+  Verify();
+}
+
+
+void ExternalStringTable::TearDown() {
+  new_space_strings_.Free();
+  old_space_strings_.Free();
+}
+
+
+List<Object*> ExternalStringTable::new_space_strings_;
+List<Object*> ExternalStringTable::old_space_strings_;
+
 } }  // namespace v8::internal
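
ScavengeExternalStringTable and ExternalStringTable::CleanUp above both use
the same compact-in-place idiom: a read cursor scans every entry, a write
cursor keeps only the survivors, and the list is rewound to the write
position. Stripped of the GC details (a plain array stands in for
List<Object*>):

    #include <cstdio>

    int main() {
      int entries[6] = {1, 0, 2, 0, 0, 3};  // 0 plays the role of a dead string
      int length = 6;

      int last = 0;                         // write cursor ('last' above)
      for (int i = 0; i < length; ++i) {    // read cursor
        if (entries[i] == 0) continue;      // dead entry: finalize and skip
        entries[last++] = entries[i];       // survivor: keep, possibly moved
      }
      length = last;                        // the List::Rewind equivalent

      for (int i = 0; i < length; ++i) std::printf("%d ", entries[i]);
      std::printf("\n");                    // prints: 1 2 3
      return 0;
    }
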
diff --git a/src/heap.h b/src/heap.h
index b37fe4b..05ff16c 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -566,6 +566,10 @@
   static Object* AllocateExternalStringFromTwoByte(
       ExternalTwoByteString::Resource* resource);
 
+  // Finalizes an external string by deleting the associated external
+  // data and clearing the resource pointer.
+  static inline void FinalizeExternalString(String* string);
+
   // Allocates an uninitialized object.  The memory is non-executable if the
   // hardware and OS allow.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -778,7 +782,7 @@
     return disallow_allocation_failure_;
   }
 
-  static void TracePathToObject();
+  static void TracePathToObject(Object* target);
   static void TracePathToGlobal();
 #endif
 
@@ -1039,6 +1043,9 @@
 
   // Performs a minor collection in new generation.
   static void Scavenge();
+  static void ScavengeExternalStringTable();
+  static Address DoScavenge(ObjectVisitor* scavenge_visitor,
+                            Address new_space_front);
 
   // Performs a major collection in the whole heap.
   static void MarkCompact(GCTracer* tracer);
@@ -1293,17 +1300,33 @@
 
   // Clear the cache.
   static void Clear();
+
+  static const int kLength = 64;
+  static const int kCapacityMask = kLength - 1;
+  static const int kMapHashShift = 2;
+
  private:
   static inline int Hash(Map* map, String* name);
-  static const int kLength = 64;
+
+  // Get the address of the keys and field_offsets arrays.  Used in
+  // generated code to perform cache lookups.
+  static Address keys_address() {
+    return reinterpret_cast<Address>(&keys_);
+  }
+
+  static Address field_offsets_address() {
+    return reinterpret_cast<Address>(&field_offsets_);
+  }
+
   struct Key {
     Map* map;
     String* name;
   };
   static Key keys_[kLength];
   static int field_offsets_[kLength];
-};
 
+  friend class ExternalReference;
+};
 
 
 // Cache for mapping (array, property name) into descriptor index.
@@ -1623,6 +1646,39 @@
 };
 
 
+// The external string table is a place where all external strings are
+// registered.  We need to keep track of such strings to properly
+// finalize them.
+class ExternalStringTable : public AllStatic {
+ public:
+  // Registers an external string.
+  inline static void AddString(String* string);
+
+  inline static void Iterate(ObjectVisitor* v);
+
+  // Restores internal invariant and gets rid of collected strings.
+  // Must be called after each Iterate() that modified the strings.
+  static void CleanUp();
+
+  // Destroys all allocated memory.
+  static void TearDown();
+
+ private:
+  friend class Heap;
+
+  inline static void Verify();
+
+  inline static void AddOldString(String* string);
+
+  // Notifies the table that only a prefix of the new list is valid.
+  inline static void ShrinkNewStrings(int position);
+
+  // To speed up scavenge collections, new space strings are kept
+  // separate from old space strings.
+  static List<Object*> new_space_strings_;
+  static List<Object*> old_space_strings_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_H_
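
The KeyedLookupCache constants moved into the public section make the table a
direct-mapped, power-of-two cache that generated code can probe: masking with
kCapacityMask == kLength - 1 only works because kLength is a power of two,
and kMapHashShift discards the alignment bits of the map pointer. A sketch of
the same lookup math (plain integers stand in for Map* and the string hash):

    #include <cstdint>
    #include <cstdio>

    static const int kLength = 64;                 // must be a power of two
    static const int kCapacityMask = kLength - 1;  // 0x3f
    static const int kMapHashShift = 2;            // drop alignment zero bits

    // Same shape as KeyedLookupCache::Hash in heap.cc above.
    static int Hash(uintptr_t map_address, uint32_t name_hash) {
      uint32_t addr_hash = static_cast<uint32_t>(map_address) >> kMapHashShift;
      return static_cast<int>((addr_hash ^ name_hash) & kCapacityMask);
    }

    int main() {
      int bucket = Hash(0x7f00a3c0u, 0x1234u);
      std::printf("bucket %d of %d\n", bucket, kLength);  // always in [0, 63]
      return 0;
    }
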
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 7c8ff31..417eb0f 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -763,19 +763,27 @@
 
 
 const char* GenericBinaryOpStub::GetName() {
-  switch (op_) {
-    case Token::ADD: return "GenericBinaryOpStub_ADD";
-    case Token::SUB: return "GenericBinaryOpStub_SUB";
-    case Token::MUL: return "GenericBinaryOpStub_MUL";
-    case Token::DIV: return "GenericBinaryOpStub_DIV";
-    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-    case Token::SAR: return "GenericBinaryOpStub_SAR";
-    case Token::SHL: return "GenericBinaryOpStub_SHL";
-    case Token::SHR: return "GenericBinaryOpStub_SHR";
-    default:         return "GenericBinaryOpStub";
+  if (name_ != NULL) return name_;
+  const int len = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
   }
+
+  OS::SNPrintF(Vector<char>(name_, len),
+               "GenericBinaryOpStub_%s_%s%s_%s%s",
+               op_name,
+               overwrite_name,
+               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+               args_in_registers_ ? "RegArgs" : "StackArgs",
+               args_reversed_ ? "_R" : "");
+  return name_;
 }
 
 
@@ -8306,6 +8314,7 @@
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ and_(ecx, Operand(edi));
+  ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ test(ecx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
   // Allocate an ascii cons string.
@@ -8348,7 +8357,7 @@
   Label non_ascii_string_add_flat_result;
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  ASSERT(kAsciiStringTag != 0);
+  ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ test(ecx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii_string_add_flat_result);
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
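
With the GetName rewrite above, stub names are assembled at runtime rather
than enumerated by hand, so every (operation, overwrite mode, flags, argument
passing) combination gets a distinct name in profiles. A tiny mirror of the
format string showing one possible output (the argument values are
illustrative, not taken from a real stub instance):

    #include <cstdio>

    int main() {
      char name[100];
      std::snprintf(name, sizeof(name), "GenericBinaryOpStub_%s_%s%s_%s%s",
                    "ADD",        // Token::Name(op_)
                    "Alloc",      // NO_OVERWRITE
                    "",           // smi code included in the stub
                    "StackArgs",  // arguments passed on the stack
                    "");          // arguments not reversed
      std::printf("%s\n", name);  // GenericBinaryOpStub_ADD_Alloc_StackArgs
      return 0;
    }
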
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 11a5163..cf1cb8a 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -665,7 +665,8 @@
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
-        args_reversed_(false) {
+        args_reversed_(false),
+        name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
@@ -684,6 +685,7 @@
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+  char* name_;
 
   const char* GetName();
 
@@ -760,11 +762,11 @@
   void Generate(MacroAssembler* masm);
 
   void GenerateCopyCharacters(MacroAssembler* masm,
-                                   Register desc,
-                                   Register src,
-                                   Register count,
-                                   Register scratch,
-                                   bool ascii);
+                              Register desc,
+                              Register src,
+                              Register count,
+                              Register scratch,
+                              bool ascii);
 
   // Should the stub check whether arguments are strings?
   bool string_check_;
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 2f2f3fb..a927e9a 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -515,20 +515,6 @@
 }
 
 
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-  Expression* expr = stmt->expression();
-  if (expr->AsLiteral() != NULL) {
-    __ mov(eax, expr->AsLiteral()->handle());
-  } else {
-    ASSERT_EQ(Expression::kValue, expr->context());
-    Visit(expr);
-    __ pop(eax);
-  }
-  EmitReturnSequence(stmt->statement_pos());
-}
-
-
 void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
@@ -549,14 +535,20 @@
 
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  Expression* rewrite = expr->var()->rewrite();
+  EmitVariableLoad(expr->var(), expr->context());
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+                                         Expression::Context context) {
+  Expression* rewrite = var->rewrite();
   if (rewrite == NULL) {
-    ASSERT(expr->var()->is_global());
+    ASSERT(var->is_global());
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in ecx and the global
     // object on the stack.
     __ push(CodeGenerator::GlobalObject());
-    __ mov(ecx, expr->name());
+    __ mov(ecx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
     // By emitting a nop we make sure that we do not have a test eax
@@ -564,8 +556,7 @@
     // Remember that the assembler may choose to do peephole optimization
     // (eg, push/pop elimination).
     __ nop();
-
-    DropAndMove(expr->context(), eax);
+    DropAndMove(context, eax);
   } else if (rewrite->AsSlot() != NULL) {
     Slot* slot = rewrite->AsSlot();
     if (FLAG_debug_code) {
@@ -586,7 +577,7 @@
           UNREACHABLE();
       }
     }
-    Move(expr->context(), slot, eax);
+    Move(context, slot, eax);
   } else {
     Comment cmnt(masm_, "Variable rewritten to Property");
     // A variable has been rewritten into an explicit access to
@@ -620,9 +611,8 @@
     // Notice: We must not have a "test eax, ..." instruction after
     // the call. It is treated specially by the LoadIC code.
     __ nop();
-
-    // Drop key and object left on the stack by IC, and push the result.
-    DropAndMove(expr->context(), eax, 2);
+    // Drop key and object left on the stack by IC.
+    DropAndMove(context, eax, 2);
   }
 }
 
@@ -656,35 +646,14 @@
 
 void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Label exists;
-  // Registers will be used as follows:
-  // edi = JS function.
-  // ebx = literals array.
-  // eax = boilerplate
-
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
-  int literal_offset =
-      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
-  __ mov(eax, FieldOperand(ebx, literal_offset));
-  __ cmp(eax, Factory::undefined_value());
-  __ j(not_equal, &exists);
-  // Create boilerplate if it does not exist.
-  // Literal array (0).
-  __ push(ebx);
-  // Literal index (1).
+  __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  // Constant properties (2).
   __ push(Immediate(expr->constant_properties()));
-  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ bind(&exists);
-  // eax contains boilerplate.
-  // Clone boilerplate.
-  __ push(eax);
-  if (expr->depth() == 1) {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
   }
 
   // If result_saved == true: The result is saved on top of the
@@ -780,31 +749,14 @@
 
 void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
-  Label make_clone;
-
-  // Fetch the function's literals array.
   __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
-  // Check if the literal's boilerplate has been instantiated.
-  int offset =
-      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
-  __ mov(eax, FieldOperand(ebx, offset));
-  __ cmp(eax, Factory::undefined_value());
-  __ j(not_equal, &make_clone);
-
-  // Instantiate the boilerplate.
-  __ push(ebx);
+  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->literals()));
-  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
-  __ bind(&make_clone);
-  // Clone the boilerplate.
-  __ push(eax);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
@@ -874,10 +826,37 @@
 }
 
 
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
+                                              Expression::Context context) {
+  Literal* key = prop->key()->AsLiteral();
+  __ mov(ecx, Immediate(key->handle()));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  Move(context, eax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  Move(context, eax);
+}
+
+
+void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
+                                                 Expression::Context context) {
+  GenericBinaryOpStub stub(op,
+                           NO_OVERWRITE,
+                           NO_GENERIC_BINARY_FLAGS);
+  __ CallStub(&stub);
+  Move(context, eax);
+}
+
+
 void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
-
+  ASSERT(var->is_global() || var->slot() != NULL);
   if (var->is_global()) {
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in eax, variable name in
@@ -982,35 +961,6 @@
         UNREACHABLE();
         break;
     }
-  } else {
-    Property* property = var->rewrite()->AsProperty();
-    ASSERT_NOT_NULL(property);
-
-    // Load object and key onto the stack.
-    Slot* object_slot = property->obj()->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
-    Move(Expression::kValue, object_slot, eax);
-
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    Move(Expression::kValue, key_literal);
-
-    // Value to store was pushed before object and key on the stack.
-    __ mov(eax, Operand(esp, 2 * kPointerSize));
-
-    // Arguments to ic is value in eax, object and key on stack.
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-
-    if (expr->context() == Expression::kEffect) {
-      __ add(Operand(esp), Immediate(3 * kPointerSize));
-    } else if (expr->context() == Expression::kValue) {
-      // Value is still on the stack in esp[2 * kPointerSize]
-      __ add(Operand(esp), Immediate(2 * kPointerSize));
-    } else {
-      __ mov(eax, Operand(esp, 2 * kPointerSize));
-      DropAndMove(expr->context(), eax, 3);
-    }
   }
 }
 
@@ -1707,7 +1657,65 @@
 }
 
 
-#undef __
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  Move(expr->context(), eax);
+}
 
 
+Register FastCodeGenerator::result_register() { return eax; }
+
+
+Register FastCodeGenerator::context_register() { return esi; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ mov(Operand(ebp, frame_offset), value);
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FastCodeGenerator::EnterFinallyBlock() {
+  // Cook return address on top of stack (smi encoded Code* delta)
+  ASSERT(!result_register().is(edx));
+  __ mov(edx, Operand(esp, 0));
+  __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  ASSERT_EQ(0, kSmiTag);
+  __ add(edx, Operand(edx));  // Convert to smi.
+  __ mov(Operand(esp, 0), edx);
+  // Store result register while executing finally block.
+  __ push(result_register());
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+  ASSERT(!result_register().is(edx));
+  // Restore result register from stack.
+  __ pop(result_register());
+  // Uncook return address.
+  __ mov(edx, Operand(esp, 0));
+  __ sar(edx, 1);  // Convert smi to int.
+  __ add(Operand(edx), Immediate(masm_->CodeObject()));
+  __ mov(Operand(esp, 0), edx);
+  // And return.
+  __ ret(0);
+}
+
+
+void FastCodeGenerator::ThrowException() {
+  __ push(result_register());
+  __ CallRuntime(Runtime::kThrow, 1);
+}
+
+#undef __
+
 } }  // namespace v8::internal
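
EnterFinallyBlock and ExitFinallyBlock above "cook" the return address: the
absolute address is converted to an offset from the code object and smi-tagged
by doubling (the ASSERTs check kSmiTag == 0 and a one-bit tag), so a moving GC
never sees a raw interior pointer on the stack. The same arithmetic outside of
assembly (addresses are illustrative):

    #include <cstdint>

    int main() {
      intptr_t code_object = 0x40000;  // start of the code object
      intptr_t return_addr = 0x40128;  // raw return address on the stack

      // Cook: offset from the code object, then smi-tag by doubling.
      intptr_t cooked = (return_addr - code_object) << 1;

      // Uncook (ExitFinallyBlock): arithmetic shift untags, re-add the base.
      intptr_t uncooked = (cooked >> 1) + code_object;

      return uncooked == return_addr ? 0 : 1;
    }
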
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 6988fe0..2e30b28 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -48,9 +48,13 @@
 // must always call a backup property load that is complete.
 // This function is safe to call if the receiver has fast properties,
 // or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register r0, Register r1, Register r2,
-                                   Register name) {
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss_label,
+                                   Register r0,
+                                   Register r1,
+                                   Register r2,
+                                   Register name,
+                                   DictionaryCheck check_dictionary) {
   // Register use:
   //
   // r0   - used to hold the property dictionary.
@@ -86,11 +90,15 @@
   __ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
   __ j(equal, miss_label, not_taken);
 
-  // Check that the properties array is a dictionary.
+  // Load properties array.
   __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
-  __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
-         Immediate(Factory::hash_table_map()));
-  __ j(not_equal, miss_label);
+
+  // Check that the properties array is a dictionary.
+  if (check_dictionary == CHECK_DICTIONARY) {
+    __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+           Immediate(Factory::hash_table_map()));
+    __ j(not_equal, miss_label);
+  }
 
   // Compute the capacity mask.
   const int kCapacityOffset =
@@ -223,7 +231,8 @@
   //  -- esp[4] : name
   //  -- esp[8] : receiver
   // -----------------------------------
-  Label slow, check_string, index_int, index_string, check_pixel_array;
+  Label slow, check_string, index_int, index_string;
+  Label check_pixel_array, probe_dictionary;
 
   // Load name and receiver.
   __ mov(eax, Operand(esp, kPointerSize));
@@ -302,17 +311,72 @@
   __ test(ebx, Immediate(String::kIsArrayIndexMask));
   __ j(not_zero, &index_string, not_taken);
 
-  // If the string is a symbol, do a quick inline probe of the receiver's
-  // dictionary, if it exists.
+  // Is the string a symbol?
   __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
   __ test(ebx, Immediate(kIsSymbolMask));
   __ j(zero, &slow, not_taken);
-  // Probe the dictionary leaving result in ecx.
-  GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary leaving result in ecx.
+  __ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Factory::hash_table_map()));
+  __ j(equal, &probe_dictionary);
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(edx, ebx);
+  __ shr(edx, KeyedLookupCache::kMapHashShift);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ shr(eax, String::kHashShift);
+  __ xor_(edx, Operand(eax));
+  __ and_(edx, KeyedLookupCache::kCapacityMask);
+
+  // Load the key (consisting of map and symbol) from the cache and
+  // check for match.
+  ExternalReference cache_keys
+      = ExternalReference::keyed_lookup_cache_keys();
+  __ mov(edi, edx);
+  __ shl(edi, kPointerSizeLog2 + 1);
+  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+  __ j(not_equal, &slow);
+  __ add(Operand(edi), Immediate(kPointerSize));
+  __ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
+  __ cmp(edi, Operand(esp, kPointerSize));
+  __ j(not_equal, &slow);
+
+  // Get field offset and check that it is an in-object property.
+  ExternalReference cache_field_offsets
+      = ExternalReference::keyed_lookup_cache_field_offsets();
+  __ mov(eax,
+         Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
+  __ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+  __ cmp(eax, Operand(edx));
+  __ j(above_equal, &slow);
+
+  // Load in-object property.
+  __ sub(eax, Operand(edx));
+  __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ add(eax, Operand(edx));
+  __ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
+  __ ret(0);
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  GenerateDictionaryLoad(masm,
+                         &slow,
+                         ebx,
+                         ecx,
+                         edx,
+                         eax,
+                         DICTIONARY_CHECK_DONE);
   GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
   __ mov(eax, Operand(ecx));
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
+
   // If the hash field contains an array index pick it out. The assert checks
   // that the constants for the maximum number of digits for an array index
   // cached in the hash field and the number of bits reserved for it does not
@@ -885,7 +949,7 @@
                                  bool is_global_object,
                                  Label* miss) {
   // Search dictionary - put result in register edx.
-  GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
+  GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx, CHECK_DICTIONARY);
 
   // Move the result to register edi and check that it isn't a smi.
   __ mov(edi, Operand(edx));
@@ -1088,7 +1152,7 @@
 
   // Search the dictionary placing the result in eax.
   __ bind(&probe);
-  GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
+  GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
   GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
   __ ret(0);
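
The inline probe emitted above is the assembly form of a two-array cache
lookup: keys_ stores (map, symbol) pairs, which is why the bucket index is
scaled by two pointers (the shl edi, kPointerSizeLog2 + 1), while
field_offsets_ is indexed by the plain bucket number. The same steps in C++
(hypothetical helper, illustrative constants):

    #include <cstdint>

    struct Key { void* map; void* name; };

    static const int kCapacityMask = 63;
    static const int kMapHashShift = 2;
    static const int kHashShift = 2;   // stand-in for String::kHashShift

    static Key keys[64];               // (map, symbol) pairs
    static int field_offsets[64];      // in-object property indices

    static int Bucket(void* map, uint32_t hash_field) {
      uint32_t addr_hash =
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
      return static_cast<int>((addr_hash ^ (hash_field >> kHashShift)) &
                              kCapacityMask);
    }

    static bool Probe(void* map, void* name, uint32_t hash_field, int* offset) {
      int bucket = Bucket(map, hash_field);
      if (keys[bucket].map != map || keys[bucket].name != name) return false;
      *offset = field_offsets[bucket];  // hit: load the cached field offset
      return true;
    }

    int main() {
      int m, n;                         // stand-ins for a Map and a symbol
      int bucket = Bucket(&m, 0x1234u);
      keys[bucket].map = &m;            // what the slow path records
      keys[bucket].name = &n;
      field_offsets[bucket] = 3;
      int offset = -1;
      return Probe(&m, &n, 0x1234u, &offset) && offset == 3 ? 0 : 1;
    }
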
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b91caa8..ac2895e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -504,6 +504,13 @@
 }
 
 
+void MacroAssembler::PopTryHandler() {
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                    JSObject* holder, Register holder_reg,
                                    Register scratch,
@@ -834,10 +841,9 @@
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  mov(scratch1, length);
   ASSERT(kShortSize == 2);
-  shl(scratch1, 1);
-  add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+  // scratch1 = length * 2 + kObjectAlignmentMask.
+  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
   and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
@@ -1016,17 +1022,37 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
+Object* MacroAssembler::TryCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  Object* result = stub->TryGetCode();
+  if (!result->IsFailure()) {
+    call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+  }
+  return result;
+}
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
+Object* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  Object* result = stub->TryGetCode();
+  if (!result->IsFailure()) {
+    jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+  }
+  return result;
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
@@ -1331,6 +1357,18 @@
 }
 
 
+void MacroAssembler::Drop(int stack_elements) {
+  if (stack_elements > 0) {
+    add(Operand(esp), Immediate(stack_elements * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  mov(dst, value);
+}
+
+
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a41d42e..160dbcb 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -149,6 +149,8 @@
   // address must be pushed before calling this helper.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  void PopTryHandler();
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -285,12 +287,22 @@
   // ---------------------------------------------------------------------------
   // Runtime calls
 
-  // Call a code stub.
+  // Call a code stub.  Generate the code if necessary.
   void CallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump).
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  Object* TryCallStub(CodeStub* stub);
+
+  // Tail call a code stub (jump).  Generate the code if necessary.
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  Object* TryTailCallStub(CodeStub* stub);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -323,6 +335,12 @@
 
   void Ret();
 
+  void Drop(int element_count);
+
+  void Call(Label* target) { call(target); }
+
+  void Move(Register target, Handle<Object> value);
+
   struct Unresolved {
     int pc;
     uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 425c51d..846b667 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -754,7 +754,7 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                         JSObject* holder,
                                         Register receiver,
                                         Register name_reg,
@@ -762,7 +762,8 @@
                                         Register scratch2,
                                         AccessorInfo* callback,
                                         String* name,
-                                        Label* miss) {
+                                        Label* miss,
+                                        Failure** failure) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
   __ j(zero, miss, not_taken);
@@ -798,7 +799,14 @@
   Address getter_address = v8::ToCData<Address>(callback->getter());
   ApiFunction fun(getter_address);
   ApiGetterEntryStub stub(callback_handle, &fun);
-  __ CallStub(&stub);
+  // Calling the stub may try to allocate (if the code is not already
+  // generated).  Do not allow the call to perform a garbage
+  // collection but instead return the allocation failure object.
+  Object* result = masm()->TryCallStub(&stub);
+  if (result->IsFailure()) {
+    *failure = Failure::cast(result);
+    return false;
+  }
 
   // We need to avoid using eax since that now holds the result.
   Register tmp = other.is(eax) ? reg : other;
@@ -806,6 +814,7 @@
   __ LeaveInternalFrame();
 
   __ ret(0);
+  return true;
 }
 
 
@@ -1420,10 +1429,10 @@
 }
 
 
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback,
-                                              String* name) {
+                                              AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
   //  -- esp[0] : return address
@@ -1432,8 +1441,11 @@
   Label miss;
 
   __ mov(eax, Operand(esp, kPointerSize));
-  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
-                       callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1597,8 +1609,11 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
-                       callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
+
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/src/ic.cc b/src/ic.cc
index 2779356..57c9af2 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -409,7 +409,7 @@
   if (!lookup.IsValid()) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    if (is_contextual()) {
+    if (IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     return TypeError("undefined_method", object, name);
@@ -428,7 +428,7 @@
     // If the object does not have the requested property, check which
     // exception we need to throw.
     if (attr == ABSENT) {
-      if (is_contextual()) {
+      if (IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
       return TypeError("undefined_method", object, name);
@@ -628,7 +628,7 @@
 
   // If lookup is invalid, check if we need to throw an exception.
   if (!lookup.IsValid()) {
-    if (FLAG_strict || is_contextual()) {
+    if (FLAG_strict || IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     LOG(SuspectReadEvent(*name, *object));
@@ -671,7 +671,7 @@
     if (result->IsFailure()) return result;
     // If the property is not present, check if we need to throw an
     // exception.
-    if (attr == ABSENT && is_contextual()) {
+    if (attr == ABSENT && IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     return result;
@@ -843,7 +843,7 @@
 
     // If lookup is invalid, check if we need to throw an exception.
     if (!lookup.IsValid()) {
-      if (FLAG_strict || is_contextual()) {
+      if (FLAG_strict || IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
     }
@@ -859,7 +859,7 @@
       if (result->IsFailure()) return result;
       // If the property is not present, check if we need to throw an
       // exception.
-      if (attr == ABSENT && is_contextual()) {
+      if (attr == ABSENT && IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
       return result;
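
The switch from is_contextual() to IsContextual(object) adds a cheap receiver test (contextual accesses always see a global object receiver) in front of the slower reloc-mode computation; the choice between the two exceptions is unchanged. A behavioral sketch:

  // Contextual access (no explicit receiver) to an undefined name
  // throws a ReferenceError ("not_defined").
  try { missing(); } catch (e) { /* e instanceof ReferenceError */ }

  // The same lookup through an explicit receiver throws a TypeError
  // ("undefined_method") instead.
  try { ({}).missing(); } catch (e) { /* e instanceof TypeError */ }
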
diff --git a/src/ic.h b/src/ic.h
index 8709088..f71eaaa 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -33,6 +33,11 @@
 namespace v8 {
 namespace internal {
 
+// Flag indicating whether an IC stub needs to check that a backing
+// store is in dictionary case.
+enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
+
+
 // IC_UTIL_LIST defines all utility functions called from generated
 // inline caching code. The argument for the macro, ICU, is the function name.
 #define IC_UTIL_LIST(ICU)                             \
@@ -99,7 +104,16 @@
 
   // Returns whether this IC is for contextual (no explicit receiver)
   // access to properties.
-  bool is_contextual() {
+  bool IsContextual(Handle<Object> receiver) {
+    if (receiver->IsGlobalObject()) {
+      return SlowIsContextual();
+    } else {
+      ASSERT(!SlowIsContextual());
+      return false;
+    }
+  }
+
+  bool SlowIsContextual() {
     return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
   }
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 63a6d6e..0fe4328 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -77,8 +77,13 @@
 #elif V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
 #include "assembler.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2.h"
+#include "arm/assembler-thumb2-inl.h"
+#else
 #include "arm/assembler-arm.h"
 #include "arm/assembler-arm-inl.h"
+#endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
 #else
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 81819b7..093b18a 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -155,6 +155,8 @@
   // objects (empty string, illegal builtin).
   StubCache::Clear();
 
+  ExternalStringTable::CleanUp();
+
   // If we've just compacted old space there's no reason to check the
   // fragmentation limit. Just return.
   if (HasCompacted()) return;
@@ -369,41 +371,18 @@
 class SymbolTableCleaner : public ObjectVisitor {
  public:
   SymbolTableCleaner() : pointers_removed_(0) { }
-  void VisitPointers(Object** start, Object** end) {
+
+  virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
         // Check if the symbol being pruned is an external symbol. We need to
         // delete the associated external data as this symbol is going away.
 
-        // Since the object is not marked we can access its map word safely
-        // without having to worry about marking bits in the object header.
-        Map* map = HeapObject::cast(*p)->map();
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        uint32_t type = map->instance_type();
-        bool is_external = (type & kStringRepresentationMask) ==
-                           kExternalStringTag;
-        if (is_external) {
-          bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
-          byte* resource_addr = reinterpret_cast<byte*>(*p) +
-                                ExternalString::kResourceOffset -
-                                kHeapObjectTag;
-          if (is_two_byte) {
-            v8::String::ExternalStringResource** resource =
-                reinterpret_cast<v8::String::ExternalStringResource**>
-                (resource_addr);
-            delete *resource;
-            // Clear the resource pointer in the symbol.
-            *resource = NULL;
-          } else {
-            v8::String::ExternalAsciiStringResource** resource =
-                reinterpret_cast<v8::String::ExternalAsciiStringResource**>
-                (resource_addr);
-            delete *resource;
-            // Clear the resource pointer in the symbol.
-            *resource = NULL;
-          }
+        if ((*p)->IsExternalString()) {
+          Heap::FinalizeExternalString(String::cast(*p));
         }
         // Set the entry to null_value (as deleted).
         *p = Heap::raw_unchecked_null_value();
@@ -546,34 +525,7 @@
 }
 
 
-class SymbolMarkingVisitor : public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    MarkingVisitor marker;
-    for (Object** p = start; p < end; p++) {
-      if (!(*p)->IsHeapObject()) continue;
-
-      HeapObject* object = HeapObject::cast(*p);
-      // If the object is marked, we have marked or are in the process
-      // of marking subparts.
-      if (object->IsMarked()) continue;
-
-      // The object is unmarked, we do not need to unmark to use its
-      // map.
-      Map* map = object->map();
-      object->IterateBody(map->instance_type(),
-                          object->SizeFromMap(map),
-                          &marker);
-    }
-  }
-};
-
-
 void MarkCompactCollector::MarkSymbolTable() {
-  // Objects reachable from symbols are marked as live so as to ensure
-  // that if the symbol itself remains alive after GC for any reason,
-  // and if it is a cons string backed by an external string (even indirectly),
-  // then the external string does not receive a weak reference callback.
   SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
   // Mark the symbol table itself.
   SetMark(symbol_table);
@@ -581,11 +533,6 @@
   MarkingVisitor marker;
   symbol_table->IteratePrefix(&marker);
   ProcessMarkingStack(&marker);
-  // Mark subparts of the symbols but not the symbols themselves
-  // (unless reachable from another symbol).
-  SymbolMarkingVisitor symbol_marker;
-  symbol_table->IterateElements(&symbol_marker);
-  ProcessMarkingStack(&marker);
 }
 
 
@@ -774,6 +721,8 @@
   SymbolTableCleaner v;
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
+  ExternalStringTable::Iterate(&v);
+  ExternalStringTable::CleanUp();
 
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
diff --git a/src/math.js b/src/math.js
index e3d266e..07f7295 100644
--- a/src/math.js
+++ b/src/math.js
@@ -29,7 +29,6 @@
 // Keep reference to original values of some global properties.  This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-const $Infinity = global.Infinity;
 const $floor = MathFloor;
 const $random = MathRandom;
 const $abs = MathAbs;
@@ -118,26 +117,40 @@
 
 // ECMA 262 - 15.8.2.11
 function MathMax(arg1, arg2) {  // length == 2
-  var r = -$Infinity;
   var length = %_ArgumentsLength();
-  for (var i = 0; i < length; i++) {
-    var n = ToNumber(%_Arguments(i));
+  if (length == 0) {
+    return -1/0;  // Compiler constant-folds this to -Infinity.
+  }
+  var r = arg1;
+  if (!IS_NUMBER(r)) r = ToNumber(r);
+  if (NUMBER_IS_NAN(r)) return r;
+  for (var i = 1; i < length; i++) {
+    var n = %_Arguments(i);
+    if (!IS_NUMBER(n)) n = ToNumber(n);
     if (NUMBER_IS_NAN(n)) return n;
-    // Make sure +0 is considered greater than -0.
-    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
+    // Make sure +0 is considered greater than -0.  -0 is never a Smi, +0 can be
+    // a Smi or heap number.
+    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
   }
   return r;
 }
 
 // ECMA 262 - 15.8.2.12
 function MathMin(arg1, arg2) {  // length == 2
-  var r = $Infinity;
   var length = %_ArgumentsLength();
-  for (var i = 0; i < length; i++) {
-    var n = ToNumber(%_Arguments(i));
+  if (length == 0) {
+    return 1/0;  // Compiler constant-folds this to Infinity.
+  }
+  var r = arg1;
+  if (!IS_NUMBER(r)) r = ToNumber(r);
+  if (NUMBER_IS_NAN(r)) return r;
+  for (var i = 1; i < length; i++) {
+    var n = %_Arguments(i);
+    if (!IS_NUMBER(n)) n = ToNumber(n);
     if (NUMBER_IS_NAN(n)) return n;
-    // Make sure -0 is considered less than +0.
-    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
+    // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be
+    // a Smi or a heap number.
+    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
   }
   return r;
 }
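
The rewritten loops preserve two observable details: a NaN argument is returned as soon as it is seen, and the plus/minus zero tie-break relies on the sign of 1/x, since -0 is never a Smi and 1/-0 is -Infinity. A quick sketch of the results:

  Math.max(1, NaN, 3);   // NaN, short-circuits at the NaN argument
  1 / Math.max(-0, 0);   // Infinity, i.e. the result is +0
  1 / Math.min(-0, 0);   // -Infinity, i.e. the result is -0
  Math.max();            // -Infinity
  Math.min();            // Infinity
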
diff --git a/src/messages.js b/src/messages.js
index 1e5053d..bdcbf91 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -157,6 +157,11 @@
       instanceof_nonobject_proto:   "Function has non-object prototype '%0' in instanceof check",
       null_to_object:               "Cannot convert null to object",
       reduce_no_initial:            "Reduce of empty array with no initial value",
+      getter_must_be_callable:      "Getter must be a function: %0",
+      setter_must_be_callable:      "Setter must be a function: %0",
+      value_and_accessor:           "Invalid property.  A property cannot both have accessors and be writable or have a value: %0",
+      proto_object_or_null:         "Object prototype may only be an Object or null",
+      property_desc_object:         "Property description must be an object: %0",
       // RangeError
       invalid_array_length:         "Invalid array length",
       stack_overflow:               "Maximum call stack size exceeded",
diff --git a/src/runtime.cc b/src/runtime.cc
index 65dfd13..ac61de2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -398,6 +398,82 @@
 }
 
 
+static Object* Runtime_CreateObjectLiteral(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the function's literals array and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+}
+
+
+static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the function's literals array and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+}
+
+
+static Object* Runtime_CreateArrayLiteral(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the function's literals array and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+}
+
+
+static Object* Runtime_CreateArrayLiteralShallow(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the function's literals array and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+}
+
+
 static Object* Runtime_CreateCatchExtensionObject(Arguments args) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[0]);
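
All four entries above share one caching pattern: the function's literals array caches a boilerplate object per literal site, created on first use, and every evaluation returns a copy of it, deep when the literal nests other literals (depth greater than 1) and shallow otherwise. A JavaScript-level sketch of why the depth distinction matters (illustrative code, not from the patch):

  function makeFlat()   { return {a: 1, b: 2}; }  // depth 1: shallow copy
  function makeNested() { return {a: {b: 1}}; }   // depth > 1: deep copy

  // Every evaluation must produce fresh objects all the way down.
  var o1 = makeNested();
  o1.a.b = 42;
  var o2 = makeNested();
  // o2.a.b is still 1; a shallow copy of the boilerplate would have
  // shared the inner object between o1 and o2.
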
@@ -719,12 +795,15 @@
     if (*initial_value != NULL) {
       if (index >= 0) {
         // The variable or constant context slot should always be in
-        // the function context; not in any outer context nor in the
-        // arguments object.
-        ASSERT(holder.is_identical_to(context));
-        if (((attributes & READ_ONLY) == 0) ||
-            context->get(index)->IsTheHole()) {
-          context->set(index, *initial_value);
+        // the function context or the arguments object.
+        if (holder->IsContext()) {
+          ASSERT(holder.is_identical_to(context));
+          if (((attributes & READ_ONLY) == 0) ||
+              context->get(index)->IsTheHole()) {
+            context->set(index, *initial_value);
+          }
+        } else {
+          Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
         }
       } else {
         // Slow case: The property is not in the FixedArray part of the context.
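
The new branch accepts an indexed slot whose holder is the arguments object rather than the function context, and stores the initial value through SetElement. A hypothetical trigger consistent with this code path is an eval-introduced declaration carrying an initial value that resolves to a parameter aliased by the arguments object (a sketch, not taken from the patch):

  function f(a) {
    // The declaration resolves to the parameter's slot, which is
    // aliased by the arguments object in this function.
    eval("function a() { return 42; }");
    return arguments[0]();  // 42
  }
  f(1);
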
@@ -7805,7 +7884,8 @@
 
   HandleScope scope;
 
-  int initial_size = limit < 10 ? limit : 10;
+  limit = Max(limit, 0);  // Ensure that limit is not negative.
+  int initial_size = Min(limit, 10);
   Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
 
   StackFrameIterator iter;
diff --git a/src/runtime.h b/src/runtime.h
index 8580233..f13c424 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -223,6 +223,10 @@
   F(CreateObjectLiteralBoilerplate, 3, 1) \
   F(CloneLiteralBoilerplate, 1, 1) \
   F(CloneShallowLiteralBoilerplate, 1, 1) \
+  F(CreateObjectLiteral, 3, 1) \
+  F(CreateObjectLiteralShallow, 3, 1) \
+  F(CreateArrayLiteral, 3, 1) \
+  F(CreateArrayLiteralShallow, 3, 1) \
   \
   /* Catch context extension objects */ \
   F(CreateCatchExtensionObject, 2, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index 105749a..1b65fe5 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -122,6 +122,12 @@
     return %StringCompare(this, x);
   }
 
+  // If one of the operands is undefined, it will convert to NaN and
+  // thus the result should be as if one of the operands were NaN.
+  if (IS_UNDEFINED(this) || IS_UNDEFINED(x)) {
+    return ncr;
+  }
+
   // Default implementation.
   var a = %ToPrimitive(this, NUMBER_HINT);
   var b = %ToPrimitive(x, NUMBER_HINT);
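
The early return encodes the ECMAScript rule that undefined converts to NaN under ToNumber, so every ordering comparison involving undefined produces the not-comparable result (ncr). Observable consequences:

  undefined < 1;           // false
  undefined > 1;           // false
  undefined <= undefined;  // false
  // Equality is a separate algorithm and is unaffected:
  undefined == undefined;  // true
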
diff --git a/src/scopes.cc b/src/scopes.cc
index 7da06cd..a47d373 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -189,8 +189,7 @@
       variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
                          false, Variable::THIS);
   var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
-  receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
-  receiver_->BindTo(var);
+  receiver_ = var;
 
   if (is_function_scope()) {
     // Declare 'arguments' variable which exists in all functions.
diff --git a/src/scopes.h b/src/scopes.h
index fc627df..9b506d9 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -206,8 +206,13 @@
   // ---------------------------------------------------------------------------
   // Accessors.
 
-  // The variable corresponding to the (function) receiver.
-  VariableProxy* receiver() const  { return receiver_; }
+  // A new variable proxy corresponding to the (function) receiver.
+  VariableProxy* receiver() const {
+    VariableProxy* proxy =
+        new VariableProxy(Factory::this_symbol(), true, false);
+    proxy->BindTo(receiver_);
+    return proxy;
+  }
 
   // The variable holding the function literal for named function
   // literals, or NULL.
@@ -314,7 +319,7 @@
   // Declarations.
   ZoneList<Declaration*> decls_;
   // Convenience variable.
-  VariableProxy* receiver_;
+  Variable* receiver_;
   // Function variable, if any; function scopes only.
   Variable* function_;
   // Convenience variable; function scopes only.
diff --git a/src/serialize.cc b/src/serialize.cc
index 899e2e7..76d68b0 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -55,9 +55,8 @@
 
   static int MappedTo(HeapObject* obj) {
     ASSERT(IsMapped(obj));
-    return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
-                                      Hash(obj),
-                                      false)->value);
+    return static_cast<int>(reinterpret_cast<intptr_t>(
+        serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
   }
 
   static void Map(HeapObject* obj, int to) {
@@ -81,7 +80,7 @@
   }
 
   static uint32_t Hash(HeapObject* obj) {
-    return reinterpret_cast<intptr_t>(obj->address());
+    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
   }
 
   static void* Key(HeapObject* obj) {
@@ -485,6 +484,15 @@
       21,
       "NativeRegExpMacroAssembler::GrowStack()");
 #endif
+  // Keyed lookup cache.
+  Add(ExternalReference::keyed_lookup_cache_keys().address(),
+      UNCLASSIFIED,
+      22,
+      "KeyedLookupCache::keys()");
+  Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
+      UNCLASSIFIED,
+      23,
+      "KeyedLookupCache::field_offsets()");
 }
 
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 51d9ddb..9ab83be 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -120,7 +120,7 @@
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
-    code = compiler.CompileLoadCallback(receiver, holder, callback, name);
+    code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -831,7 +831,7 @@
   // can't use either LoadIC or KeyedLoadIC constructors.
   IC ic(IC::NO_EXTRA_FRAME);
   ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
-  if (!ic.is_contextual()) return Heap::undefined_value();
+  if (!ic.SlowIsContextual()) return Heap::undefined_value();
 
   // Throw a reference error.
   HandleScope scope;
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 788c532..2418c1f 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -405,7 +405,7 @@
                          String* name,
                          Label* miss);
 
-  void GenerateLoadCallback(JSObject* object,
+  bool GenerateLoadCallback(JSObject* object,
                             JSObject* holder,
                             Register receiver,
                             Register name_reg,
@@ -413,7 +413,8 @@
                             Register scratch2,
                             AccessorInfo* callback,
                             String* name,
-                            Label* miss);
+                            Label* miss,
+                            Failure** failure);
 
   void GenerateLoadConstant(JSObject* object,
                             JSObject* holder,
@@ -447,10 +448,10 @@
                            JSObject* holder,
                            int index,
                            String* name);
-  Object* CompileLoadCallback(JSObject* object,
+  Object* CompileLoadCallback(String* name,
+                              JSObject* object,
                               JSObject* holder,
-                              AccessorInfo* callback,
-                              String* name);
+                              AccessorInfo* callback);
   Object* CompileLoadConstant(JSObject* object,
                               JSObject* holder,
                               Object* value,
diff --git a/src/token.cc b/src/token.cc
index 0a4ad4c..8cee99b 100644
--- a/src/token.cc
+++ b/src/token.cc
@@ -32,13 +32,11 @@
 namespace v8 {
 namespace internal {
 
-#ifdef DEBUG
 #define T(name, string, precedence) #name,
 const char* Token::name_[NUM_TOKENS] = {
   TOKEN_LIST(T, T, IGNORE_TOKEN)
 };
 #undef T
-#endif
 
 
 #define T(name, string, precedence) string,
diff --git a/src/token.h b/src/token.h
index a60704c..2a228d6 100644
--- a/src/token.h
+++ b/src/token.h
@@ -66,8 +66,9 @@
   T(DEC, "--", 0)                                                       \
                                                                         \
   /* Assignment operators. */                                           \
-  /* IsAssignmentOp() relies on this block of enum values */            \
-  /* being contiguous and sorted in the same order! */                  \
+  /* IsAssignmentOp() and Assignment::is_compound() rely on */          \
+  /* this block of enum values being contiguous and sorted in the */    \
+  /* same order! */                                                     \
   T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
   T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
   T(ASSIGN, "=", 2)                                                     \
@@ -211,14 +212,12 @@
   };
 #undef T
 
-#ifdef DEBUG
   // Returns a string corresponding to the C++ token name
   // (e.g. "LT" for the token LT).
   static const char* Name(Value tok) {
     ASSERT(0 <= tok && tok < NUM_TOKENS);
     return name_[tok];
   }
-#endif
 
   // Predicates
   static bool IsAssignmentOp(Value tok) {
@@ -261,9 +260,7 @@
   }
 
  private:
-#ifdef DEBUG
   static const char* name_[NUM_TOKENS];
-#endif
   static const char* string_[NUM_TOKENS];
   static int8_t precedence_[NUM_TOKENS];
 };
diff --git a/src/v8-counters.h b/src/v8-counters.h
index d6f53fa..158824d 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -74,8 +74,6 @@
   SC(objs_since_last_full, V8.ObjsSinceLastFull)                 \
   SC(symbol_table_capacity, V8.SymbolTableCapacity)              \
   SC(number_of_symbols, V8.NumberOfSymbols)                      \
-  /* Current amount of memory in external string buffers. */     \
-  SC(total_external_string_memory, V8.TotalExternalStringMemory) \
   SC(script_wrappers, V8.ScriptWrappers)                         \
   SC(call_initialize_stubs, V8.CallInitializeStubs)              \
   SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs)      \
diff --git a/src/v8natives.js b/src/v8natives.js
index 8f9adcb..a664095 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -41,6 +41,7 @@
 const $isNaN = GlobalIsNaN;
 const $isFinite = GlobalIsFinite;
 
+
 // ----------------------------------------------------------------------------
 
 
@@ -87,7 +88,7 @@
 
 // ECMA-262 - 15.1.2.2
 function GlobalParseInt(string, radix) {
-  if (radix === void 0) {
+  if (IS_UNDEFINED(radix)) {
     // Some people use parseInt instead of Math.floor.  This
     // optimization makes parseInt on a Smi 12 times faster (60ns
     // vs 800ns).  The following optimization makes parseInt on a
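
The guard now uses the IS_UNDEFINED macro, but the behavior is the same: the fast path applies only when no radix argument is supplied. A small sketch of the two paths:

  parseInt(12.75);     // 12, no radix: eligible for the fast path
  parseInt("ff", 16);  // 255, explicit radix: general parsing path
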
@@ -280,6 +281,207 @@
 }
 
 
+// ES5 8.10.1.
+function IsAccessorDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return false;
+  return desc.hasGetter_ || desc.hasSetter_;
+}
+
+
+// ES5 8.10.2.
+function IsDataDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return false;
+  return desc.hasValue_ || desc.hasWritable_;
+}
+
+
+// ES5 8.10.3.
+function IsGenericDescriptor(desc) {
+  return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
+}
+
+
+function IsInconsistentDescriptor(desc) {
+  return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
+}
+
+
+// ES5 8.10.5.
+function ToPropertyDescriptor(obj) {
+  if (!IS_OBJECT(obj)) {
+    throw MakeTypeError("property_desc_object", [obj]);
+  }
+  var desc = new PropertyDescriptor();
+
+  if ("enumerable" in obj) {
+    desc.setEnumerable(ToBoolean(obj.enumerable));
+  }
+
+  if ("configurable" in obj) {
+    desc.setConfigurable(ToBoolean(obj.configurable));
+  }
+
+  if ("value" in obj) {
+    desc.setValue(obj.value);
+  }
+
+  if ("writable" in obj) {
+    desc.setWritable(ToBoolean(obj.writable));
+  }
+
+  if ("get" in obj) {
+    var get = obj.get;
+    if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
+      throw MakeTypeError("getter_must_be_callable", [get]);
+    }
+    desc.setGet(get);
+  }
+
+  if ("set" in obj) {
+    var set = obj.set;
+    if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
+      throw MakeTypeError("setter_must_be_callable", [set]);
+    }
+    desc.setSet(set);
+  }
+
+  if (IsInconsistentDescriptor(desc)) {
+    throw MakeTypeError("value_and_accessor", [obj]);
+  }
+  return desc;
+}
+
+
+function PropertyDescriptor() {
+  // Initialize here so they are all in-object and have the same map.
+  // Default values from ES5 8.6.1.
+  this.value_ = void 0;
+  this.hasValue_ = false;
+  this.writable_ = false;
+  this.hasWritable_ = false;
+  this.enumerable_ = false;
+  this.configurable_ = false;
+  this.get_ = void 0;
+  this.hasGetter_ = false;
+  this.set_ = void 0;
+  this.hasSetter_ = false;
+}
+
+
+PropertyDescriptor.prototype.setValue = function(value) {
+  this.value_ = value;
+  this.hasValue_ = true;
+}
+
+
+PropertyDescriptor.prototype.getValue = function() {
+  return this.value_;
+}
+
+
+PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
+  this.enumerable_ = enumerable;
+}
+
+
+PropertyDescriptor.prototype.isEnumerable = function () {
+  return this.enumerable_;
+}
+
+
+PropertyDescriptor.prototype.setWritable = function(writable) {
+  this.writable_ = writable;
+  this.hasWritable_ = true;
+}
+
+
+PropertyDescriptor.prototype.isWritable = function() {
+  return this.writable_;
+}
+
+
+PropertyDescriptor.prototype.setConfigurable = function(configurable) {
+  this.configurable_ = configurable;
+}
+
+
+PropertyDescriptor.prototype.isConfigurable = function() {
+  return this.configurable_;
+}
+
+
+PropertyDescriptor.prototype.setGet = function(get) {
+  this.get_ = get;
+  this.hasGetter_ = true;
+}
+
+
+PropertyDescriptor.prototype.getGet = function() {
+  return this.get_;
+}
+
+
+PropertyDescriptor.prototype.setSet = function(set) {
+  this.set_ = set;
+  this.hasSetter_ = true;
+}
+
+
+PropertyDescriptor.prototype.getSet = function() {
+  return this.set_;
+}
+
+
+// ES5 8.12.9.  This version cannot cope with the property p already
+// being present on obj.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+  var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
+  if (IsDataDescriptor(desc)) {
+    flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
+    %SetProperty(obj, p, desc.getValue(), flag);
+  } else {
+    if (IS_FUNCTION(desc.getGet())) {
+      %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
+    }
+    if (IS_FUNCTION(desc.getSet())) {
+      %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
+    }
+  }
+  return true;
+}
+
+
+// ES5 section 15.2.3.5.
+function ObjectCreate(proto, properties) {
+  if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
+    throw MakeTypeError("proto_object_or_null", [proto]);
+  }
+  var obj = new $Object();
+  obj.__proto__ = proto;
+  if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
+  return obj;
+}
+
+
+// ES5 section 15.2.3.7.  This version cannot cope with the properties already
+// being present on obj.  Therefore it is not exposed as
+// Object.defineProperties yet.
+function ObjectDefineProperties(obj, properties) {
+  var props = ToObject(properties);
+  var key_values = [];
+  for (var key in props) {
+    if (%HasLocalProperty(props, key)) {
+      key_values.push(key);
+      var value = props[key];
+      var desc = ToPropertyDescriptor(value);
+      key_values.push(desc);
+    }
+  }
+  for (var i = 0; i < key_values.length; i += 2) {
+    var key = key_values[i];
+    var desc = key_values[i + 1];
+    DefineOwnProperty(obj, key, desc, true);
+  }
+}
+
+
 %SetCode($Object, function(x) {
   if (%_IsConstructCall()) {
     if (x == null) return this;
@@ -309,7 +511,8 @@
     "__lookupSetter__", ObjectLookupSetter
   ));
   InstallFunctions($Object, DONT_ENUM, $Array(
-    "keys", ObjectKeys
+    "keys", ObjectKeys,
+    "create", ObjectCreate
   ));
 }
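
Taken together, ToPropertyDescriptor, DefineOwnProperty and ObjectCreate back the Object.create entry point installed above. A usage sketch of the surface behavior, with the throwing calls left commented out so the fragment runs straight through:

  var proto = { greet: function() { return "hi"; } };
  var obj = Object.create(proto, {
    name: { value: "v8", writable: false, enumerable: true }
  });
  obj.greet();  // "hi", inherited from proto
  obj.name;     // "v8"

  // Non-object, non-null prototypes are rejected
  // ("proto_object_or_null"):
  // Object.create(42);  // TypeError

  // A descriptor may not combine accessors with a value or
  // writability ("value_and_accessor"):
  // Object.create(null, {x: {value: 1, get: function() {}}});  // TypeError
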
 
diff --git a/src/version.cc b/src/version.cc
index 61b23b4..4337685 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     0
-#define BUILD_NUMBER      3
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      4
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 36f0e63..a116f9c 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -4051,7 +4051,8 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  Result answer = frame_->CallStub(&stub, 2);
   frame_->Push(&answer);
 }
 
@@ -7371,19 +7372,28 @@
 
 
 const char* GenericBinaryOpStub::GetName() {
-  switch (op_) {
-    case Token::ADD: return "GenericBinaryOpStub_ADD";
-    case Token::SUB: return "GenericBinaryOpStub_SUB";
-    case Token::MUL: return "GenericBinaryOpStub_MUL";
-    case Token::DIV: return "GenericBinaryOpStub_DIV";
-    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-    case Token::SAR: return "GenericBinaryOpStub_SAR";
-    case Token::SHL: return "GenericBinaryOpStub_SHL";
-    case Token::SHR: return "GenericBinaryOpStub_SHR";
-    default:         return "GenericBinaryOpStub";
+  if (name_ != NULL) return name_;
+  const int len = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
   }
+
+  OS::SNPrintF(Vector<char>(name_, len),
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               op_name,
+               overwrite_name,
+               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+               args_in_registers_ ? "RegArgs" : "StackArgs",
+               args_reversed_ ? "_R" : "",
+               use_sse3_ ? "SSE3" : "SSE2");
+  return name_;
 }
 
 
@@ -7796,8 +7806,8 @@
       __ j(above_equal, &string1);
 
       // First and second argument are strings.
-      Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
-      __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
+      StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+      __ TailCallStub(&stub);
 
       // Only first argument is a string.
       __ bind(&string1);
@@ -7880,6 +7890,234 @@
   return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
 }
 
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+  Label string_add_runtime;
+
+  // Load the two arguments.
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument.
+  __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (string_check_) {
+    Condition is_smi;
+    is_smi = masm->CheckSmi(rax);
+    __ j(is_smi, &string_add_runtime);
+    __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
+    __ j(above_equal, &string_add_runtime);
+
+    // First argument is a string, test second.
+    is_smi = masm->CheckSmi(rdx);
+    __ j(is_smi, &string_add_runtime);
+    __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
+    __ j(above_equal, &string_add_runtime);
+  }
+
+  // Both arguments are strings.
+  // rax: first string
+  // rdx: second string
+  // Check if either of the strings are empty. In that case return the other.
+  Label second_not_zero_length, both_not_zero_length;
+  __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
+  __ testl(rcx, rcx);
+  __ j(not_zero, &second_not_zero_length);
+  // Second string is empty, result is first string which is already in rax.
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+  __ bind(&second_not_zero_length);
+  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ testl(rbx, rbx);
+  __ j(not_zero, &both_not_zero_length);
+  // First string is empty, result is second string which is in rdx.
+  __ movq(rax, rdx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  // Both strings are non-empty.
+  // rax: first string
+  // rbx: length of first string
+  // rcx: length of second string
+  // rdx: second string
+  // r8: map of first string if string check was performed above
+  // r9: map of second string if string check was performed above
+  Label string_add_flat_result;
+  __ bind(&both_not_zero_length);
+  // Look at the length of the result of adding the two strings.
+  __ addl(rbx, rcx);
+  // Use the runtime system when adding two one character strings, as it
+  // contains optimizations for this specific case using the symbol table.
+  __ cmpl(rbx, Immediate(2));
+  __ j(equal, &string_add_runtime);
+  // If the arguments were known to be strings, the maps were not loaded into
+  // r8 and r9 by the code above.
+  if (!string_check_) {
+    __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
+    __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+  }
+  // Get the instance types of the two strings as they will be needed soon.
+  __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
+  __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+  // Check if resulting string will be flat.
+  __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
+  __ j(below, &string_add_flat_result);
+  // Handle exceptionally long strings in the runtime system.
+  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  __ cmpl(rbx, Immediate(String::kMaxLength));
+  __ j(above, &string_add_runtime);
+
+  // If result is not supposed to be flat, allocate a cons string object. If
+  // both strings are ascii the result is an ascii cons string.
+  // rax: first string
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  Label non_ascii, allocated;
+  __ movl(rcx, r8);
+  __ and_(rcx, r9);
+  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  __ testl(rcx, Immediate(kAsciiStringTag));
+  __ j(zero, &non_ascii);
+  // Allocate an ascii cons string.
+  __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+  __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+          Immediate(String::kEmptyHashField));
+  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+  __ movq(rax, rcx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+  __ bind(&non_ascii);
+  // Allocate a two byte cons string.
+  __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
+  __ jmp(&allocated);
+
+  // Handle creating a flat result. First check that both strings are not
+  // external strings.
+  // rax: first string
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  __ bind(&string_add_flat_result);
+  __ movl(rcx, r8);
+  __ and_(rcx, Immediate(kStringRepresentationMask));
+  __ cmpl(rcx, Immediate(kExternalStringTag));
+  __ j(equal, &string_add_runtime);
+  __ movl(rcx, r9);
+  __ and_(rcx, Immediate(kStringRepresentationMask));
+  __ cmpl(rcx, Immediate(kExternalStringTag));
+  __ j(equal, &string_add_runtime);
+  // Now check if both strings are ascii strings.
+  // rax: first string
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  Label non_ascii_string_add_flat_result;
+  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  __ testl(r8, Immediate(kAsciiStringTag));
+  __ j(zero, &non_ascii_string_add_flat_result);
+  __ testl(r9, Immediate(kAsciiStringTag));
+  __ j(zero, &string_add_runtime);
+  // Both strings are ascii strings. As they are short they are both flat.
+  __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  // rcx: result string
+  __ movq(rbx, rcx);
+  // Locate first character of result.
+  __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument
+  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // rax: first char of first argument
+  // rbx: result string
+  // rcx: first character of result
+  // rdx: second string
+  // rdi: length of first argument
+  GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+  // Locate first character of second argument.
+  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // rbx: result string
+  // rcx: next character of result
+  // rdx: first char of second argument
+  // rdi: length of second argument
+  GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+  __ movq(rax, rbx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  // Handle creating a flat two byte result.
+  // rax: first string - known to be two byte
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  __ bind(&non_ascii_string_add_flat_result);
+  __ and_(r9, Immediate(kAsciiStringTag));
+  __ j(not_zero, &string_add_runtime);
+  // Both strings are two byte strings. As they are short they are both
+  // flat.
+  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  // rcx: result string
+  __ movq(rbx, rcx);
+  // Locate first character of result.
+  __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // rax: first char of first argument
+  // rbx: result string
+  // rcx: first character of result
+  // rdx: second argument
+  // rdi: length of first argument
+  GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+  // Locate first character of second argument.
+  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // rbx: result string
+  // rcx: next character of result
+  // rdx: first char of second argument
+  // rdi: length of second argument
+  GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+  __ movq(rax, rbx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
+
+
+void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
+                                           Register dest,
+                                           Register src,
+                                           Register count,
+                                           bool ascii) {
+  Label loop;
+  __ bind(&loop);
+  // This loop just copies one character at a time, as it is only used for very
+  // short strings.
+  if (ascii) {
+    __ movb(kScratchRegister, Operand(src, 0));
+    __ movb(Operand(dest, 0), kScratchRegister);
+    __ addq(src, Immediate(1));
+    __ addq(dest, Immediate(1));
+  } else {
+    __ movzxwl(kScratchRegister, Operand(src, 0));
+    __ movw(Operand(dest, 0), kScratchRegister);
+    __ addq(src, Immediate(2));
+    __ addq(dest, Immediate(2));
+  }
+  __ subl(count, Immediate(1));
+  __ j(not_zero, &loop);
+}
+
+
 #undef __
 
 #define __ masm.
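
StringAddStub::Generate above fixes the dispatch policy for string addition. The same policy, expressed as a JavaScript sketch (the constant value and the helper functions are assumptions for illustration; only the branch structure mirrors the assembly):

  // Assumed value; mirrors String::kMinNonFlatLength in the stub.
  var kMinNonFlatLength = 13;

  // Hypothetical stand-ins for the runtime call and the two
  // allocation paths in the stub.
  function runtimeAdd(a, b)          { return a + b; }
  function makeConsString(a, b)      { return a + b; }
  function allocateFlatAndCopy(a, b) { return a + b; }

  function addStrings(a, b) {
    if (b.length === 0) return a;         // second empty: reuse first
    if (a.length === 0) return b;         // first empty: reuse second
    var length = a.length + b.length;
    if (length === 2) return runtimeAdd(a, b);  // symbol table fast path
    if (length >= kMinNonFlatLength) {
      // Overly long and external strings also fall back to the
      // runtime in the stub; elided here.
      return makeConsString(a, b);
    }
    return allocateFlatAndCopy(a, b);     // short result: flat string
  }
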
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 8539884..3cc4db7 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -670,7 +670,8 @@
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
-        args_reversed_(false) {
+        args_reversed_(false),
+        name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
@@ -689,6 +690,7 @@
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+  char* name_;
 
   const char* GetName();
 
@@ -745,6 +747,36 @@
 };
 
 
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  void GenerateCopyCharacters(MacroAssembler* masm,
+                              Register dest,
+                              Register src,
+                              Register count,
+                              bool ascii);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 13edaf2..cc7c2f0 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -525,20 +525,6 @@
 }
 
 
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-  Expression* expr = stmt->expression();
-  if (expr->AsLiteral() != NULL) {
-    __ Move(rax, expr->AsLiteral()->handle());
-  } else {
-    Visit(expr);
-    ASSERT_EQ(Expression::kValue, expr->context());
-    __ pop(rax);
-  }
-  EmitReturnSequence(stmt->statement_pos());
-}
-
-
 void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
@@ -559,14 +545,20 @@
 
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  Expression* rewrite = expr->var()->rewrite();
+  EmitVariableLoad(expr->var(), expr->context());
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+                                          Expression::Context context) {
+  Expression* rewrite = var->rewrite();
   if (rewrite == NULL) {
-    ASSERT(expr->var()->is_global());
+    ASSERT(var->is_global());
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in rcx and the global
     // object on the stack.
     __ push(CodeGenerator::GlobalObject());
-    __ Move(rcx, expr->name());
+    __ Move(rcx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
     // A test rax instruction following the call is used by the IC to
@@ -574,7 +566,7 @@
     // is no test rax instruction here.
     __ nop();
 
-    DropAndMove(expr->context(), rax);
+    DropAndMove(context, rax);
   } else if (rewrite->AsSlot() != NULL) {
     Slot* slot = rewrite->AsSlot();
     if (FLAG_debug_code) {
@@ -595,7 +587,7 @@
           UNREACHABLE();
       }
     }
-    Move(expr->context(), slot, rax);
+    Move(context, slot, rax);
   } else {
     // A variable has been rewritten into an explicit access to
     // an object property.
@@ -629,7 +621,7 @@
     // the call. It is treated specially by the LoadIC code.
 
     // Drop key and object left on the stack by IC, and push the result.
-    DropAndMove(expr->context(), rax, 2);
+    DropAndMove(context, rax, 2);
   }
 }
 
@@ -663,31 +655,14 @@
 
 void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Label boilerplate_exists;
-
   __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
-  int literal_offset =
-    FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
-  __ movq(rax, FieldOperand(rbx, literal_offset));
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &boilerplate_exists);
-  // Create boilerplate if it does not exist.
-  // Literal array (0).
-  __ push(rbx);
-  // Literal index (1).
+  __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  // Constant properties (2).
   __ Push(expr->constant_properties());
-  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ bind(&boilerplate_exists);
-  // rax contains boilerplate.
-  // Clone boilerplate.
-  __ push(rax);
-  if (expr->depth() == 1) {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
   }
 
   // If result_saved == true: The result is saved on top of the
@@ -783,31 +758,14 @@
 
 void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
-  Label make_clone;
-
-  // Fetch the function's literals array.
   __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  __ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
-  // Check if the literal's boilerplate has been instantiated.
-  int offset =
-      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
-  __ movq(rax, FieldOperand(rbx, offset));
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &make_clone);
-
-  // Instantiate the boilerplate.
-  __ push(rbx);
+  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
   __ Push(expr->literals());
-  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
-  __ bind(&make_clone);
-  // Clone the boilerplate.
-  __ push(rax);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
@@ -877,10 +835,37 @@
 }
 
 
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
+                                              Expression::Context context) {
+  Literal* key = prop->key()->AsLiteral();
+  __ Move(rcx, key->handle());
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  Move(context, rax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  Move(context, rax);
+}
+
+
+void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
+                                                 Expression::Context context) {
+  GenericBinaryOpStub stub(op,
+                           NO_OVERWRITE,
+                           NO_GENERIC_BINARY_FLAGS);
+  __ CallStub(&stub);
+  Move(context, rax);
+}
+
+
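
The three emitters added above split a compound assignment into load, binary op, and store. As a toy model of that sequencing (plain C++; this is my reading of the control flow, not verbatim V8):

    #include <functional>

    // slot stands in for the variable or property being assigned.
    int CompoundAssign(int* slot, int rhs,
                       const std::function<int(int, int)>& op_stub) {
      int old_value = *slot;                 // EmitNamedPropertyLoad / EmitKeyedPropertyLoad
      int result = op_stub(old_value, rhs);  // EmitCompoundAssignmentOp -> GenericBinaryOpStub
      *slot = result;                        // the ordinary assignment path stores it
      return result;                         // the expression's value, like rax here
    }
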
 void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
-
+  ASSERT(var->is_global() || var->slot() != NULL);
   if (var->is_global()) {
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in rax, variable name in
@@ -985,36 +970,6 @@
         UNREACHABLE();
         break;
     }
-  } else {
-    Property* property = var->AsProperty();
-    ASSERT_NOT_NULL(property);
-    // A variable has been rewritten into a property on an object.
-
-    // Load object and key onto the stack.
-    Slot* object_slot = property->obj()->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
-    Move(Expression::kValue, object_slot, rax);
-
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    Move(Expression::kValue, key_literal);
-
-    // Value to store was pushed before object and key on the stack.
-    __ movq(rax, Operand(rsp, 2 * kPointerSize));
-
-    // Arguments to ic is value in rax, object and key on stack.
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-
-    if (expr->context() == Expression::kEffect) {
-      __ addq(rsp, Immediate(3 * kPointerSize));
-    } else if (expr->context() == Expression::kValue) {
-      // Value is still on the stack in rsp[2 * kPointerSize]
-      __ addq(rsp, Immediate(2 * kPointerSize));
-    } else {
-      __ movq(rax, Operand(rsp, 2 * kPointerSize));
-      DropAndMove(expr->context(), rax, 3);
-    }
   }
 }
 
@@ -1708,6 +1663,69 @@
 }
 
 
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  Move(expr->context(), rax);
+}
+
+
+Register FastCodeGenerator::result_register() { return rax; }
+
+
+Register FastCodeGenerator::context_register() { return rsi; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset),
+            static_cast<intptr_t>(frame_offset));
+  __ movq(Operand(rbp, frame_offset), value);
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+
+void FastCodeGenerator::EnterFinallyBlock() {
+  ASSERT(!result_register().is(rdx));
+  ASSERT(!result_register().is(rcx));
+  // Cook the return address on top of the stack (smi-encoded Code* delta).

+  __ movq(rdx, Operand(rsp, 0));
+  __ Move(rcx, masm_->CodeObject());
+  __ subq(rdx, rcx);
+  __ Integer32ToSmi(rdx, rdx);
+  __ movq(Operand(rsp, 0), rdx);
+  // Store result register while executing finally block.
+  __ push(result_register());
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+  ASSERT(!result_register().is(rdx));
+  ASSERT(!result_register().is(rcx));
+  // Restore result register from stack.
+  __ pop(result_register());
+  // Uncook return address.
+  __ movq(rdx, Operand(rsp, 0));
+  __ SmiToInteger32(rdx, rdx);
+  __ Move(rcx, masm_->CodeObject());
+  __ addq(rdx, rcx);
+  __ movq(Operand(rsp, 0), rdx);
+  // And return.
+  __ ret(0);
+}
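
Cooking turns the absolute return address into a smi-tagged offset from the code object, so a moving collector can relocate the code while the finally block runs. The arithmetic, sketched with a stand-in constant (x64 V8 keeps smis in the upper 32 bits):

    #include <cstdint>

    const int kSmiShift = 32;  // stand-in for the x64 smi encoding

    int64_t Cook(int64_t return_address, int64_t code_object_start) {
      return (return_address - code_object_start) << kSmiShift;
    }

    int64_t Uncook(int64_t cooked, int64_t code_object_start) {
      return (cooked >> kSmiShift) + code_object_start;
    }
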
+
+
+void FastCodeGenerator::ThrowException() {
+  __ push(result_register());
+  __ CallRuntime(Runtime::kThrow, 1);
+}
+
 #undef __
 
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index ccbc615..9e58f30 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -48,9 +48,13 @@
 // must always call a backup property load that is complete.
 // This function is safe to call if the receiver has fast properties,
 // or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register r0, Register r1, Register r2,
-                                   Register name) {
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss_label,
+                                   Register r0,
+                                   Register r1,
+                                   Register r2,
+                                   Register name,
+                                   DictionaryCheck check_dictionary) {
   // Register use:
   //
   // r0   - used to hold the property dictionary.
@@ -86,10 +90,14 @@
   __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
   __ j(equal, miss_label);
 
-  // Check that the properties array is a dictionary.
+  // Load properties array.
   __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
-  __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
-  __ j(not_equal, miss_label);
+
+  if (check_dictionary == CHECK_DICTIONARY) {
+    // Check that the properties array is a dictionary.
+    __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
+    __ j(not_equal, miss_label);
+  }
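
GenerateDictionaryLoad now takes a DictionaryCheck argument so call sites that have already established that the properties array is a dictionary (the keyed load below) can skip the map comparison. A stand-in sketch of the contract:

    enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };

    // Only CHECK_DICTIONARY callers pay for the hash_table_map compare.
    bool NeedsDictionaryMapCheck(DictionaryCheck check) {
      return check == CHECK_DICTIONARY;
    }
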
 
   // Compute the capacity mask.
   const int kCapacityOffset =
@@ -246,7 +254,8 @@
   //  -- rsp[8] : name
   //  -- rsp[16] : receiver
   // -----------------------------------
-  Label slow, check_string, index_int, index_string, check_pixel_array;
+  Label slow, check_string, index_int, index_string;
+  Label check_pixel_array, probe_dictionary;
 
   // Load name and receiver.
   __ movq(rax, Operand(rsp, kPointerSize));
@@ -319,14 +328,68 @@
   __ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
   __ testl(rbx, Immediate(String::kIsArrayIndexMask));
 
-  // If the string is a symbol, do a quick inline probe of the receiver's
-  // dictionary, if it exists.
+  // Is the string a symbol?
   __ j(not_zero, &index_string);  // The value in rbx is used at jump target.
   __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
            Immediate(kIsSymbolMask));
   __ j(zero, &slow);
-  // Probe the dictionary leaving result in rcx.
-  GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise, probe the dictionary, leaving the result in rcx.
+  __ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
+  __ j(equal, &probe_dictionary);
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movl(rdx, rbx);
+  __ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
+  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+  __ shr(rax, Immediate(String::kHashShift));
+  __ xor_(rdx, rax);
+  __ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
+
+  // Load the key (consisting of map and symbol) from the cache and
+  // check for a match.
+  ExternalReference cache_keys
+      = ExternalReference::keyed_lookup_cache_keys();
+  __ movq(rdi, rdx);
+  __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+  __ movq(kScratchRegister, cache_keys);
+  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
+  __ j(not_equal, &slow);
+  __ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
+  __ cmpq(Operand(rsp, kPointerSize), rdi);
+  __ j(not_equal, &slow);
+
+  // Get the field offset, which is a 32-bit integer, and check that it
+  // refers to an in-object property.
+  ExternalReference cache_field_offsets
+      = ExternalReference::keyed_lookup_cache_field_offsets();
+  __ movq(kScratchRegister, cache_field_offsets);
+  __ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
+  __ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+  __ cmpq(rax, rdx);
+  __ j(above_equal, &slow);
+
+  // Load in-object property.
+  __ subq(rax, rdx);
+  __ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+  __ addq(rax, rdx);
+  __ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
+  __ ret(0);
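
The subq/addq pair above turns the cached field offset into a slot index counted from the start of the object: the cache stores offsets relative to the end of the in-object area. In plain arithmetic (stand-in names; sizes assumed to be in words):

    // Valid only when field_offset < inobject_properties, which the
    // preceding cmpq / j(above_equal) guarantees.
    int InObjectSlotIndex(int field_offset,
                          int inobject_properties,
                          int instance_size_in_words) {
      return instance_size_in_words - (inobject_properties - field_offset);
    }
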
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  GenerateDictionaryLoad(masm,
+                         &slow,
+                         rbx,
+                         rcx,
+                         rdx,
+                         rax,
+                         DICTIONARY_CHECK_DONE);
   GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
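
The fast path above consults a keyed lookup cache keyed on (map, symbol). A compilable sketch of the index computation, with stand-in values for the shift and mask constants:

    #include <cstdint>

    const int kMapHashShift = 2;        // stand-in for KeyedLookupCache::kMapHashShift
    const int kStringHashShift = 2;     // stand-in for String::kHashShift
    const uint32_t kCapacityMask = 63;  // stand-in for KeyedLookupCache::kCapacityMask

    uint32_t CacheIndex(uintptr_t map_pointer, uint32_t hash_field) {
      uint32_t map_bits = static_cast<uint32_t>(map_pointer) >> kMapHashShift;
      uint32_t hash_bits = hash_field >> kStringHashShift;
      return (map_bits ^ hash_bits) & kCapacityMask;
    }

Each cache entry holds two words, map then symbol, which is why the key address is formed by shifting the index left by kPointerSizeLog2 + 1 and the two cmpq checks read offsets 0 and kPointerSize.
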
@@ -971,8 +1034,8 @@
                                  int argc,
                                  bool is_global_object,
                                  Label* miss) {
-  // Search dictionary - put result in register edx.
-  GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
+  // Search dictionary - put result in register rdx.
+  GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
 
   // Move the result to register rdi and check that it isn't a smi.
   __ movq(rdi, rdx);
@@ -1196,9 +1259,9 @@
           Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &miss);
 
-  // Search the dictionary placing the result in eax.
+  // Search the dictionary placing the result in rax.
   __ bind(&probe);
-  GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
+  GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
   GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
   __ ret(0);
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 7115791..5cf09f2 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -310,6 +310,12 @@
 }
 
 
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
@@ -1339,6 +1345,13 @@
 }
 
 
+void MacroAssembler::Drop(int stack_elements) {
+  if (stack_elements > 0) {
+    addq(rsp, Immediate(stack_elements * kPointerSize));
+  }
+}
+
+
 void MacroAssembler::Test(const Operand& src, Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
@@ -1425,6 +1438,16 @@
 }
 
 
+void MacroAssembler::PopTryHandler() {
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  // Unlink this handler.
+  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+  pop(Operand(kScratchRegister, 0));
+  // Remove the remaining fields.
+  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
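
PopTryHandler relies on the next-handler link being the first word of the handler frame (hence the ASSERT_EQ). As plain C++ over an explicit list (stand-in types; the real frames live on the machine stack, with Top::k_handler_address as the list head):

    struct StackHandler {
      StackHandler* next;  // kNextOffset == 0: the link is the first field
      // ...state, frame pointer, and return address follow...
    };

    void PopTryHandlerSketch(StackHandler** handler_address) {
      StackHandler* top = *handler_address;  // handler frame on top of the stack
      *handler_address = top->next;          // the pop writes the link back
      // The remaining fields are discarded by the addq to rsp.
    }
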
+
+
 void MacroAssembler::Ret() {
   ret(0);
 }
@@ -2244,6 +2267,108 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT(kShortSize == 2);
+  // scratch1 = length * 2 + kObjectAlignmentMask.
+  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+  // Allocate two-byte string in new space.
+  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
+                     times_1,
+                     scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+  movl(FieldOperand(result, String::kLengthOffset), length);
+  movl(FieldOperand(result, String::kHashFieldOffset),
+       Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  movl(scratch1, length);
+  ASSERT(kCharSize == 1);
+  addq(scratch1, Immediate(kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+  // Allocate ASCII string in new space.
+  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+                     times_1,
+                     scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+  movl(FieldOperand(result, String::kLengthOffset), length);
+  movl(FieldOperand(result, String::kHashFieldOffset),
+       Immediate(String::kEmptyHashField));
+}
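
Both allocators round the character payload up to the object alignment before the fixed header is accounted for (the header size goes to AllocateInNewSpace separately). The size computation as plain arithmetic, with a stand-in alignment mask:

    const int kObjectAlignmentMask = 7;  // stand-in: 8-byte alignment

    // char_size is 2 for two-byte strings (kShortSize) and 1 for ASCII
    // strings (kCharSize), matching the two routines above.
    int RoundedPayloadSize(int length, int char_size) {
      return (length * char_size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }
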
+
+
+void MacroAssembler::AllocateConsString(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required) {
+  // Allocate cons string object in new space.
+  AllocateInNewSpace(ConsString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map. The other fields are left uninitialized.
+  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  // Allocate cons string object in new space.
+  AllocateInNewSpace(ConsString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map. The other fields are left uninitialized.
+  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9e7c25c..9720005 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -400,7 +400,7 @@
   void Test(const Operand& dst, Smi* source);
 
   // ---------------------------------------------------------------------------
-  // Macro instructions
+  // Macro instructions.
 
   // Load a register with a long value as efficiently as possible.
   void Set(Register dst, int64_t x);
@@ -412,6 +412,8 @@
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(const Operand& dst, Handle<Object> source);
   void Push(Handle<Object> source);
+  void Drop(int stack_elements);
+  void Call(Label* target) { call(target); }
 
   // Control Flow
   void Jump(Address destination, RelocInfo::Mode rmode);
@@ -443,6 +445,8 @@
   // address must be pushed before calling this helper.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  void PopTryHandler();
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -518,6 +522,32 @@
                           Register scratch,
                           Label* gc_required);
 
+  // Allocate a sequential string. All the header fields of the string object
+  // are initialized.
+  void AllocateTwoByteString(Register result,
+                             Register length,
+                             Register scratch1,
+                             Register scratch2,
+                             Register scratch3,
+                             Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           Register length,
+                           Register scratch1,
+                           Register scratch2,
+                           Register scratch3,
+                           Label* gc_required);
+
+  // Allocate a raw cons string object. Only the map field of the result is
+  // initialized.
+  void AllocateConsString(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required);
+  void AllocateAsciiConsString(Register result,
+                               Register scratch1,
+                               Register scratch2,
+                               Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -557,6 +587,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump).
+  void TailCallStub(CodeStub* stub);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 55b0b87..81e5dae 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -987,10 +987,10 @@
 }
 
 
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback,
-                                              String* name) {
+                                              AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- rcx    : name
   //  -- rsp[0] : return address
@@ -999,8 +999,11 @@
   Label miss;
 
   __ movq(rax, Operand(rsp, kPointerSize));
-  GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
-                       callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1154,8 +1157,11 @@
   __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
-                       callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
+
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1610,7 +1616,7 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                         JSObject* holder,
                                         Register receiver,
                                         Register name_reg,
@@ -1618,7 +1624,8 @@
                                         Register scratch2,
                                         AccessorInfo* callback,
                                         String* name,
-                                        Label* miss) {
+                                        Label* miss,
+                                        Failure** failure) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
@@ -1641,6 +1648,8 @@
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
   __ TailCallRuntime(load_callback_property, 5, 1);
+
+  return true;
 }
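
GenerateLoadCallback now reports whether it could emit the load and hands a Failure back through an out-parameter; the compilers return that failure instead of a code object. The convention as a self-contained stand-in (names and the printable payload are illustrative only):

    struct Failure { const char* reason; };
    static Failure internal_error = { "internal error" };

    // Mirrors the bool + Failure** convention from the diff.
    bool EmitLoad(bool can_emit, Failure** failure) {
      if (!can_emit) {
        *failure = &internal_error;
        return false;
      }
      return true;
    }

    // Caller pattern, as in CompileLoadCallback above: bail out early
    // and let the caller see the failure object.
    const void* Compile(bool can_emit) {
      Failure* failure = &internal_error;
      if (!EmitLoad(can_emit, &failure)) return failure;
      return "code object";
    }
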