Version 3.20.5

Ensured that the length of frozen arrays is immutable (issue 2711, 259548)

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@15657 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index d3ec509..a49ebac 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2013-07-15: Version 3.20.5
+
+        Ensured that the length of frozen arrays is immutable (issue 2711, 259548)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-07-10: Version 3.20.4
 
         Fixed garbage-collection issue that causes a crash on ARM
diff --git a/DEPS b/DEPS
index 4f1a5cf..ccbaccb 100644
--- a/DEPS
+++ b/DEPS
@@ -8,7 +8,7 @@
     "http://gyp.googlecode.com/svn/trunk@1656",
 
   "v8/third_party/icu":
-    "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@205936",
+    "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@210659",
 }
 
 deps_os = {
diff --git a/Makefile b/Makefile
index f96c1e1..74a416d 100644
--- a/Makefile
+++ b/Makefile
@@ -401,4 +401,4 @@
 	    --revision 1656
 	svn checkout --force \
 	    https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
-	    third_party/icu --revision 205936
+	    third_party/icu --revision 210659
diff --git a/Makefile.nacl b/Makefile.nacl
index 0c98021..0d053b1 100644
--- a/Makefile.nacl
+++ b/Makefile.nacl
@@ -93,4 +93,5 @@
 	CXX=${NACL_CXX} \
 	build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. \
-	              -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
+	              -S.$(subst .,,$(suffix $@)) $(GYPFLAGS) \
+                      -Dwno_array_bounds=-Wno-array-bounds
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index 034fbc3..95e2cd2 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -59,6 +59,10 @@
     'mips_arch_variant%': 'mips32r2',
 
     'v8_enable_backtrace%': 0,
+
+    # Turns on compiler optimizations in Debug builds (#defines are unaffected).
+    'v8_optimized_debug%': 0,
+
     # Enable profiling support. Only required on Windows.
     'v8_enable_prof%': 0,
 
@@ -73,6 +77,9 @@
     'werror%': '-Werror',
     # For a shared library build, results in "libv8-<(soname_version).so".
     'soname_version%': '',
+
+    # Allows suppressing the array bounds warning (default is no suppression).
+    'wno_array_bounds%': '',
   },
   'target_defaults': {
     'conditions': [
@@ -436,14 +443,22 @@
         ],
         'msvs_settings': {
           'VCCLCompilerTool': {
-            'Optimization': '0',
-
             'conditions': [
-              ['OS=="win" and component=="shared_library"', {
+              ['component=="shared_library"', {
                 'RuntimeLibrary': '3',  # /MDd
               }, {
                 'RuntimeLibrary': '1',  # /MTd
               }],
+              ['v8_optimized_debug==1', {
+                'Optimization': '1',
+                'InlineFunctionExpansion': '2',
+                'EnableIntrinsicFunctions': 'true',
+                'FavorSizeOrSpeed': '0',
+                'StringPooling': 'true',
+                'BasicRuntimeChecks': '0',
+              }, {
+                'Optimization': '0',
+              }],
             ],
           },
           'VCLinkerTool': {
@@ -453,7 +468,28 @@
         'conditions': [
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
             'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
-                        '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
+                        '-Wnon-virtual-dtor', '-Woverloaded-virtual',
+                        '<(wno_array_bounds)' ],
+            'conditions': [
+              ['v8_optimized_debug==1', {
+                'cflags!': [
+                  '-O0',
+                  '-O2',
+                  '-Os',
+                ],
+                'cflags': [
+                  '-fdata-sections',
+                  '-ffunction-sections',
+                  '-O1',
+                ],
+              }],
+              ['v8_optimized_debug==1 and gcc_version==44 and clang==0', {
+                'cflags': [
+                  # Avoid crashes with gcc 4.4 in the v8 test suite.
+                  '-fno-tree-vrp',
+                ],
+              }],
+            ],
           }],
           ['OS=="linux" and v8_enable_backtrace==1', {
             # Support for backtrace_symbols.
@@ -475,7 +511,14 @@
           }],
           ['OS=="mac"', {
             'xcode_settings': {
-              'GCC_OPTIMIZATION_LEVEL': '0',  # -O0
+              'conditions': [
+                 ['v8_optimized_debug==1', {
+                   'GCC_OPTIMIZATION_LEVEL': '1',  # -O1
+                   'GCC_STRICT_ALIASING': 'YES',
+                 }, {
+                   'GCC_OPTIMIZATION_LEVEL': '0',  # -O0
+                 }],
+               ],
             },
           }],
         ],
@@ -491,6 +534,7 @@
               '-fdata-sections',
               '-ffunction-sections',
               '-O3',
+              '<(wno_array_bounds)',
             ],
             'conditions': [
               [ 'gcc_version==44 and clang==0', {
@@ -540,7 +584,7 @@
                 'FavorSizeOrSpeed': '0',
                 'StringPooling': 'true',
                 'conditions': [
-                  ['OS=="win" and component=="shared_library"', {
+                  ['component=="shared_library"', {
                     'RuntimeLibrary': '2',  #/MD
                   }, {
                     'RuntimeLibrary': '0',  #/MT
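
A hedged usage sketch for the two new build knobs (assuming the stock
Makefile workflow, which forwards GYPFLAGS to gyp; both variables default
to off, so plain Debug builds behave as before):

    # Hypothetical invocation; the -D names are the variables defined above.
    make arm.debug GYPFLAGS="-Dv8_optimized_debug=1 -Dwno_array_bounds=-Wno-array-bounds"
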
diff --git a/include/v8.h b/include/v8.h
index 5505901..9ce0583 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -4699,6 +4699,12 @@
    */
   static int ContextDisposedNotification();
 
+  /**
+   * Initialize the ICU library bundled with V8. The embedder should only
+   * invoke this method when using the bundled ICU. Returns true on success.
+   */
+  static bool InitializeICU();
+
  private:
   V8();
 
@@ -5393,7 +5399,7 @@
   static const int kNullValueRootIndex = 7;
   static const int kTrueValueRootIndex = 8;
   static const int kFalseValueRootIndex = 9;
-  static const int kEmptyStringRootIndex = 131;
+  static const int kEmptyStringRootIndex = 132;
 
   static const int kNodeClassIdOffset = 1 * kApiPointerSize;
   static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
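
For embedders, a minimal sketch of the intended call order (it mirrors the
sample updates further down; InitializeICU() must run before any script
executes, and the declaration above only promises a bool result):

    #include <v8.h>

    int main(int argc, char* argv[]) {
      v8::V8::InitializeICU();  // Per the new API contract: true on success.
      v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
      // ... create an isolate, compile and run scripts ...
      v8::V8::Dispose();
      return 0;
    }
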
diff --git a/samples/lineprocessor.cc b/samples/lineprocessor.cc
index 0c84419..4204820 100644
--- a/samples/lineprocessor.cc
+++ b/samples/lineprocessor.cc
@@ -137,8 +137,6 @@
 
 int RunMain(int argc, char* argv[]) {
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
-  v8::V8::SetFlagsFromString("--noenable_i18n",
-                             static_cast<int>(strlen("--noenable_i18n")));
   v8::Isolate* isolate = v8::Isolate::GetCurrent();
   v8::HandleScope handle_scope(isolate);
 
@@ -326,6 +324,7 @@
 
 
 int main(int argc, char* argv[]) {
+  v8::V8::InitializeICU();
   int result = RunMain(argc, argv);
   v8::V8::Dispose();
   return result;
diff --git a/samples/process.cc b/samples/process.cc
index 80805d2..844aee3 100644
--- a/samples/process.cc
+++ b/samples/process.cc
@@ -27,7 +27,6 @@
 
 #include <v8.h>
 
-#include <cstring>
 #include <string>
 #include <map>
 
@@ -628,6 +627,7 @@
 
 
 int main(int argc, char* argv[]) {
+  v8::V8::InitializeICU();
   map<string, string> options;
   string file;
   ParseOptions(argc, argv, options, &file);
@@ -635,8 +635,6 @@
     fprintf(stderr, "No script was specified.\n");
     return 1;
   }
-  V8::SetFlagsFromString("--noenable_i18n",
-                         static_cast<int>(strlen("--noenable_i18n")));
   Isolate* isolate = Isolate::GetCurrent();
   HandleScope scope(isolate);
   Handle<String> source = ReadFile(file);
diff --git a/samples/samples.gyp b/samples/samples.gyp
index cd2d15b..be7b9ea 100644
--- a/samples/samples.gyp
+++ b/samples/samples.gyp
@@ -28,6 +28,7 @@
 {
   'variables': {
     'v8_code': 1,
+    'v8_enable_i18n_support%': 0,
   },
   'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
   'target_defaults': {
@@ -38,6 +39,19 @@
     'include_dirs': [
       '../include',
     ],
+    'conditions': [
+      ['v8_enable_i18n_support==1', {
+        'dependencies': [
+          '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
+          '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+        ],
+      }],
+      ['OS=="win" and v8_enable_i18n_support==1', {
+        'dependencies': [
+          '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+        ],
+      }],
+    ],
   },
   'targets': [
     {
diff --git a/samples/shell.cc b/samples/shell.cc
index 5c5f056..710547c 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -66,9 +66,8 @@
 
 
 int main(int argc, char* argv[]) {
+  v8::V8::InitializeICU();
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
-  v8::V8::SetFlagsFromString("--noenable_i18n",
-                             static_cast<int>(strlen("--noenable_i18n")));
   v8::Isolate* isolate = v8::Isolate::GetCurrent();
   run_shell = (argc == 1);
   int result;
diff --git a/src/api.cc b/src/api.cc
index c56bc05..10e69fc 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -45,6 +45,7 @@
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "heap-snapshot-generator-inl.h"
+#include "icu_util.h"
 #include "messages.h"
 #ifdef COMPRESS_STARTUP_DATA_BZ2
 #include "natives.h"
@@ -5426,6 +5427,11 @@
 }
 
 
+bool v8::V8::InitializeICU() {
+  return i::InitializeICU();
+}
+
+
 const char* v8::V8::GetVersion() {
   return i::Version::GetVersion();
 }
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index e8afc5d..d95946e 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -49,6 +49,7 @@
 #endif
 unsigned CpuFeatures::supported_ = 0;
 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cache_line_size_ = 64;
 
 
 ExternalReference ExternalReference::cpu_features() {
@@ -125,6 +126,9 @@
         static_cast<uint64_t>(1) << VFP3 |
         static_cast<uint64_t>(1) << ARMv7;
   }
+  if (FLAG_enable_neon) {
+    supported_ |= 1u << NEON;
+  }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
     supported_ |= static_cast<uint64_t>(1) << ARMv7;
@@ -157,6 +161,10 @@
         static_cast<uint64_t>(1) << ARMv7;
   }
 
+  if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
+    found_by_runtime_probing_only_ |= 1u << NEON;
+  }
+
   if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
   }
@@ -171,12 +179,18 @@
         static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
   }
 
-  if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
+  CpuImplementer implementer = OS::GetCpuImplementer();
+  if (implementer == QUALCOMM_IMPLEMENTER &&
       FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_only_ |=
         static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
   }
 
+  CpuPart part = OS::GetCpuPart(implementer);
+  if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
+    cache_line_size_ = 32;
+  }
+
   if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
       && OS::ArmCpuHasFeature(VFP32DREGS)) {
     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
@@ -247,11 +261,12 @@
 
 void CpuFeatures::PrintFeatures() {
   printf(
-    "ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
+    "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
     "MOVW_MOVT_IMMEDIATE_LOADS=%d",
     CpuFeatures::IsSupported(ARMv7),
     CpuFeatures::IsSupported(VFP3),
     CpuFeatures::IsSupported(VFP32DREGS),
+    CpuFeatures::IsSupported(NEON),
     CpuFeatures::IsSupported(SUDIV),
     CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
     CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
@@ -378,6 +393,66 @@
 }
 
 
+NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
+  ASSERT((am == Offset) || (am == PostIndex));
+  rn_ = rn;
+  rm_ = (am == Offset) ? pc : sp;
+  SetAlignment(align);
+}
+
+
+NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
+  rn_ = rn;
+  rm_ = rm;
+  SetAlignment(align);
+}
+
+
+void NeonMemOperand::SetAlignment(int align) {
+  switch (align) {
+    case 0:
+      align_ = 0;
+      break;
+    case 64:
+      align_ = 1;
+      break;
+    case 128:
+      align_ = 2;
+      break;
+    case 256:
+      align_ = 3;
+      break;
+    default:
+      UNREACHABLE();
+      align_ = 0;
+      break;
+  }
+}
+
+
+NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
+  base_ = base;
+  switch (registers_count) {
+    case 1:
+      type_ = nlt_1;
+      break;
+    case 2:
+      type_ = nlt_2;
+      break;
+    case 3:
+      type_ = nlt_3;
+      break;
+    case 4:
+      type_ = nlt_4;
+      break;
+    default:
+      UNREACHABLE();
+      type_ = nlt_1;
+      break;
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Specific instructions, constants, and masks.
 
@@ -1546,6 +1621,107 @@
 }
 
 
+void Assembler::pkhbt(Register dst,
+                      Register src1,
+                      const Operand& src2,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
+  ASSERT(!dst.is(pc));
+  ASSERT(!src1.is(pc));
+  ASSERT(!src2.rm().is(pc));
+  ASSERT(!src2.rm().is(no_reg));
+  ASSERT(src2.rs().is(no_reg));
+  ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
+  ASSERT(src2.shift_op() == LSL);
+  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+       src2.shift_imm_*B7 | B4 | src2.rm().code());
+}
+
+
+void Assembler::pkhtb(Register dst,
+                      Register src1,
+                      const Operand& src2,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
+  ASSERT(!dst.is(pc));
+  ASSERT(!src1.is(pc));
+  ASSERT(!src2.rm().is(pc));
+  ASSERT(!src2.rm().is(no_reg));
+  ASSERT(src2.rs().is(no_reg));
+  ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
+  ASSERT(src2.shift_op() == ASR);
+  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
+  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+       asr*B7 | B6 | B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb(Register dst,
+                     const Operand& src,
+                     Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
+  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8) | 0111(7-4) | Rm(3-0)
+  ASSERT(!dst.is(pc));
+  ASSERT(!src.rm().is(pc));
+  ASSERT(!src.rm().is(no_reg));
+  ASSERT(src.rs().is(no_reg));
+  ASSERT((src.shift_imm_ == 0) ||
+         (src.shift_imm_ == 8) ||
+         (src.shift_imm_ == 16) ||
+         (src.shift_imm_ == 24));
+  ASSERT(src.shift_op() == ROR);
+  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
+       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
+
+
+void Assembler::uxtab(Register dst,
+                      Register src1,
+                      const Operand& src2,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
+  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8) | 0111(7-4) | Rm(3-0)
+  ASSERT(!dst.is(pc));
+  ASSERT(!src1.is(pc));
+  ASSERT(!src2.rm().is(pc));
+  ASSERT(!src2.rm().is(no_reg));
+  ASSERT(src2.rs().is(no_reg));
+  ASSERT((src2.shift_imm_ == 0) ||
+         (src2.shift_imm_ == 8) ||
+         (src2.shift_imm_ == 16) ||
+         (src2.shift_imm_ == 24));
+  ASSERT(src2.shift_op() == ROR);
+  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
+       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb16(Register dst,
+                       const Operand& src,
+                       Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
+  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8) | 0111(7-4) | Rm(3-0)
+  ASSERT(!dst.is(pc));
+  ASSERT(!src.rm().is(pc));
+  ASSERT(!src.rm().is(no_reg));
+  ASSERT(src.rs().is(no_reg));
+  ASSERT((src.shift_imm_ == 0) ||
+         (src.shift_imm_ == 8) ||
+         (src.shift_imm_ == 16) ||
+         (src.shift_imm_ == 24));
+  ASSERT(src.shift_op() == ROR);
+  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
+       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
+
+
 // Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   ASSERT(!dst.is(pc));
@@ -1644,6 +1820,25 @@
 }
 
 
+// Preload instructions.
+void Assembler::pld(const MemOperand& address) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.128.
+  // 1111(31-28) | 0101(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
+  // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
+  ASSERT(address.rm().is(no_reg));
+  ASSERT(address.am() == Offset);
+  int U = B23;
+  int offset = address.offset();
+  if (offset < 0) {
+    offset = -offset;
+    U = 0;
+  }
+  ASSERT(offset < 4096);
+  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
+       0xf*B12 | offset);
+}
+
+
 // Load/Store multiple instructions.
 void Assembler::ldm(BlockAddrMode am,
                     Register base,
@@ -2707,6 +2902,50 @@
 }
 
 
+// Support for NEON.
+
+void Assembler::vld1(NeonSize size,
+                     const NeonListOperand& dst,
+                     const NeonMemOperand& src) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
+  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
+  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+  ASSERT(CpuFeatures::IsSupported(NEON));
+  int vd, d;
+  dst.base().split_code(&vd, &d);
+  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
+       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
+}
+
+
+void Assembler::vst1(NeonSize size,
+                     const NeonListOperand& src,
+                     const NeonMemOperand& dst) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
+  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
+  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+  ASSERT(CpuFeatures::IsSupported(NEON));
+  int vd, d;
+  src.base().split_code(&vd, &d);
+  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
+       size*B6 | dst.align()*B4 | dst.rm().code());
+}
+
+
+void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
+  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
+  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
+  ASSERT(CpuFeatures::IsSupported(NEON));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
+        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
+}
+
+
 // Pseudo instructions.
 void Assembler::nop(int type) {
   // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
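
The new NEON entry points compose as in this sketch (lifted from the memcpy
stub generator later in this patch; "__" is the usual ACCESS_MASM shorthand,
and callers must check for NEON support first):

    if (CpuFeatures::IsSupported(NEON)) {
      // Copy 32 bytes with a single four-register load/store pair,
      // prefetching one cache line ahead.
      __ pld(MemOperand(src, 32));
      __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
      __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    }
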
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 3000860..62dd94c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -78,12 +78,15 @@
             (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
   }
 
+  static unsigned cache_line_size() { return cache_line_size_; }
+
  private:
 #ifdef DEBUG
   static bool initialized_;
 #endif
   static unsigned supported_;
   static unsigned found_by_runtime_probing_only_;
+  static unsigned cache_line_size_;
 
   friend class ExternalReference;
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
@@ -301,6 +304,36 @@
 typedef DwVfpRegister DoubleRegister;
 
 
+// Quad word NEON register.
+struct QwNeonRegister {
+  static const int kMaxNumRegisters = 16;
+
+  static QwNeonRegister from_code(int code) {
+    QwNeonRegister r = { code };
+    return r;
+  }
+
+  bool is_valid() const {
+    return (0 <= code_) && (code_ < kMaxNumRegisters);
+  }
+  bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+  void split_code(int* vm, int* m) const {
+    ASSERT(is_valid());
+    *m = (code_ & 0x10) >> 4;
+    *vm = code_ & 0x0F;
+  }
+
+  int code_;
+};
+
+
+typedef QwNeonRegister QuadRegister;
+
+
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
 const SwVfpRegister s0  = {  0 };
@@ -370,6 +403,23 @@
 const DwVfpRegister d30 = { 30 };
 const DwVfpRegister d31 = { 31 };
 
+const QwNeonRegister q0  = {  0 };
+const QwNeonRegister q1  = {  1 };
+const QwNeonRegister q2  = {  2 };
+const QwNeonRegister q3  = {  3 };
+const QwNeonRegister q4  = {  4 };
+const QwNeonRegister q5  = {  5 };
+const QwNeonRegister q6  = {  6 };
+const QwNeonRegister q7  = {  7 };
+const QwNeonRegister q8  = {  8 };
+const QwNeonRegister q9  = {  9 };
+const QwNeonRegister q10 = { 10 };
+const QwNeonRegister q11 = { 11 };
+const QwNeonRegister q12 = { 12 };
+const QwNeonRegister q13 = { 13 };
+const QwNeonRegister q14 = { 14 };
+const QwNeonRegister q15 = { 15 };
+
 // Aliases for double registers.  Defined using #define instead of
 // "static const DwVfpRegister&" because Clang complains otherwise when a
 // compilation unit that includes this header doesn't use the variables.
@@ -562,6 +612,42 @@
   friend class Assembler;
 };
 
+
+// Class NeonMemOperand represents a memory operand in NEON load and
+// store instructions.
+class NeonMemOperand BASE_EMBEDDED {
+ public:
+  // [rn {:align}]       Offset
+  // [rn {:align}]!      PostIndex
+  explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
+
+  // [rn {:align}], rm   PostIndex
+  explicit NeonMemOperand(Register rn, Register rm, int align = 0);
+
+  Register rn() const { return rn_; }
+  Register rm() const { return rm_; }
+  int align() const { return align_; }
+
+ private:
+  void SetAlignment(int align);
+
+  Register rn_;  // base
+  Register rm_;  // register increment
+  int align_;
+};
+
+
+// Class NeonListOperand represents a list of NEON registers.
+class NeonListOperand BASE_EMBEDDED {
+ public:
+  explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
+  DoubleRegister base() const { return base_; }
+  NeonListType type() const { return type_; }
+ private:
+  DoubleRegister base_;
+  NeonListType type_;
+};
+
 extern const Instr kMovLrPc;
 extern const Instr kLdrPCMask;
 extern const Instr kLdrPCPattern;
@@ -866,6 +952,19 @@
   void bfi(Register dst, Register src, int lsb, int width,
            Condition cond = al);
 
+  void pkhbt(Register dst, Register src1, const Operand& src2,
+             Condition cond = al);
+
+  void pkhtb(Register dst, Register src1, const Operand& src2,
+             Condition cond = al);
+
+  void uxtb(Register dst, const Operand& src, Condition cond = al);
+
+  void uxtab(Register dst, Register src1, const Operand& src2,
+             Condition cond = al);
+
+  void uxtb16(Register dst, const Operand& src, Condition cond = al);
+
   // Status register access instructions
 
   void mrs(Register dst, SRegister s, Condition cond = al);
@@ -887,6 +986,9 @@
             Register src2,
             const MemOperand& dst, Condition cond = al);
 
+  // Preload instructions
+  void pld(const MemOperand& address);
+
   // Load/Store multiple instructions
   void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
@@ -1097,6 +1199,17 @@
              const DwVfpRegister src,
              const Condition cond = al);
 
+  // Support for NEON.
+  // All these APIs support D0 to D31 and Q0 to Q15.
+
+  void vld1(NeonSize size,
+            const NeonListOperand& dst,
+            const NeonMemOperand& src);
+  void vst1(NeonSize size,
+            const NeonListOperand& src,
+            const NeonMemOperand& dst);
+  void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
+
   // Pseudo instructions
 
   // Different nop operations are used by the code generator to detect certain
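
Together with vld1/vst1, vmovl provides the 8-bit to 16-bit widening used by
the string copy stub later in this patch; a minimal sketch:

    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);  // zero-extend 8 bytes in d0 to 8 halfwords in q0
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
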
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index ff754ec..8f8094f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -258,6 +258,17 @@
 }
 
 
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0, r3, r1, r2 };
+  descriptor->register_param_count_ = 4;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index f411b13..4739541 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -112,6 +112,252 @@
 #endif
 }
 
+#if defined(V8_HOST_ARCH_ARM)
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+      OS::MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+  return stub;
+#else
+  if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+    return stub;
+  }
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return stub;
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  Register dest = r0;
+  Register src = r1;
+  Register chars = r2;
+  Register temp1 = r3;
+  Label less_4;
+
+  if (CpuFeatures::IsSupported(NEON)) {
+    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
+    Label size_less_than_8;
+    __ pld(MemOperand(src, 0));
+
+    __ cmp(chars, Operand(8));
+    __ b(lt, &size_less_than_8);
+    __ cmp(chars, Operand(32));
+    __ b(lt, &less_32);
+    if (CpuFeatures::cache_line_size() == 32) {
+      __ pld(MemOperand(src, 32));
+    }
+    __ cmp(chars, Operand(64));
+    __ b(lt, &less_64);
+    __ pld(MemOperand(src, 64));
+    if (CpuFeatures::cache_line_size() == 32) {
+      __ pld(MemOperand(src, 96));
+    }
+    __ cmp(chars, Operand(128));
+    __ b(lt, &less_128);
+    __ pld(MemOperand(src, 128));
+    if (CpuFeatures::cache_line_size() == 32) {
+      __ pld(MemOperand(src, 160));
+    }
+    __ pld(MemOperand(src, 192));
+    if (CpuFeatures::cache_line_size() == 32) {
+      __ pld(MemOperand(src, 224));
+    }
+    __ cmp(chars, Operand(256));
+    __ b(lt, &less_256);
+    __ sub(chars, chars, Operand(256));
+
+    __ bind(&loop);
+    __ pld(MemOperand(src, 256));
+    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+    if (CpuFeatures::cache_line_size() == 32) {
+      __ pld(MemOperand(src, 256));
+    }
+    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+    __ sub(chars, chars, Operand(64), SetCC);
+    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+    __ b(ge, &loop);
+    __ add(chars, chars, Operand(256));
+
+    __ bind(&less_256);
+    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+    __ sub(chars, chars, Operand(128));
+    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+    __ cmp(chars, Operand(64));
+    __ b(lt, &less_64);
+
+    __ bind(&less_128);
+    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+    __ sub(chars, chars, Operand(64));
+    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+
+    __ bind(&less_64);
+    __ cmp(chars, Operand(32));
+    __ b(lt, &less_32);
+    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+    __ sub(chars, chars, Operand(32));
+
+    __ bind(&less_32);
+    __ cmp(chars, Operand(16));
+    __ b(le, &_16_or_less);
+    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+    __ sub(chars, chars, Operand(16));
+
+    __ bind(&_16_or_less);
+    __ cmp(chars, Operand(8));
+    __ b(le, &_8_or_less);
+    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
+    __ sub(chars, chars, Operand(8));
+
+    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
+    __ bind(&_8_or_less);
+    __ rsb(chars, chars, Operand(8));
+    __ sub(src, src, Operand(chars));
+    __ sub(dest, dest, Operand(chars));
+    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
+
+    __ Ret();
+
+    __ bind(&size_less_than_8);
+
+    __ bic(temp1, chars, Operand(0x3), SetCC);
+    __ b(&less_4, eq);
+    __ ldr(temp1, MemOperand(src, 4, PostIndex));
+    __ str(temp1, MemOperand(dest, 4, PostIndex));
+  } else {
+    Register temp2 = ip;
+    Label loop;
+
+    __ bic(temp2, chars, Operand(0x3), SetCC);
+    __ b(&less_4, eq);
+    __ add(temp2, dest, temp2);
+
+    __ bind(&loop);
+    __ ldr(temp1, MemOperand(src, 4, PostIndex));
+    __ str(temp1, MemOperand(dest, 4, PostIndex));
+    __ cmp(dest, temp2);
+    __ b(&loop, ne);
+  }
+
+  __ bind(&less_4);
+  __ mov(chars, Operand(chars, LSL, 31), SetCC);
+  // bit0 => Z (ne), bit1 => C (cs)
+  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
+  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
+  __ ldrb(temp1, MemOperand(src), ne);
+  __ strb(temp1, MemOperand(dest), ne);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
+#endif
+}
+
+
+// Convert 8 to 16. The number of characters to copy must be at least 8.
+OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+      OS::MemCopyUint16Uint8Function stub) {
+#if defined(USE_SIMULATOR)
+  return stub;
+#else
+  if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+    return stub;
+  }
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return stub;
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  Register dest = r0;
+  Register src = r1;
+  Register chars = r2;
+  if (CpuFeatures::IsSupported(NEON)) {
+    Register temp = r3;
+    Label loop;
+
+    __ bic(temp, chars, Operand(0x7));
+    __ sub(chars, chars, Operand(temp));
+    __ add(temp, dest, Operand(temp, LSL, 1));
+
+    __ bind(&loop);
+    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+    __ vmovl(NeonU8, q0, d0);
+    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+    __ cmp(dest, temp);
+    __ b(&loop, ne);
+
+    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
+    __ rsb(chars, chars, Operand(8));
+    __ sub(src, src, Operand(chars));
+    __ sub(dest, dest, Operand(chars, LSL, 1));
+    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+    __ vmovl(NeonU8, q0, d0);
+    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
+    __ Ret();
+  } else {
+    Register temp1 = r3;
+    Register temp2 = ip;
+    Register temp3 = lr;
+    Register temp4 = r4;
+    Label loop;
+    Label not_two;
+
+    __ Push(lr, r4);
+    __ bic(temp2, chars, Operand(0x3));
+    __ add(temp2, dest, Operand(temp2, LSL, 1));
+
+    __ bind(&loop);
+    __ ldr(temp1, MemOperand(src, 4, PostIndex));
+    __ uxtb16(temp3, Operand(temp1, ROR, 0));
+    __ uxtb16(temp4, Operand(temp1, ROR, 8));
+    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
+    __ str(temp1, MemOperand(dest));
+    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
+    __ str(temp1, MemOperand(dest, 4));
+    __ add(dest, dest, Operand(8));
+    __ cmp(dest, temp2);
+    __ b(&loop, ne);
+
+    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
+    __ b(&not_two, cc);
+    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
+    __ uxtb(temp3, Operand(temp1, ROR, 8));
+    __ mov(temp3, Operand(temp3, LSL, 16));
+    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+    __ str(temp3, MemOperand(dest, 4, PostIndex));
+    __ bind(&not_two);
+    __ ldrb(temp1, MemOperand(src), ne);
+    __ strh(temp1, MemOperand(dest), ne);
+    __ Pop(pc, r4);
+  }
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+
+  return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
+#endif
+}
+#endif
 
 #undef __
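
Both stubs end with a last, possibly overlapping copy instead of a byte loop.
A worked example for the Uint8 stub, assuming chars == 13 on entry: the
16-or-less path copies bytes [0, 8) and leaves chars == 5; "rsb chars, chars,
#8" then yields 3, src and dest are walked back 3 bytes to offset 5, and the
final vld1/vst1 pair copies bytes [5, 13), harmlessly re-copying [5, 8).
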
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e21055f..9bfccf8 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -33,22 +33,6 @@
 #error ARM EABI support is required.
 #endif
 
-#if defined(__ARM_ARCH_7A__) || \
-    defined(__ARM_ARCH_7R__) || \
-    defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#ifndef CAN_USE_VFP3_INSTRUCTIONS
-# define CAN_USE_VFP3_INSTRUCTIONS
-#endif
-#endif
-
-// Simulator should support unaligned access by default.
-#if !defined(__arm__)
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-#  define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-#endif
-
 namespace v8 {
 namespace internal {
 
@@ -331,6 +315,32 @@
 };
 
 
+// NEON data type
+enum NeonDataType {
+  NeonS8 = 0x1,   // U = 0, imm3 = 0b001
+  NeonS16 = 0x2,  // U = 0, imm3 = 0b010
+  NeonS32 = 0x4,  // U = 0, imm3 = 0b100
+  NeonU8 = 1 << 24 | 0x1,   // U = 1, imm3 = 0b001
+  NeonU16 = 1 << 24 | 0x2,  // U = 1, imm3 = 0b010
+  NeonU32 = 1 << 24 | 0x4,   // U = 1, imm3 = 0b100
+  NeonDataTypeSizeMask = 0x7,
+  NeonDataTypeUMask = 1 << 24
+};
+
+enum NeonListType {
+  nlt_1 = 0x7,
+  nlt_2 = 0xA,
+  nlt_3 = 0x6,
+  nlt_4 = 0x2
+};
+
+enum NeonSize {
+  Neon8 = 0x0,
+  Neon16 = 0x1,
+  Neon32 = 0x2,
+  Neon64 = 0x4
+};
+
 // -----------------------------------------------------------------------------
 // Supervisor Call (svc) specific support.
 
@@ -573,6 +583,7 @@
   DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
 
   inline int TypeValue() const { return Bits(27, 25); }
+  inline int SpecialValue() const { return Bits(27, 23); }
 
   inline int RnValue() const { return Bits(19, 16); }
   DECLARE_STATIC_ACCESSOR(RnValue);
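
The NeonDataType values fold the U bit (bit 24) and the imm3 size field into
one enum so the assembler can mask them apart again; the decomposition used
by Assembler::vmovl above, spelled out:

    // NeonU8 == (1 << 24) | 0x1
    // NeonU8 & NeonDataTypeUMask    == 1 << 24  (U = 1: unsigned)
    // NeonU8 & NeonDataTypeSizeMask == 0x1      (imm3 = 0b001: 8-bit lanes)
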
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index ea3287a..780bafb 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -35,7 +35,7 @@
 namespace v8 {
 namespace internal {
 
-const int Deoptimizer::table_entry_size_ = 16;
+const int Deoptimizer::table_entry_size_ = 12;
 
 
 int Deoptimizer::patch_size() {
@@ -465,22 +465,12 @@
   // Get the bailout id from the stack.
   __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
 
-  // Get the address of the location in the code object if possible (r3) (return
+  // Get the address of the location in the code object (r3) (return
   // address for lazy deoptimization) and compute the fp-to-sp delta in
   // register r4.
-  if (type() == EAGER || type() == SOFT) {
-    __ mov(r3, Operand::Zero());
-    // Correct one word for bailout id.
-    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else if (type() == OSR) {
-    __ mov(r3, lr);
-    // Correct one word for bailout id.
-    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else {
-    __ mov(r3, lr);
-    // Correct two words for bailout id and return address.
-    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
-  }
+  __ mov(r3, lr);
+  // Correct one word for bailout id.
+  __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
   __ sub(r4, fp, r4);
 
   // Allocate a new deoptimizer object.
@@ -521,13 +511,8 @@
     __ vstr(d0, r1, dst_offset);
   }
 
-  // Remove the bailout id, eventually return address, and the saved registers
-  // from the stack.
-  if (type() == EAGER || type() == SOFT || type() == OSR) {
-    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else {
-    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
-  }
+  // Remove the bailout id and the saved registers from the stack.
+  __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
 
   // Compute a pointer to the unwinding limit in register r2; that is
   // the first stack slot not part of the input frame.
@@ -636,18 +621,12 @@
 
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-  // Create a sequence of deoptimization entries. Note that any
-  // registers may be still live.
+  // Create a sequence of deoptimization entries.
+  // Note that registers are still live when jumping to an entry.
   Label done;
   for (int i = 0; i < count(); i++) {
     int start = masm()->pc_offset();
     USE(start);
-    if (type() == EAGER || type() == SOFT) {
-      __ nop();
-    } else {
-      // Emulate ia32 like call by pushing return address to stack.
-      __ push(lr);
-    }
     __ mov(ip, Operand(i));
     __ push(ip);
     __ b(&done);
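
With the per-type prologue gone, every table entry is the same three 4-byte
ARM instructions, which is where the new table_entry_size_ of 12 comes from
(assuming the entry index fits in a single mov):

    __ mov(ip, Operand(i));  // materialize the entry index
    __ push(ip);             // pass it on the stack
    __ b(&done);             // jump to the shared tail
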
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index b0f3ec0..fd986fd 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -113,6 +113,8 @@
 
   // Handle formatting of instructions and their options.
   int FormatRegister(Instruction* instr, const char* option);
+  void FormatNeonList(int Vd, int type);
+  void FormatNeonMemory(int Rn, int align, int Rm);
   int FormatOption(Instruction* instr, const char* option);
   void Format(Instruction* instr, const char* format);
   void Unknown(Instruction* instr);
@@ -133,6 +135,8 @@
   void DecodeTypeVFP(Instruction* instr);
   void DecodeType6CoprocessorIns(Instruction* instr);
 
+  void DecodeSpecialCondition(Instruction* instr);
+
   void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
   void DecodeVCMP(Instruction* instr);
   void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
@@ -419,6 +423,41 @@
 }
 
 
+void Decoder::FormatNeonList(int Vd, int type) {
+  if (type == nlt_1) {
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    "{d%d}", Vd);
+  } else if (type == nlt_2) {
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    "{d%d, d%d}", Vd, Vd + 1);
+  } else if (type == nlt_3) {
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+  } else if (type == nlt_4) {
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                            "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+  }
+}
+
+
+void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                  "[r%d", Rn);
+  if (align != 0) {
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    ":%d", (1 << align) << 5);
+  }
+  if (Rm == 15) {
+    Print("]");
+  } else if (Rm == 13) {
+    Print("]!");
+  } else {
+    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    "], r%d", Rm);
+  }
+}
+
+
 // Print the movw or movt instruction.
 void Decoder::PrintMovwMovt(Instruction* instr) {
   int imm = instr->ImmedMovwMovtValue();
@@ -982,15 +1021,107 @@
       break;
     }
     case ia_x: {
-      if (instr->HasW()) {
-        VERIFY(instr->Bits(5, 4) == 0x1);
-        if (instr->Bit(22) == 0x1) {
-          Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
-        } else {
-          UNREACHABLE();  // SSAT.
-        }
-      } else {
+      if (instr->Bit(4) == 0) {
         Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+      } else {
+        if (instr->Bit(5) == 0) {
+          switch (instr->Bits(22, 21)) {
+            case 0:
+              if (instr->Bit(20) == 0) {
+                if (instr->Bit(6) == 0) {
+                  Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
+                } else {
+                  if (instr->Bits(11, 7) == 0) {
+                    Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
+                  } else {
+                    Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
+                  }
+                }
+              } else {
+                UNREACHABLE();
+              }
+              break;
+            case 1:
+              UNREACHABLE();
+              break;
+            case 2:
+              UNREACHABLE();
+              break;
+            case 3:
+              Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+              break;
+          }
+        } else {
+          switch (instr->Bits(22, 21)) {
+            case 0:
+              UNREACHABLE();
+              break;
+            case 1:
+              UNREACHABLE();
+              break;
+            case 2:
+              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+                if (instr->Bits(19, 16) == 0xF) {
+                  switch (instr->Bits(11, 10)) {
+                    case 0:
+                      Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+                      break;
+                    case 1:
+                      Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
+                      break;
+                    case 2:
+                      Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
+                      break;
+                    case 3:
+                      Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
+                      break;
+                  }
+                } else {
+                  UNREACHABLE();
+                }
+              } else {
+                UNREACHABLE();
+              }
+              break;
+            case 3:
+              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+                if (instr->Bits(19, 16) == 0xF) {
+                  switch (instr->Bits(11, 10)) {
+                    case 0:
+                      Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+                      break;
+                    case 1:
+                      Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+                      break;
+                    case 2:
+                      Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+                      break;
+                    case 3:
+                      Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+                      break;
+                  }
+                } else {
+                  switch (instr->Bits(11, 10)) {
+                    case 0:
+                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+                      break;
+                    case 1:
+                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+                      break;
+                    case 2:
+                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+                      break;
+                    case 3:
+                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+                      break;
+                  }
+                }
+              } else {
+                UNREACHABLE();
+              }
+              break;
+          }
+        }
       }
       break;
     }
@@ -1423,6 +1554,91 @@
   }
 }
 
+
+void Decoder::DecodeSpecialCondition(Instruction* instr) {
+  switch (instr->SpecialValue()) {
+    case 5:
+      if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+          (instr->Bit(4) == 1)) {
+        // vmovl signed
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+        int imm3 = instr->Bits(21, 19);
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+      } else {
+        Unknown(instr);
+      }
+      break;
+    case 7:
+      if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+          (instr->Bit(4) == 1)) {
+        // vmovl unsigned
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+        int imm3 = instr->Bits(21, 19);
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+      } else {
+        Unknown(instr);
+      }
+      break;
+    case 8:
+      if (instr->Bits(21, 20) == 0) {
+        // vst1
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Rn = instr->VnValue();
+        int type = instr->Bits(11, 8);
+        int size = instr->Bits(7, 6);
+        int align = instr->Bits(5, 4);
+        int Rm = instr->VmValue();
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vst1.%d ", (1 << size) << 3);
+        FormatNeonList(Vd, type);
+        Print(", ");
+        FormatNeonMemory(Rn, align, Rm);
+      } else if (instr->Bits(21, 20) == 2) {
+        // vld1
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Rn = instr->VnValue();
+        int type = instr->Bits(11, 8);
+        int size = instr->Bits(7, 6);
+        int align = instr->Bits(5, 4);
+        int Rm = instr->VmValue();
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vld1.%d ", (1 << size) << 3);
+        FormatNeonList(Vd, type);
+        Print(", ");
+        FormatNeonMemory(Rn, align, Rm);
+      } else {
+        Unknown(instr);
+      }
+      break;
+    case 0xA:
+    case 0xB:
+      if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+        int Rn = instr->Bits(19, 16);
+        int offset = instr->Bits(11, 0);
+        if (offset == 0) {
+          out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "pld [r%d]", Rn);
+        } else if (instr->Bit(23) == 0) {
+          out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "pld [r%d, #-%d]", Rn, offset);
+        } else {
+          out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "pld [r%d, #+%d]", Rn, offset);
+        }
+      } else {
+        Unknown(instr);
+      }
+      break;
+    default:
+      Unknown(instr);
+      break;
+  }
+}
+
 #undef VERIFY
 
 bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
@@ -1449,7 +1665,7 @@
                                   "%08x       ",
                                   instr->InstructionBits());
   if (instr->ConditionField() == kSpecialCondition) {
-    Unknown(instr);
+    DecodeSpecialCondition(instr);
     return Instruction::kInstrSize;
   }
   int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
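
With FormatNeonList and FormatNeonMemory in place, the new instructions
disassemble along these lines (illustrative output inferred from the
formatting code above, not captured from a real run):

    vld1.8 {d0, d1, d2, d3}, [r1]!
    vmovl.u8 q0, d0
    pld [r1, #+64]
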
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 56332ee..309f96a 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -325,7 +325,6 @@
   stream->Add("= ");
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
-  ASSERT(hydrogen()->property_cell()->value()->IsSmi());
   ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index a07bbcd..8c90831 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -343,8 +343,7 @@
   }
   Label table_start;
   __ bind(&table_start);
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
+  Label needs_frame;
   for (int i = 0; i < deopt_jump_table_.length(); i++) {
     __ bind(&deopt_jump_table_[i].label);
     Address entry = deopt_jump_table_[i].address;
@@ -357,45 +356,24 @@
     }
     if (deopt_jump_table_[i].needs_frame) {
       __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
-      if (type == Deoptimizer::LAZY) {
-        if (needs_frame_is_call.is_bound()) {
-          __ b(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-          __ push(scratch0());
-          __ add(fp, sp, Operand(2 * kPointerSize));
-          __ mov(lr, Operand(pc), LeaveCC, al);
-          __ mov(pc, ip);
-        }
+      if (needs_frame.is_bound()) {
+        __ b(&needs_frame);
       } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ b(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-          __ push(scratch0());
-          __ add(fp, sp, Operand(2 * kPointerSize));
-          __ mov(pc, ip);
-        }
+        __ bind(&needs_frame);
+        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+        // This variant of deopt can only be used with stubs. Since we don't
+        // have a function pointer to install in the stack frame that we're
+        // building, install a special marker there instead.
+        ASSERT(info()->IsStub());
+        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ add(fp, sp, Operand(2 * kPointerSize));
+        __ mov(lr, Operand(pc), LeaveCC, al);
+        __ mov(pc, ip);
       }
     } else {
-      if (type == Deoptimizer::LAZY) {
-        __ mov(lr, Operand(pc), LeaveCC, al);
-        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
-      } else {
-        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
-      }
+      __ mov(lr, Operand(pc), LeaveCC, al);
+      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
     }
     masm()->CheckConstPool(false, false);
   }
@@ -803,13 +781,8 @@
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  bool needs_lazy_deopt = info()->IsStub();
   if (cc == al && frame_is_built_) {
-    if (needs_lazy_deopt) {
-      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    } else {
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-    }
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index b07e7be..00af777 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -919,6 +919,54 @@
 }
 
 
+void Simulator::get_d_register(int dreg, uint64_t* value) {
+  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
+}
+
+
+void Simulator::set_d_register(int dreg, const uint64_t* value) {
+  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
+}
+
+
+void Simulator::get_d_register(int dreg, uint32_t* value) {
+  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_d_register(int dreg, const uint32_t* value) {
+  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint64_t* value) {
+  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint64_t* value) {
+  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint32_t* value) {
+  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint32_t* value) {
+  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+}
+
+
 // Raw access to the PC register.
 void Simulator::set_pc(int32_t value) {
   pc_modified_ = true;
@@ -2599,36 +2647,148 @@
       break;
     }
     case ia_x: {
-      if (instr->HasW()) {
-        ASSERT(instr->Bits(5, 4) == 0x1);
-
-        if (instr->Bit(22) == 0x1) {  // USAT.
-          int32_t sat_pos = instr->Bits(20, 16);
-          int32_t sat_val = (1 << sat_pos) - 1;
-          int32_t shift = instr->Bits(11, 7);
-          int32_t shift_type = instr->Bit(6);
-          int32_t rm_val = get_register(instr->RmValue());
-          if (shift_type == 0) {  // LSL
-            rm_val <<= shift;
-          } else {  // ASR
-            rm_val >>= shift;
+      if (instr->Bit(4) == 0) {
+        // Memop.
+      } else {
+        if (instr->Bit(5) == 0) {
+          switch (instr->Bits(22, 21)) {
+            case 0:
+              if (instr->Bit(20) == 0) {
+                if (instr->Bit(6) == 0) {
+                  // Pkhbt.
+                  uint32_t rn_val = get_register(rn);
+                  uint32_t rm_val = get_register(instr->RmValue());
+                  int32_t shift = instr->Bits(11, 7);
+                  rm_val <<= shift;
+                  set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
+                } else {
+                  // Pkhtb.
+                  uint32_t rn_val = get_register(rn);
+                  int32_t rm_val = get_register(instr->RmValue());
+                  int32_t shift = instr->Bits(11, 7);
+                  if (shift == 0) {
+                    shift = 32;
+                  }
+                  rm_val >>= shift;
+                  set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
+                }
+              } else {
+                UNIMPLEMENTED();
+              }
+              break;
+            case 1:
+              UNIMPLEMENTED();
+              break;
+            case 2:
+              UNIMPLEMENTED();
+              break;
+            case 3: {
+              // Usat.
+              int32_t sat_pos = instr->Bits(20, 16);
+              int32_t sat_val = (1 << sat_pos) - 1;
+              int32_t shift = instr->Bits(11, 7);
+              int32_t shift_type = instr->Bit(6);
+              int32_t rm_val = get_register(instr->RmValue());
+              if (shift_type == 0) {  // LSL
+                rm_val <<= shift;
+              } else {  // ASR
+                rm_val >>= shift;
+              }
+              // If saturation occurs, the Q flag should be set in the CPSR.
+              // There is no Q flag yet, and no instruction (MRS) to read the
+              // CPSR directly, so saturation is not recorded here.
+              if (rm_val > sat_val) {
+                rm_val = sat_val;
+              } else if (rm_val < 0) {
+                rm_val = 0;
+              }
+              set_register(rd, rm_val);
+              break;
+            }
           }
-          // If saturation occurs, the Q flag should be set in the CPSR.
-          // There is no Q flag yet, and no instruction (MRS) to read the
-          // CPSR directly.
-          if (rm_val > sat_val) {
-            rm_val = sat_val;
-          } else if (rm_val < 0) {
-            rm_val = 0;
+        } else {
+          switch (instr->Bits(22, 21)) {
+            case 0:
+              UNIMPLEMENTED();
+              break;
+            case 1:
+              UNIMPLEMENTED();
+              break;
+            case 2:
+              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+                if (instr->Bits(19, 16) == 0xF) {
+                  // Uxtb16.
+                  uint32_t rm_val = get_register(instr->RmValue());
+                  int32_t rotate = instr->Bits(11, 10);
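+                  // rotate rotates Rm right by 0, 8, 16 or 24 bits.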
+                  switch (rotate) {
+                    case 0:
+                      break;
+                    case 1:
+                      rm_val = (rm_val >> 8) | (rm_val << 24);
+                      break;
+                    case 2:
+                      rm_val = (rm_val >> 16) | (rm_val << 16);
+                      break;
+                    case 3:
+                      rm_val = (rm_val >> 24) | (rm_val << 8);
+                      break;
+                  }
+                  set_register(rd,
+                               (rm_val & 0xFF) | (rm_val & 0xFF0000));
+                } else {
+                  UNIMPLEMENTED();
+                }
+              } else {
+                UNIMPLEMENTED();
+              }
+              break;
+            case 3:
+              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+                if (instr->Bits(19, 16) == 0xF) {
+                  // Uxtb.
+                  uint32_t rm_val = get_register(instr->RmValue());
+                  int32_t rotate = instr->Bits(11, 10);
+                  switch (rotate) {
+                    case 0:
+                      break;
+                    case 1:
+                      rm_val = (rm_val >> 8) | (rm_val << 24);
+                      break;
+                    case 2:
+                      rm_val = (rm_val >> 16) | (rm_val << 16);
+                      break;
+                    case 3:
+                      rm_val = (rm_val >> 24) | (rm_val << 8);
+                      break;
+                  }
+                  set_register(rd, (rm_val & 0xFF));
+                } else {
+                  // Uxtab.
+                  uint32_t rn_val = get_register(rn);
+                  uint32_t rm_val = get_register(instr->RmValue());
+                  int32_t rotate = instr->Bits(11, 10);
+                  switch (rotate) {
+                    case 0:
+                      break;
+                    case 1:
+                      rm_val = (rm_val >> 8) | (rm_val << 24);
+                      break;
+                    case 2:
+                      rm_val = (rm_val >> 16) | (rm_val << 16);
+                      break;
+                    case 3:
+                      rm_val = (rm_val >> 24) | (rm_val << 8);
+                      break;
+                  }
+                  set_register(rd, rn_val + (rm_val & 0xFF));
+                }
+              } else {
+                UNIMPLEMENTED();
+              }
+              break;
           }
-          set_register(rd, rm_val);
-        } else {  // SSAT.
-          UNIMPLEMENTED();
         }
         return;
-      } else {
-        Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
-        UNIMPLEMENTED();
       }
       break;
     }
@@ -3352,6 +3512,156 @@
 }
 
 
+void Simulator::DecodeSpecialCondition(Instruction* instr) {
+  switch (instr->SpecialValue()) {
+    case 5:
+      if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+          (instr->Bit(4) == 1)) {
+        // vmovl signed
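+        // Bit 22 (D) and bit 5 (M) provide the top bit of the register
+        // indices.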
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+        int imm3 = instr->Bits(21, 19);
+        if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+        int esize = 8 * imm3;
+        int elements = 64 / esize;
+        int8_t from[8];
+        get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+        int16_t to[8];
+        int e = 0;
+        while (e < elements) {
+          to[e] = from[e];
+          e++;
+        }
+        set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    case 7:
+      if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+          (instr->Bit(4) == 1)) {
+        // vmovl unsigned
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+        int imm3 = instr->Bits(21, 19);
+        if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+        int esize = 8 * imm3;
+        int elements = 64 / esize;
+        uint8_t from[8];
+        get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+        uint16_t to[8];
+        int e = 0;
+        while (e < elements) {
+          to[e] = from[e];
+          e++;
+        }
+        set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    case 8:
+      if (instr->Bits(21, 20) == 0) {
+        // vst1
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Rn = instr->VnValue();
+        int type = instr->Bits(11, 8);
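+        // The type field encodes the number of D registers in the list.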
+        int Rm = instr->VmValue();
+        int32_t address = get_register(Rn);
+        int regs = 0;
+        switch (type) {
+          case nlt_1:
+            regs = 1;
+            break;
+          case nlt_2:
+            regs = 2;
+            break;
+          case nlt_3:
+            regs = 3;
+            break;
+          case nlt_4:
+            regs = 4;
+            break;
+          default:
+            UNIMPLEMENTED();
+            break;
+        }
+        int r = 0;
+        while (r < regs) {
+          uint32_t data[2];
+          get_d_register(Vd + r, data);
+          WriteW(address, data[0], instr);
+          WriteW(address + 4, data[1], instr);
+          address += 8;
+          r++;
+        }
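+        // Writeback: Rm == 15 means none, Rm == 13 means increment Rn by the
+        // bytes transferred, any other Rm is added to Rn.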
+        if (Rm != 15) {
+          if (Rm == 13) {
+            set_register(Rn, address);
+          } else {
+            set_register(Rn, get_register(Rn) + get_register(Rm));
+          }
+        }
+      } else if (instr->Bits(21, 20) == 2) {
+        // vld1
+        int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+        int Rn = instr->VnValue();
+        int type = instr->Bits(11, 8);
+        int Rm = instr->VmValue();
+        int32_t address = get_register(Rn);
+        int regs = 0;
+        switch (type) {
+          case nlt_1:
+            regs = 1;
+            break;
+          case nlt_2:
+            regs = 2;
+            break;
+          case nlt_3:
+            regs = 3;
+            break;
+          case nlt_4:
+            regs = 4;
+            break;
+          default:
+            UNIMPLEMENTED();
+            break;
+        }
+        int r = 0;
+        while (r < regs) {
+          uint32_t data[2];
+          data[0] = ReadW(address, instr);
+          data[1] = ReadW(address + 4, instr);
+          set_d_register(Vd + r, data);
+          address += 8;
+          r++;
+        }
+        if (Rm != 15) {
+          if (Rm == 13) {
+            set_register(Rn, address);
+          } else {
+            set_register(Rn, get_register(Rn) + get_register(Rm));
+          }
+        }
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    case 0xA:
+    case 0xB:
+      if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+        // pld: ignore instruction.
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+}
+
+
 // Executes the current instruction.
 void Simulator::InstructionDecode(Instruction* instr) {
   if (v8::internal::FLAG_check_icache) {
@@ -3368,7 +3678,7 @@
     PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
   }
   if (instr->ConditionField() == kSpecialCondition) {
-    UNIMPLEMENTED();
+    DecodeSpecialCondition(instr);
   } else if (ConditionallyExecute(instr)) {
     switch (instr->TypeValue()) {
       case 0:
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 45ae999..2a458f9 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -144,7 +144,10 @@
     d8, d9, d10, d11, d12, d13, d14, d15,
     d16, d17, d18, d19, d20, d21, d22, d23,
     d24, d25, d26, d27, d28, d29, d30, d31,
-    num_d_registers = 32
+    num_d_registers = 32,
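+    // Each Q register aliases a pair of D registers (q0 = d0:d1, ...).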
+    q0 = 0, q1, q2, q3, q4, q5, q6, q7,
+    q8, q9, q10, q11, q12, q13, q14, q15,
+    num_q_registers = 16
   };
 
   explicit Simulator(Isolate* isolate);
@@ -163,6 +166,15 @@
   void set_dw_register(int dreg, const int* dbl);
 
   // Support for VFP.
+  void get_d_register(int dreg, uint64_t* value);
+  void set_d_register(int dreg, const uint64_t* value);
+  void get_d_register(int dreg, uint32_t* value);
+  void set_d_register(int dreg, const uint32_t* value);
+  void get_q_register(int qreg, uint64_t* value);
+  void set_q_register(int qreg, const uint64_t* value);
+  void get_q_register(int qreg, uint32_t* value);
+  void set_q_register(int qreg, const uint32_t* value);
+
   void set_s_register(int reg, unsigned int value);
   unsigned int get_s_register(int reg) const;
 
@@ -328,6 +340,7 @@
   // Support for VFP.
   void DecodeTypeVFP(Instruction* instr);
   void DecodeType6CoprocessorIns(Instruction* instr);
+  void DecodeSpecialCondition(Instruction* instr);
 
   void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
   void DecodeVCMP(Instruction* instr);
diff --git a/src/array-iterator.js b/src/array-iterator.js
new file mode 100644
index 0000000..8f1ab47
--- /dev/null
+++ b/src/array-iterator.js
@@ -0,0 +1,127 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// 'AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var ARRAY_ITERATOR_KIND_KEYS = 1;
+var ARRAY_ITERATOR_KIND_VALUES = 2;
+var ARRAY_ITERATOR_KIND_ENTRIES = 3;
+// The spec draft also has "sparse" but it is never used.
+
+var iteratorObjectSymbol = %CreateSymbol(void 0);
+var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
+var arrayIterationKindSymbol = %CreateSymbol(void 0);
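+// The symbols serve as private property names, keeping the iterator's
+// internal state out of reach of user code.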
+
+function ArrayIterator() {}
+
+// 15.4.5.1 CreateArrayIterator Abstract Operation
+function CreateArrayIterator(array, kind) {
+  var object = ToObject(array);
+  var iterator = new ArrayIterator;
+  iterator[iteratorObjectSymbol] = object;
+  iterator[arrayIteratorNextIndexSymbol] = 0;
+  iterator[arrayIterationKindSymbol] = kind;
+  return iterator;
+}
+
+// 15.19.4.3.4 CreateItrResultObject
+function CreateIteratorResultObject(value, done) {
+  return {value: value, done: done};
+}
+
+// 15.4.5.2.2 ArrayIterator.prototype.next( )
+function ArrayIteratorNext() {
+  var iterator = ToObject(this);
+  var array = iterator[iteratorObjectSymbol];
+  if (!array) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Array Iterator.prototype.next']);
+  }
+
+  var index = iterator[arrayIteratorNextIndexSymbol];
+  var itemKind = iterator[arrayIterationKindSymbol];
+  var length = TO_UINT32(array.length);
+
+  // "sparse" is never used.
+
+  if (index >= length) {
+    iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
+    return CreateIteratorResultObject(void 0, true);
+  }
+
+  var elementKey = ToString(index);
+  iterator[arrayIteratorNextIndexSymbol] = index + 1;
+
+  if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
+    return CreateIteratorResultObject(array[elementKey], false);
+
+  if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES)
+    return CreateIteratorResultObject([elementKey, array[elementKey]], false);
+
+  return CreateIteratorResultObject(elementKey, false);
+}
+
+function ArrayEntries() {
+  return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_ENTRIES);
+}
+
+function ArrayValues() {
+  return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_VALUES);
+}
+
+function ArrayKeys() {
+  return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_KEYS);
+}
+
+function SetUpArrayIterator() {
+  %CheckIsBootstrapping();
+
+  %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
+  %FunctionSetReadOnlyPrototype(ArrayIterator);
+
+  InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array(
+    'next', ArrayIteratorNext
+  ));
+}
+
+SetUpArrayIterator();
+
+function ExtendArrayPrototype() {
+  %CheckIsBootstrapping();
+
+  InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+    'entries', ArrayEntries,
+    'values', ArrayValues,
+    'keys', ArrayKeys
+  ));
+}
+
+ExtendArrayPrototype();
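+
+// A sketch of the resulting behavior (run d8 with --harmony_iteration);
+// note that keys are produced as strings by ToString(index):
+//   var it = ['a', 'b'].entries();
+//   it.next();  // {value: ['0', 'a'], done: false}
+//   it.next();  // {value: ['1', 'b'], done: false}
+//   it.next();  // {value: undefined, done: true}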
diff --git a/src/ast.h b/src/ast.h
index 6336b3a..78b7843 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -357,10 +357,8 @@
   bool IsUndefinedLiteral();
 
   // Expression type bounds
-  Handle<Type> upper_type() { return upper_type_; }
-  Handle<Type> lower_type() { return lower_type_; }
-  void set_upper_type(Handle<Type> type) { upper_type_ = type; }
-  void set_lower_type(Handle<Type> type) { lower_type_ = type; }
+  Bounds bounds() { return bounds_; }
+  void set_bounds(Bounds bounds) { bounds_ = bounds; }
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
@@ -391,15 +389,13 @@
 
  protected:
   explicit Expression(Isolate* isolate)
-      : upper_type_(Type::Any(), isolate),
-        lower_type_(Type::None(), isolate),
+      : bounds_(Type::None(), Type::Any(), isolate),
         id_(GetNextId(isolate)),
         test_id_(GetNextId(isolate)) {}
   void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
 
  private:
-  Handle<Type> upper_type_;
-  Handle<Type> lower_type_;
+  Bounds bounds_;
   byte to_boolean_types_;
 
   const BailoutId id_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index dd3e94b..249b31c 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -2072,6 +2072,11 @@
                "native generator.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
+    if (FLAG_harmony_iteration &&
+        strcmp(ExperimentalNatives::GetScriptName(i).start(),
+               "native array-iterator.js") == 0) {
+      if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+    }
   }
 
   InstallExperimentalNativeFunctions();
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index d8eaae0..d5bb5e7 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -784,8 +784,7 @@
   CompareNilICStub* stub = casted_stub();
   HIfContinuation continuation;
   Handle<Map> sentinel_map(isolate->heap()->meta_map());
-  Handle<Type> type =
-      CompareNilICStub::StateToType(isolate, stub->GetState(), sentinel_map);
+  Handle<Type> type = stub->GetType(isolate, sentinel_map);
   BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
   IfBuilder if_nil(this, &continuation);
   if_nil.Then();
@@ -909,4 +908,75 @@
 }
 
 
+template<>
+HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
+  ElementsTransitionAndStoreStub* stub = casted_stub();
+  ElementsKind from_kind = stub->from();
+  ElementsKind to_kind = stub->to();
+
+  HValue* value = GetParameter(0);
+  HValue* target_map = GetParameter(1);
+  HValue* key = GetParameter(2);
+  HValue* object = GetParameter(3);
+
+  if (FLAG_trace_elements_transitions) {
+    // Tracing elements transitions is the job of the runtime.
+    current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+    set_current_block(NULL);
+    return value;
+  }
+
+  info()->MarkAsSavesCallerDoubles();
+
+  if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+    Add<HTrapAllocationMemento>(object);
+  }
+
+  // Check if we need to transition the array elements first
+  // (either SMI -> Double or Double -> Object).
+  if (DoesTransitionChangeElementsBufferFormat(from_kind, to_kind)) {
+    HInstruction* array_length = NULL;
+    if (stub->is_jsarray()) {
+      array_length = AddLoad(object, HObjectAccess::ForArrayLength());
+    } else {
+      array_length = AddLoadFixedArrayLength(AddLoadElements(object));
+    }
+    array_length->set_type(HType::Smi());
+
+    IfBuilder if_builder(this);
+
+    // Check if we have any elements.
+    if_builder.IfNot<HCompareNumericAndBranch>(array_length,
+                                               graph()->GetConstant0(),
+                                               Token::EQ);
+    if_builder.Then();
+
+    HInstruction* elements = AddLoadElements(object);
+
+    HInstruction* elements_length = AddLoadFixedArrayLength(elements);
+
+    BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
+                              array_length, elements_length);
+
+    if_builder.End();
+  }
+
+  // Set transitioned map.
+  AddStore(object, HObjectAccess::ForMap(), target_map);
+
+  // Generate the actual store.
+  BuildUncheckedMonomorphicElementAccess(object, key, value, NULL,
+                                          stub->is_jsarray(), to_kind,
+                                          true, ALLOW_RETURN_HOLE,
+                                          stub->store_mode());
+
+  return value;
+}
+
+
+Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index bfc71fc..5f6616e 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -534,7 +534,7 @@
 
 
 void CompareNilICStub::UpdateStatus(Handle<Object> object) {
-  ASSERT(state_ != State::Generic());
+  ASSERT(!state_.Contains(GENERIC));
   State old_state(state_);
   if (object->IsNull()) {
     state_.Add(NULL_TYPE);
@@ -543,9 +543,11 @@
   } else if (object->IsUndetectableObject() ||
              object->IsOddball() ||
              !object->IsHeapObject()) {
-    state_ = State::Generic();
+    state_.RemoveAll();
+    state_.Add(GENERIC);
   } else if (IsMonomorphic()) {
-    state_ = State::Generic();
+    state_.RemoveAll();
+    state_.Add(GENERIC);
   } else {
     state_.Add(MONOMORPHIC_MAP);
   }
@@ -592,33 +594,28 @@
   if (Contains(UNDEFINED)) printer.Add("Undefined");
   if (Contains(NULL_TYPE)) printer.Add("Null");
   if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
-  if (Contains(UNDETECTABLE)) printer.Add("Undetectable");
   if (Contains(GENERIC)) printer.Add("Generic");
   stream->Add(")");
 }
 
 
-Handle<Type> CompareNilICStub::StateToType(
+Handle<Type> CompareNilICStub::GetType(
     Isolate* isolate,
-    State state,
     Handle<Map> map) {
-  if (state.Contains(CompareNilICStub::GENERIC)) {
+  if (state_.Contains(CompareNilICStub::GENERIC)) {
     return handle(Type::Any(), isolate);
   }
 
   Handle<Type> result(Type::None(), isolate);
-  if (state.Contains(CompareNilICStub::UNDEFINED)) {
+  if (state_.Contains(CompareNilICStub::UNDEFINED)) {
     result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
                     isolate);
   }
-  if (state.Contains(CompareNilICStub::NULL_TYPE)) {
+  if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
     result = handle(Type::Union(result, handle(Type::Null(), isolate)),
                     isolate);
   }
-  if (state.Contains(CompareNilICStub::UNDETECTABLE)) {
-    result = handle(Type::Union(result, handle(Type::Undetectable(), isolate)),
-                    isolate);
-  } else if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
+  if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
     Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
     result = handle(Type::Union(result, handle(type, isolate)), isolate);
   }
@@ -627,6 +624,16 @@
 }
 
 
+Handle<Type> CompareNilICStub::GetInputType(
+    Isolate* isolate,
+    Handle<Map> map) {
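+  // An input seen by the stub is either one of the recorded output types or
+  // the nil value being compared against.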
+  Handle<Type> output_type = GetType(isolate, map);
+  Handle<Type> nil_type = handle(nil_value_ == kNullValue
+      ? Type::Null() : Type::Undefined(), isolate);
+  return handle(Type::Union(output_type, nil_type), isolate);
+}
+
+
 void InstanceofStub::PrintName(StringStream* stream) {
   const char* args = "";
   if (HasArgsInRegisters()) {
@@ -811,44 +818,6 @@
 }
 
 
-void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
-  Label fail;
-  AllocationSiteMode mode = AllocationSite::GetMode(from_, to_);
-  ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
-  if (!FLAG_trace_elements_transitions) {
-    if (IsFastSmiOrObjectElementsKind(to_)) {
-      if (IsFastSmiOrObjectElementsKind(from_)) {
-        ElementsTransitionGenerator::
-            GenerateMapChangeElementsTransition(masm, mode, &fail);
-      } else if (IsFastDoubleElementsKind(from_)) {
-        ASSERT(!IsFastSmiElementsKind(to_));
-        ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
-      } else {
-        UNREACHABLE();
-      }
-      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
-                                                       is_jsarray_,
-                                                       to_,
-                                                       store_mode_);
-    } else if (IsFastSmiElementsKind(from_) &&
-               IsFastDoubleElementsKind(to_)) {
-      ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
-      KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
-                                                             is_jsarray_,
-                                                             store_mode_);
-    } else if (IsFastDoubleElementsKind(from_)) {
-      ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
-      ElementsTransitionGenerator::
-          GenerateMapChangeElementsTransition(masm, mode, &fail);
-    } else {
-      UNREACHABLE();
-    }
-  }
-  masm->bind(&fail);
-  KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
-}
-
-
 void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
   StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
   StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
diff --git a/src/code-stubs.h b/src/code-stubs.h
index e4cbe18..6f9d99e 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -72,6 +72,7 @@
   V(ArgumentsAccess)                     \
   V(RegExpConstructResult)               \
   V(NumberToString)                      \
+  V(DoubleToI)                           \
   V(CEntry)                              \
   V(JSEntry)                             \
   V(KeyedLoadElement)                    \
@@ -1260,50 +1261,17 @@
 
 class CompareNilICStub : public HydrogenCodeStub  {
  public:
-  enum CompareNilType {
-    UNDEFINED,
-    NULL_TYPE,
-    MONOMORPHIC_MAP,
-    UNDETECTABLE,
-    GENERIC,
-    NUMBER_OF_TYPES
-  };
+  Handle<Type> GetType(Isolate* isolate, Handle<Map> map = Handle<Map>());
+  Handle<Type> GetInputType(Isolate* isolate, Handle<Map> map);
 
-  class State : public EnumSet<CompareNilType, byte> {
-   public:
-    State() : EnumSet<CompareNilType, byte>(0) { }
-    explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
-
-    static State Generic() {
-      State set;
-      set.Add(UNDEFINED);
-      set.Add(NULL_TYPE);
-      set.Add(UNDETECTABLE);
-      set.Add(GENERIC);
-      return set;
-    }
-
-    void Print(StringStream* stream) const;
-  };
-
-  static Handle<Type> StateToType(
-      Isolate* isolate, State state, Handle<Map> map = Handle<Map>());
-
-  // At most 6 different types can be distinguished, because the Code object
-  // only has room for a single byte to hold a set and there are two more
-  // boolean flags we need to store. :-P
-  STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
-
-  CompareNilICStub(NilValue nil, State state = State())
-      : nil_value_(nil), state_(state) {
-  }
+  explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
 
   CompareNilICStub(Code::ExtraICState ic_state,
                    InitializationState init_state = INITIALIZED)
-      : HydrogenCodeStub(init_state) {
-    nil_value_ = NilValueField::decode(ic_state);
-    state_ = State(ExtractTypesFromExtraICState(ic_state));
-  }
+      : HydrogenCodeStub(init_state),
+        nil_value_(NilValueField::decode(ic_state)),
+        state_(State(TypesField::decode(ic_state))) { }
 
   static Handle<Code> GetUninitialized(Isolate* isolate,
                                        NilValue nil) {
@@ -1322,7 +1290,7 @@
   }
 
   virtual InlineCacheState GetICState() {
-    if (state_ == State::Generic()) {
+    if (state_.Contains(GENERIC)) {
       return MEGAMORPHIC;
     } else if (state_.Contains(MONOMORPHIC_MAP)) {
       return MONOMORPHIC;
@@ -1335,22 +1303,15 @@
 
   Handle<Code> GenerateCode();
 
-  // extra ic state = nil_value | type_n-1 | ... | type_0
   virtual Code::ExtraICState GetExtraICState() {
-    return NilValueField::encode(nil_value_) | state_.ToIntegral();
-  }
-  static byte ExtractTypesFromExtraICState(Code::ExtraICState state) {
-    return state & ((1 << NUMBER_OF_TYPES) - 1);
-  }
-  static NilValue ExtractNilValueFromExtraICState(Code::ExtraICState state) {
-    return NilValueField::decode(state);
+    return NilValueField::encode(nil_value_) |
+           TypesField::encode(state_.ToIntegral());
   }
 
   void UpdateStatus(Handle<Object> object);
 
   bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
   NilValue GetNilValue() const { return nil_value_; }
-  State GetState() const { return state_; }
   void ClearState() { state_.RemoveAll(); }
 
   virtual void PrintState(StringStream* stream);
@@ -1359,12 +1320,32 @@
  private:
   friend class CompareNilIC;
 
-  CompareNilICStub(NilValue nil, InitializationState init_state)
-      : HydrogenCodeStub(init_state) {
-    nil_value_ = nil;
-  }
+  enum CompareNilType {
+    UNDEFINED,
+    NULL_TYPE,
+    MONOMORPHIC_MAP,
+    GENERIC,
+    NUMBER_OF_TYPES
+  };
 
-  class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
+  // At most 6 different types can be distinguished, because the Code object
+  // only has room for a single byte to hold a set and there are two more
+  // boolean flags we need to store. :-P
+  STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
+
+  class State : public EnumSet<CompareNilType, byte> {
+   public:
+    State() : EnumSet<CompareNilType, byte>(0) { }
+    explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
+
+    void Print(StringStream* stream) const;
+  };
+
+  CompareNilICStub(NilValue nil, InitializationState init_state)
+      : HydrogenCodeStub(init_state), nil_value_(nil) { }
+
+  class NilValueField : public BitField<NilValue, 0, 1> {};
+  class TypesField    : public BitField<byte,     1, NUMBER_OF_TYPES> {};
 
   virtual CodeStub::Major MajorKey() { return CompareNilIC; }
   virtual int NotMissMinorKey() { return GetExtraICState(); }
@@ -1766,6 +1747,60 @@
 };
 
 
+class DoubleToIStub : public PlatformCodeStub {
+ public:
+  DoubleToIStub(Register source,
+                Register destination,
+                int offset,
+                bool is_truncating) : bit_field_(0) {
+    bit_field_ = SourceRegisterBits::encode(source.code_) |
+      DestinationRegisterBits::encode(destination.code_) |
+      OffsetBits::encode(offset) |
+      IsTruncatingBits::encode(is_truncating);
+  }
+
+  Register source() {
+    Register result = { SourceRegisterBits::decode(bit_field_) };
+    return result;
+  }
+
+  Register destination() {
+    Register result = { DestinationRegisterBits::decode(bit_field_) };
+    return result;
+  }
+
+  bool is_truncating() {
+    return IsTruncatingBits::decode(bit_field_);
+  }
+
+  int offset() {
+    return OffsetBits::decode(bit_field_);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
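+  // bit_field_ layout, from the least significant bit:
+  // source register (6) | destination register (6) | is_truncating (1) |
+  // offset (3).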
+  static const int kBitsPerRegisterNumber = 6;
+  STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
+  class SourceRegisterBits:
+      public BitField<int, 0, kBitsPerRegisterNumber> {};  // NOLINT
+  class DestinationRegisterBits:
+      public BitField<int, kBitsPerRegisterNumber,
+        kBitsPerRegisterNumber> {};  // NOLINT
+  class IsTruncatingBits:
+      public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {};  // NOLINT
+  class OffsetBits:
+      public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {};  // NOLINT
+
+  Major MajorKey() { return DoubleToI; }
+  int MinorKey() { return bit_field_; }
+
+  int bit_field_;
+
+  DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
+};
+
+
 class KeyedLoadFastElementStub : public HydrogenCodeStub {
  public:
   KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
@@ -2210,41 +2245,47 @@
 };
 
 
-class ElementsTransitionAndStoreStub : public PlatformCodeStub {
+class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
  public:
   ElementsTransitionAndStoreStub(ElementsKind from,
                                  ElementsKind to,
                                  bool is_jsarray,
-                                 StrictModeFlag strict_mode,
                                  KeyedAccessStoreMode store_mode)
       : from_(from),
         to_(to),
         is_jsarray_(is_jsarray),
-        strict_mode_(strict_mode),
-        store_mode_(store_mode) {}
-
- private:
-  class FromBits:       public BitField<ElementsKind,        0, 8> {};
-  class ToBits:         public BitField<ElementsKind,        8, 8> {};
-  class IsJSArrayBits:  public BitField<bool,                16, 1> {};
-  class StrictModeBits: public BitField<StrictModeFlag,      17, 1> {};
-  class StoreModeBits: public BitField<KeyedAccessStoreMode, 18, 4> {};
-
-  Major MajorKey() { return ElementsTransitionAndStore; }
-  int MinorKey() {
-    return FromBits::encode(from_) |
-        ToBits::encode(to_) |
-        IsJSArrayBits::encode(is_jsarray_) |
-        StrictModeBits::encode(strict_mode_) |
-        StoreModeBits::encode(store_mode_);
+        store_mode_(store_mode) {
+    ASSERT(!IsFastHoleyElementsKind(from) || IsFastHoleyElementsKind(to));
   }
 
-  void Generate(MacroAssembler* masm);
+  ElementsKind from() const { return from_; }
+  ElementsKind to() const { return to_; }
+  bool is_jsarray() const { return is_jsarray_; }
+  KeyedAccessStoreMode store_mode() const { return store_mode_; }
+
+  Handle<Code> GenerateCode();
+
+  void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  class FromBits:      public BitField<ElementsKind,          0, 8> {};
+  class ToBits:        public BitField<ElementsKind,          8, 8> {};
+  class IsJSArrayBits: public BitField<bool,                 16, 1> {};
+  class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {};
+
+  Major MajorKey() { return ElementsTransitionAndStore; }
+  int NotMissMinorKey() {
+    return FromBits::encode(from()) |
+        ToBits::encode(to()) |
+        IsJSArrayBits::encode(is_jsarray()) |
+        StoreModeBits::encode(store_mode());
+  }
 
   ElementsKind from_;
   ElementsKind to_;
   bool is_jsarray_;
-  StrictModeFlag strict_mode_;
   KeyedAccessStoreMode store_mode_;
 
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
diff --git a/src/compiler.cc b/src/compiler.cc
index 7497f09..c299577 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -565,8 +565,7 @@
     if (info->is_eval()) {
       StackTraceFrameIterator it(isolate);
       if (!it.done()) {
-        script->set_eval_from_shared(
-            JSFunction::cast(it.frame()->function())->shared());
+        script->set_eval_from_shared(it.frame()->function()->shared());
         Code* code = it.frame()->LookupCode();
         int offset = static_cast<int>(
             it.frame()->pc() - code->instruction_start());
diff --git a/src/d8.cc b/src/d8.cc
index 21daa0b..3ac8db0 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -68,10 +68,6 @@
 #include "v8.h"
 #endif  // V8_SHARED
 
-#if defined(V8_I18N_SUPPORT)
-#include "icu_util.h"
-#endif
-
 #if !defined(_WIN32) && !defined(_WIN64)
 #include <unistd.h>  // NOLINT
 #endif
@@ -1586,9 +1582,7 @@
 
 int Shell::Main(int argc, char* argv[]) {
   if (!SetOptions(argc, argv)) return 1;
-#if defined(V8_I18N_SUPPORT)
-  InitializeICU();
-#endif
+  v8::V8::InitializeICU();
 #ifndef V8_SHARED
   i::FLAG_harmony_array_buffer = true;
   i::FLAG_harmony_typed_arrays = true;
diff --git a/src/d8.gyp b/src/d8.gyp
index 6a57e12..15d342d 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -80,12 +80,14 @@
           ],
         }],
         ['v8_enable_i18n_support==1', {
-          'sources': [
-            'icu_util.cc',
-            'icu_util.h',
-          ],
           'dependencies': [
-            '<(DEPTH)/third_party/icu/icu.gyp:*',
+            '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
+            '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+          ],
+        }],
+        ['OS=="win" and v8_enable_i18n_support==1', {
+          'dependencies': [
+            '<(DEPTH)/third_party/icu/icu.gyp:icudata',
           ],
         }],
       ],
diff --git a/src/debug.cc b/src/debug.cc
index 5d39a1c..41eac5f 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -965,7 +965,7 @@
 
   // Get the debug info (create it if it does not exist).
   Handle<SharedFunctionInfo> shared =
-      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+      Handle<SharedFunctionInfo>(frame->function()->shared());
   Handle<DebugInfo> debug_info = GetDebugInfo(shared);
 
   // Find the break point where execution has stopped.
@@ -1348,8 +1348,7 @@
     JavaScriptFrame* frame = it.frame();
     if (frame->HasHandler()) {
       // Flood the function with the catch block with break points
-      JSFunction* function = JSFunction::cast(frame->function());
-      FloodWithOneShot(Handle<JSFunction>(function));
+      FloodWithOneShot(Handle<JSFunction>(frame->function()));
       return;
     }
   }
@@ -1415,13 +1414,13 @@
     // breakpoints.
     frames_it.Advance();
     // Fill the function to return to with one-shot break points.
-    JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+    JSFunction* function = frames_it.frame()->function();
     FloodWithOneShot(Handle<JSFunction>(function));
     return;
   }
 
   // Get the debug info (create it if it does not exist).
-  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<JSFunction> function(frame->function());
   Handle<SharedFunctionInfo> shared(function->shared());
   if (!EnsureDebugInfo(shared, function)) {
     // Return if ensuring debug info failed.
@@ -1486,15 +1485,14 @@
       frames_it.Advance();
     }
     // Skip builtin functions on the stack.
-    while (!frames_it.done() &&
-           JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
+    while (!frames_it.done() && frames_it.frame()->function()->IsBuiltin()) {
       frames_it.Advance();
     }
     // Step out: If there is a JavaScript caller frame, we need to
     // flood it with breakpoints.
     if (!frames_it.done()) {
       // Fill the function to return to with one-shot break points.
-      JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+      JSFunction* function = frames_it.frame()->function();
       FloodWithOneShot(Handle<JSFunction>(function));
       // Set target frame pointer.
       ActivateStepOut(frames_it.frame());
@@ -1916,7 +1914,7 @@
         function->shared()->code()->set_gc_metadata(active_code_marker);
       }
     } else if (frame->function()->IsJSFunction()) {
-      JSFunction* function = JSFunction::cast(frame->function());
+      JSFunction* function = frame->function();
       ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
       active_functions->Add(Handle<JSFunction>(function));
       function->shared()->code()->set_gc_metadata(active_code_marker);
@@ -1933,7 +1931,7 @@
 
     if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
 
-    JSFunction* function = JSFunction::cast(frame->function());
+    JSFunction* function = frame->function();
 
     ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
 
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index f322e85..5f59fd9 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -186,7 +186,7 @@
   ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
 
   // Get the function and code from the frame.
-  JSFunction* function = JSFunction::cast(frame->function());
+  JSFunction* function = frame->function();
   Code* code = frame->LookupCode();
 
   // Locate the deoptimization point in the code. As we are at a call the
@@ -542,6 +542,7 @@
   if (function->IsSmi()) {
     function = NULL;
   }
+  ASSERT(from != NULL);
   if (function != NULL && function->IsOptimized()) {
     function->shared()->increment_deopt_count();
     if (bailout_type_ == Deoptimizer::SOFT) {
@@ -573,8 +574,6 @@
   switch (bailout_type_) {
     case Deoptimizer::SOFT:
     case Deoptimizer::EAGER:
-      ASSERT(from_ == NULL);
-      return function->code();
     case Deoptimizer::LAZY: {
       Code* compiled_code =
           isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
@@ -1609,7 +1608,7 @@
   for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
     if (frame_index != 0) it->Advance();
     JavaScriptFrame* frame = it->frame();
-    Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
+    Handle<JSFunction> function(frame->function(), isolate_);
     Handle<JSObject> arguments;
     for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
       if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
diff --git a/src/elements-kind.h b/src/elements-kind.h
index da15192..151dde4 100644
--- a/src/elements-kind.h
+++ b/src/elements-kind.h
@@ -229,6 +229,15 @@
 }
 
 
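+// Smi -> double and double -> object transitions change how the elements
+// are stored, so the backing store has to be reallocated and converted.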
+inline bool DoesTransitionChangeElementsBufferFormat(ElementsKind from_kind,
+                                                     ElementsKind to_kind) {
+  return (IsFastSmiElementsKind(from_kind) &&
+          IsFastDoubleElementsKind(to_kind)) ||
+      (IsFastDoubleElementsKind(from_kind) &&
+       IsFastObjectElementsKind(to_kind));
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ELEMENTS_KIND_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a0f907d..6a2b151 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -215,6 +215,7 @@
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
 DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
+DEFINE_bool(use_allocation_folding, true, "use allocation folding")
 DEFINE_int(max_inlined_source_size, 600,
            "maximum source size in bytes considered for a single inlining")
 DEFINE_int(max_inlined_nodes, 196,
@@ -236,6 +237,7 @@
 DEFINE_bool(trace_gvn, false, "trace global value numbering")
 DEFINE_bool(trace_representation, false, "trace representation types")
 DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
+DEFINE_bool(trace_allocation_folding, false, "trace allocation folding")
 DEFINE_bool(trace_track_allocation_sites, false,
             "trace the tracking of allocation sites")
 DEFINE_bool(trace_migration, false, "trace object migration")
@@ -348,6 +350,8 @@
             "enable use of VFP3 instructions if available")
 DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
             "enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_neon, true,
+            "enable use of NEON instructions if available (ARM only)")
 DEFINE_bool(enable_sudiv, true,
             "enable use of SDIV and UDIV instructions if available (ARM only)")
 DEFINE_bool(enable_movw_movt, false,
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 8d10645..d097ed1 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -274,10 +274,8 @@
 }
 
 
-inline Object* JavaScriptFrame::function() const {
-  Object* result = function_slot_object();
-  ASSERT(result->IsJSFunction());
-  return result;
+inline JSFunction* JavaScriptFrame::function() const {
+  return JSFunction::cast(function_slot_object());
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index b20a7ea..29d3456 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -205,7 +205,7 @@
 
 bool StackTraceFrameIterator::IsValidFrame() {
     if (!frame()->function()->IsJSFunction()) return false;
-    Object* script = JSFunction::cast(frame()->function())->shared()->script();
+    Object* script = frame()->function()->shared()->script();
     // Don't show functions from native scripts to user.
     return (script->IsScript() &&
             Script::TYPE_NATIVE != Script::cast(script)->type()->value());
@@ -724,8 +724,7 @@
 
 
 Code* JavaScriptFrame::unchecked_code() const {
-  JSFunction* function = JSFunction::cast(this->function());
-  return function->code();
+  return function()->code();
 }
 
 
@@ -733,8 +732,7 @@
   ASSERT(can_access_heap_objects() &&
          isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
 
-  JSFunction* function = JSFunction::cast(this->function());
-  return function->shared()->formal_parameter_count();
+  return function()->shared()->formal_parameter_count();
 }
 
 
@@ -745,7 +743,7 @@
 
 void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
   ASSERT(functions->length() == 0);
-  functions->Add(JSFunction::cast(function()));
+  functions->Add(function());
 }
 
 
@@ -754,7 +752,7 @@
   Code* code_pointer = LookupCode();
   int offset = static_cast<int>(pc() - code_pointer->address());
   FrameSummary summary(receiver(),
-                       JSFunction::cast(function()),
+                       function(),
                        code_pointer,
                        offset,
                        IsConstructor());
@@ -775,40 +773,35 @@
       JavaScriptFrame* frame = it.frame();
       if (frame->IsConstructor()) PrintF(file, "new ");
       // function name
-      Object* maybe_fun = frame->function();
-      if (maybe_fun->IsJSFunction()) {
-        JSFunction* fun = JSFunction::cast(maybe_fun);
-        fun->PrintName();
-        Code* js_code = frame->unchecked_code();
-        Address pc = frame->pc();
-        int code_offset =
-            static_cast<int>(pc - js_code->instruction_start());
-        PrintF("+%d", code_offset);
-        SharedFunctionInfo* shared = fun->shared();
-        if (print_line_number) {
-          Code* code = Code::cast(
-              v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
-          int source_pos = code->SourcePosition(pc);
-          Object* maybe_script = shared->script();
-          if (maybe_script->IsScript()) {
-            Handle<Script> script(Script::cast(maybe_script));
-            int line = GetScriptLineNumberSafe(script, source_pos) + 1;
-            Object* script_name_raw = script->name();
-            if (script_name_raw->IsString()) {
-              String* script_name = String::cast(script->name());
-              SmartArrayPointer<char> c_script_name =
-                  script_name->ToCString(DISALLOW_NULLS,
-                                         ROBUST_STRING_TRAVERSAL);
-              PrintF(file, " at %s:%d", *c_script_name, line);
-            } else {
-              PrintF(file, " at <unknown>:%d", line);
-            }
+      JSFunction* fun = frame->function();
+      fun->PrintName();
+      Code* js_code = frame->unchecked_code();
+      Address pc = frame->pc();
+      int code_offset =
+          static_cast<int>(pc - js_code->instruction_start());
+      PrintF("+%d", code_offset);
+      SharedFunctionInfo* shared = fun->shared();
+      if (print_line_number) {
+        Code* code = Code::cast(
+            v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
+        int source_pos = code->SourcePosition(pc);
+        Object* maybe_script = shared->script();
+        if (maybe_script->IsScript()) {
+          Handle<Script> script(Script::cast(maybe_script));
+          int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+          Object* script_name_raw = script->name();
+          if (script_name_raw->IsString()) {
+            String* script_name = String::cast(script->name());
+            SmartArrayPointer<char> c_script_name =
+                script_name->ToCString(DISALLOW_NULLS,
+                                       ROBUST_STRING_TRAVERSAL);
+            PrintF(file, " at %s:%d", *c_script_name, line);
           } else {
-            PrintF(file, " at <unknown>:<unknown>");
+            PrintF(file, " at <unknown>:%d", line);
           }
+        } else {
+          PrintF(file, " at <unknown>:<unknown>");
         }
-      } else {
-        PrintF("<unknown>");
       }
 
       if (print_args) {
@@ -913,7 +906,7 @@
 JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
                                       int literal_id) {
   if (literal_id == Translation::kSelfLiteralId) {
-    return JSFunction::cast(function());
+    return function();
   }
 
   return JSFunction::cast(literal_array->get(literal_id));
@@ -1018,7 +1011,7 @@
     int* deopt_index) {
   ASSERT(is_optimized());
 
-  JSFunction* opt_function = JSFunction::cast(function());
+  JSFunction* opt_function = function();
   Code* code = opt_function->code();
 
   // The code object may have been replaced by lazy deoptimization. Fall
@@ -1132,7 +1125,7 @@
                             int index) const {
   HandleScope scope(isolate());
   Object* receiver = this->receiver();
-  Object* function = this->function();
+  JSFunction* function = this->function();
 
   accumulator->PrintSecurityTokenIfChanged(function);
   PrintIndex(accumulator, mode, index);
@@ -1146,29 +1139,27 @@
   // or context slots.
   Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
 
-  if (function->IsJSFunction()) {
-    Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
-    scope_info = Handle<ScopeInfo>(shared->scope_info());
-    Object* script_obj = shared->script();
-    if (script_obj->IsScript()) {
-      Handle<Script> script(Script::cast(script_obj));
-      accumulator->Add(" [");
-      accumulator->PrintName(script->name());
+  Handle<SharedFunctionInfo> shared(function->shared());
+  scope_info = Handle<ScopeInfo>(shared->scope_info());
+  Object* script_obj = shared->script();
+  if (script_obj->IsScript()) {
+    Handle<Script> script(Script::cast(script_obj));
+    accumulator->Add(" [");
+    accumulator->PrintName(script->name());
 
-      Address pc = this->pc();
-      if (code != NULL && code->kind() == Code::FUNCTION &&
-          pc >= code->instruction_start() && pc < code->instruction_end()) {
-        int source_pos = code->SourcePosition(pc);
-        int line = GetScriptLineNumberSafe(script, source_pos) + 1;
-        accumulator->Add(":%d", line);
-      } else {
-        int function_start_pos = shared->start_position();
-        int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
-        accumulator->Add(":~%d", line);
-      }
-
-      accumulator->Add("] ");
+    Address pc = this->pc();
+    if (code != NULL && code->kind() == Code::FUNCTION &&
+        pc >= code->instruction_start() && pc < code->instruction_end()) {
+      int source_pos = code->SourcePosition(pc);
+      int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+      accumulator->Add(":%d", line);
+    } else {
+      int function_start_pos = shared->start_position();
+      int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
+      accumulator->Add(":~%d", line);
     }
+
+    accumulator->Add("] ");
   }
 
   accumulator->Add("(this=%o", receiver);
@@ -1258,7 +1249,7 @@
 
   // Print details about the function.
   if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
-    SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
+    SharedFunctionInfo* shared = function->shared();
     accumulator->Add("--------- s o u r c e   c o d e ---------\n");
     shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
     accumulator->Add("\n-----------------------------------------\n");
@@ -1273,10 +1264,8 @@
                                   int index) const {
   int actual = ComputeParametersCount();
   int expected = -1;
-  Object* function = this->function();
-  if (function->IsJSFunction()) {
-    expected = JSFunction::cast(function)->shared()->formal_parameter_count();
-  }
+  JSFunction* function = this->function();
+  expected = function->shared()->formal_parameter_count();
 
   PrintIndex(accumulator, mode, index);
   accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
diff --git a/src/frames.h b/src/frames.h
index 0a5b609..7e667a6 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -543,7 +543,7 @@
   virtual Type type() const { return JAVA_SCRIPT; }
 
   // Accessors.
-  inline Object* function() const;
+  inline JSFunction* function() const;
   inline Object* receiver() const;
   inline void set_receiver(Object* value);
 
diff --git a/src/globals.h b/src/globals.h
index baacf52..f00e676 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -89,12 +89,6 @@
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#endif
 #elif defined(__MIPSEL__)
 #define V8_HOST_ARCH_MIPS 1
 #define V8_HOST_ARCH_32_BIT 1
@@ -102,6 +96,16 @@
 #error Host architecture was not detected as supported by v8
 #endif
 
+#if defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7R__) || \
+    defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+# ifndef CAN_USE_VFP3_INSTRUCTIONS
+#  define CAN_USE_VFP3_INSTRUCTIONS
+# endif
+#endif
+
+
 // Target architecture detection. This may be set externally. If not, detect
 // in the same way as the host architecture, that is, target the native
 // environment as presented by the compiler.
diff --git a/src/handles.cc b/src/handles.cc
index fc45aaa..c02801f 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -650,6 +650,10 @@
                                  isolate->heap()->undefined_value(),
                                  v8::ACCESS_KEYS)) {
       isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+      if (isolate->has_scheduled_exception()) {
+        isolate->PromoteScheduledException();
+        *threw = true;
+      }
       break;
     }
 
diff --git a/src/heap.cc b/src/heap.cc
index ebf1487..0b2fe16 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3172,6 +3172,11 @@
   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
 
+  { MaybeObject* maybe_obj = AllocateSymbol();
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_observed_symbol(Symbol::cast(obj));
+
   // Handling of script id generation is in Factory::NewScript.
   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
 
diff --git a/src/heap.h b/src/heap.h
index 4bc2d30..2c97faf 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -187,7 +187,8 @@
   V(Map, external_map, ExternalMap)                                            \
   V(Symbol, frozen_symbol, FrozenSymbol)                                       \
   V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
-      EmptySlowElementDictionary)
+      EmptySlowElementDictionary)                                              \
+  V(Symbol, observed_symbol, ObservedSymbol)
 
 #define ROOT_LIST(V)                                  \
   STRONG_ROOT_LIST(V)                                 \
diff --git a/src/hydrogen-canonicalize.cc b/src/hydrogen-canonicalize.cc
new file mode 100644
index 0000000..40cbe4c
--- /dev/null
+++ b/src/hydrogen-canonicalize.cc
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-canonicalize.h"
+
+namespace v8 {
+namespace internal {
+
+void HCanonicalizePhase::Run() {
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  // Before removing no-op instructions, save their semantic value.
+  // We must be careful not to set the flag unnecessarily, because GVN
+  // cannot identify two instructions as equal when their flag values differ.
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->IsArithmeticBinaryOperation() &&
+          instr->representation().IsInteger32() &&
+          instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+              HInstruction::kTruncatingToInt32)) {
+        instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+      }
+    }
+  }
+  // Perform the actual canonicalization pass.
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      HValue* value = instr->Canonicalize();
+      if (value != instr) instr->DeleteAndReplaceWith(value);
+    }
+  }
+}
+
+} }  // namespace v8::internal
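
For reference, the flag sensitivity described in the comment above comes from the way GVN keys instructions: opcode, operands, and semantic flags must all agree for two occurrences to be commoned. A minimal sketch of such a value-numbering key, with illustrative names rather than V8's actual types:

    #include <cstdint>
    #include <tuple>

    // Hypothetical GVN key: two instructions count as "the same" only if
    // opcode, operands, and flags all match. Setting a flag such as
    // kAllUsesTruncatingToInt32 on one of two otherwise-identical adds
    // therefore blocks them from being commoned.
    struct GvnKey {
      int opcode;
      int left_id;
      int right_id;
      uint32_t flags;

      bool operator==(const GvnKey& other) const {
        return std::tie(opcode, left_id, right_id, flags) ==
               std::tie(other.opcode, other.left_id, other.right_id,
                        other.flags);
      }
    };
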
diff --git a/src/hydrogen-canonicalize.h b/src/hydrogen-canonicalize.h
new file mode 100644
index 0000000..d2b289b
--- /dev/null
+++ b/src/hydrogen-canonicalize.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_CANONICALIZE_H_
+#define V8_HYDROGEN_CANONICALIZE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HCanonicalizePhase : public HPhase {
+ public:
+  explicit HCanonicalizePhase(HGraph* graph)
+      : HPhase("H_Canonicalize", graph) { }
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_CANONICALIZE_H_
diff --git a/src/hydrogen-dehoist.cc b/src/hydrogen-dehoist.cc
new file mode 100644
index 0000000..696d22c
--- /dev/null
+++ b/src/hydrogen-dehoist.cc
@@ -0,0 +1,80 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-dehoist.h"
+
+namespace v8 {
+namespace internal {
+
+static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
+  HValue* index = array_operation->GetKey()->ActualValue();
+  if (!index->representation().IsSmiOrInteger32()) return;
+  if (!index->IsAdd() && !index->IsSub()) return;
+
+  HConstant* constant;
+  HValue* subexpression;
+  HBinaryOperation* binary_operation = HBinaryOperation::cast(index);
+  if (binary_operation->left()->IsConstant()) {
+    subexpression = binary_operation->right();
+    constant = HConstant::cast(binary_operation->left());
+  } else if (binary_operation->right()->IsConstant()) {
+    subexpression = binary_operation->left();
+    constant = HConstant::cast(binary_operation->right());
+  } else {
+    return;
+  }
+
+  if (!constant->HasInteger32Value()) return;
+  int32_t sign = binary_operation->IsSub() ? -1 : 1;
+  int32_t value = constant->Integer32Value() * sign;
+  // We limit offset values to 30 bits because we want to avoid the risk of
+  // overflows when the offset is added to the object header size.
+  if (value >= 1 << 30 || value < 0) return;
+  array_operation->SetKey(subexpression);
+  if (binary_operation->HasNoUses()) {
+    binary_operation->DeleteAndReplaceWith(NULL);
+  }
+  array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+  array_operation->SetDehoisted(true);
+}
+
+
+void HDehoistIndexComputationsPhase::Run() {
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->IsLoadKeyed()) {
+        DehoistArrayIndex(HLoadKeyed::cast(instr));
+      } else if (instr->IsStoreKeyed()) {
+        DehoistArrayIndex(HStoreKeyed::cast(instr));
+      }
+    }
+  }
+}
+
+} }  // namespace v8::internal
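
The pass above peels a constant out of an add or sub in the index so the backend can fold it into base-plus-offset addressing. A freestanding sketch of the same split, over a toy operand type (all names here are hypothetical, and the sketch is deliberately conservative for subtraction):

    #include <cstdint>
    #include <optional>

    // Toy operand: either a constant or an opaque value id.
    struct Operand {
      bool is_constant;
      int32_t constant;  // valid when is_constant
      int value_id;      // valid when !is_constant
    };

    struct Split {
      int subexpression_id;  // remaining dynamic part of the index
      uint32_t offset;       // constant part, folded into the access
    };

    std::optional<Split> SplitIndex(char op, Operand left, Operand right) {
      if (op != '+' && op != '-') return std::nullopt;
      const Operand* constant = nullptr;
      const Operand* subexpression = nullptr;
      if (op == '+') {
        if (left.is_constant) {
          constant = &left;
          subexpression = &right;
        } else if (right.is_constant) {
          constant = &right;
          subexpression = &left;
        } else {
          return std::nullopt;
        }
      } else {
        // Only "a - c" rewrites to base + offset; "c - a" does not, so
        // subtraction requires the constant on the right here.
        if (!right.is_constant) return std::nullopt;
        constant = &right;
        subexpression = &left;
      }
      int64_t value = (op == '-') ? -static_cast<int64_t>(constant->constant)
                                  : static_cast<int64_t>(constant->constant);
      // Cap offsets below 2^30 so that adding the object header size
      // cannot overflow, matching the guard in DehoistArrayIndex.
      if (value < 0 || value >= (int64_t{1} << 30)) return std::nullopt;
      return Split{subexpression->value_id, static_cast<uint32_t>(value)};
    }
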
diff --git a/src/hydrogen-dehoist.h b/src/hydrogen-dehoist.h
new file mode 100644
index 0000000..140dc6e
--- /dev/null
+++ b/src/hydrogen-dehoist.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_DEHOIST_H_
+#define V8_HYDROGEN_DEHOIST_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDehoistIndexComputationsPhase : public HPhase {
+ public:
+  explicit HDehoistIndexComputationsPhase(HGraph* graph)
+      : HPhase("H_Dehoist index computations", graph) { }
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_DEHOIST_H_
diff --git a/src/hydrogen-gvn.cc b/src/hydrogen-gvn.cc
index fd7430c..4a946ea 100644
--- a/src/hydrogen-gvn.cc
+++ b/src/hydrogen-gvn.cc
@@ -619,7 +619,7 @@
   GVNFlagSet side_effects;
   for (int i = 0; i < dominated->predecessors()->length(); ++i) {
     HBasicBlock* block = dominated->predecessors()->at(i);
-    if (dominator->block_id() < block->block_id() &&
+    if (dominator->block_id() <= block->block_id() &&
         block->block_id() < dominated->block_id() &&
         visited_on_paths_.Add(block->block_id())) {
       side_effects.Add(block_side_effects_[block->block_id()]);
@@ -776,9 +776,28 @@
     }
 
     // Go through all instructions of the current block.
-    HInstruction* instr = block->first();
-    while (instr != NULL) {
-      HInstruction* next = instr->next();
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+        for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+          HValue* other = dominators->at(i);
+          GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+          GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+          if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+              (other != NULL)) {
+            TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+                        i,
+                        instr->id(),
+                        instr->Mnemonic(),
+                        other->id(),
+                        other->Mnemonic());
+            instr->HandleSideEffectDominator(changes_flag, other);
+          }
+        }
+      }
+      // Skip instructions that were unlinked during graph traversal.
+      if (!instr->IsLinked()) continue;
+
       GVNFlagSet flags = instr->ChangesFlags();
       if (!flags.IsEmpty()) {
         // Clear all instructions in the map that are affected by side effects.
@@ -804,25 +823,6 @@
           map->Add(instr, zone());
         }
       }
-      if (instr->IsLinked() &&
-          instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
-        for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
-          HValue* other = dominators->at(i);
-          GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
-          GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
-          if (instr->DependsOnFlags().Contains(depends_on_flag) &&
-              (other != NULL)) {
-            TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
-                        i,
-                        instr->id(),
-                        instr->Mnemonic(),
-                        other->id(),
-                        other->Mnemonic());
-            instr->SetSideEffectDominator(changes_flag, other);
-          }
-        }
-      }
-      instr = next;
     }
 
     HBasicBlock* dominator_block;
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 0143baa..1e6073a 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1654,8 +1654,8 @@
 }
 
 
-void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
-                                        HValue* dominator) {
+void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
+                                           HValue* dominator) {
   ASSERT(side_effect == kChangesMaps);
   // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
   // type information is rich enough we should generalize this to any HType
@@ -3183,6 +3183,85 @@
 }
 
 
+void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
+                                          HValue* dominator) {
+  ASSERT(side_effect == kChangesNewSpacePromotion);
+  // Try to fold allocations together with their dominating allocations.
+  if (!FLAG_use_allocation_folding || !dominator->IsAllocate()) {
+    return;
+  }
+  HAllocate* dominator_allocate_instr = HAllocate::cast(dominator);
+  HValue* dominator_size = dominator_allocate_instr->size();
+  HValue* current_size = size();
+  // We can only fold allocations that are guaranteed to be in new space.
+  // TODO(hpayer): Support double aligned allocations.
+  // TODO(hpayer): Add support for non-constant allocation in dominator.
+  if (!GuaranteedInNewSpace() || MustAllocateDoubleAligned() ||
+      !current_size->IsInteger32Constant() ||
+      !dominator_allocate_instr->GuaranteedInNewSpace() ||
+      dominator_allocate_instr->MustAllocateDoubleAligned() ||
+      !dominator_size->IsInteger32Constant()) {
+    return;
+  }
+
+  // First update the size of the dominator allocate instruction.
+  int32_t dominator_size_constant =
+      HConstant::cast(dominator_size)->GetInteger32Constant();
+  int32_t current_size_constant =
+      HConstant::cast(current_size)->GetInteger32Constant();
+  HBasicBlock* block = dominator->block();
+  Zone* zone = block->zone();
+  HInstruction* new_dominator_size = new(zone) HConstant(
+      dominator_size_constant + current_size_constant);
+  new_dominator_size->InsertBefore(dominator_allocate_instr);
+  dominator_allocate_instr->UpdateSize(new_dominator_size);
+
+#ifdef VERIFY_HEAP
+  HInstruction* free_space_instr =
+      new(zone) HInnerAllocatedObject(dominator_allocate_instr,
+                                      dominator_size_constant,
+                                      type());
+  free_space_instr->InsertAfter(dominator_allocate_instr);
+  HConstant* filler_map = new(zone) HConstant(
+      isolate()->factory()->free_space_map(),
+      UniqueValueId(isolate()->heap()->free_space_map()),
+      Representation::Tagged(),
+      HType::Tagged(),
+      false,
+      true,
+      false,
+      false);
+  filler_map->InsertAfter(free_space_instr);
+
+  HInstruction* store_map = new(zone) HStoreNamedField(
+      free_space_instr, HObjectAccess::ForMap(), filler_map);
+  store_map->SetFlag(HValue::kHasNoObservableSideEffects);
+  store_map->InsertAfter(filler_map);
+
+  HInstruction* free_space_size = new(zone) HConstant(current_size_constant);
+  free_space_size->InsertAfter(store_map);
+  HObjectAccess access =
+      HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
+  HInstruction* store_size = new(zone) HStoreNamedField(
+      free_space_instr, access, free_space_size);
+  store_size->SetFlag(HValue::kHasNoObservableSideEffects);
+  store_size->InsertAfter(free_space_size);
+#endif
+
+  // After that replace the dominated allocate instruction.
+  HInstruction* dominated_allocate_instr =
+      new(zone) HInnerAllocatedObject(dominator_allocate_instr,
+                                      dominator_size_constant,
+                                      type());
+  dominated_allocate_instr->InsertBefore(this);
+  DeleteAndReplaceWith(dominated_allocate_instr);
+  if (FLAG_trace_allocation_folding) {
+    PrintF("#%d (%s) folded into #%d (%s)\n",
+        id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+  }
+}
+
+
 void HAllocate::PrintDataTo(StringStream* stream) {
   size()->PrintNameTo(stream);
   if (!GuaranteedInNewSpace()) stream->Add(" (pretenure)");
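
HAllocate::HandleSideEffectDominator above rests on a property of bump-pointer allocation: two back-to-back new-space allocations of constant sizes s1 and s2 are equivalent to a single allocation of s1 + s2 with the second object placed at offset s1 inside it, which is exactly what HInnerAllocatedObject expresses. A toy bump allocator makes the equivalence concrete (class and names are ours, not V8's):

    #include <cassert>
    #include <cstddef>

    class BumpAllocator {
     public:
      explicit BumpAllocator(char* start) : top_(start) {}
      char* Allocate(size_t size) {
        char* result = top_;
        top_ += size;  // bump the allocation pointer
        return result;
      }
     private:
      char* top_;
    };

    int main() {
      alignas(8) static char arena[1024];

      // Unfolded: two consecutive allocations.
      BumpAllocator a(arena);
      char* first = a.Allocate(24);
      char* second = a.Allocate(16);

      // Folded: one allocation of the combined size; the dominated
      // allocation becomes an inner pointer at the dominator's old size.
      BumpAllocator b(arena);
      char* folded = b.Allocate(24 + 16);
      char* inner = folded + 24;

      assert(first == folded && second == inner);
      return 0;
    }
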
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 7bca40c..cb2225d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -790,7 +790,7 @@
     // occurrences of the instruction are indeed the same.
     kUseGVN,
     // Track instructions that are dominating side effects. If an instruction
-    // sets this flag, it must implement SetSideEffectDominator() and should
+    // sets this flag, it must implement HandleSideEffectDominator() and should
     // indicate which side effects to track by setting GVN flags.
     kTrackSideEffectDominators,
     kCanOverflow,
@@ -1109,7 +1109,8 @@
   // This function must be overridden for instructions which have the
   // kTrackSideEffectDominators flag set, to track instructions that are
   // dominating side effects.
-  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+  virtual void HandleSideEffectDominator(GVNFlag side_effect,
+                                         HValue* dominator) {
     UNREACHABLE();
   }
 
@@ -2774,7 +2775,8 @@
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
-  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator);
+  virtual void HandleSideEffectDominator(GVNFlag side_effect,
+                                         HValue* dominator);
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
 
@@ -4975,10 +4977,12 @@
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+
   virtual Handle<Map> GetMonomorphicJSObjectMap() {
     ASSERT(!constructor_initial_map_.is_null());
     return constructor_initial_map_;
   }
+
   virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
@@ -5007,7 +5011,9 @@
     SetOperandAt(0, context);
     SetOperandAt(1, size);
     set_representation(Representation::Tagged());
+    SetFlag(kTrackSideEffectDominators);
     SetGVNFlag(kChangesNewSpacePromotion);
+    SetGVNFlag(kDependsOnNewSpacePromotion);
   }
 
   static Flags DefaultFlags() {
@@ -5025,6 +5031,7 @@
 
   HValue* context() { return OperandAt(0); }
   HValue* size() { return OperandAt(1); }
+  HType type() { return type_; }
 
   virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
@@ -5061,6 +5068,13 @@
     return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
   }
 
+  void UpdateSize(HValue* size) {
+    SetOperandAt(1, size);
+  }
+
+  virtual void HandleSideEffectDominator(GVNFlag side_effect,
+                                         HValue* dominator);
+
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(Allocate)
@@ -5073,8 +5087,9 @@
 
 class HInnerAllocatedObject: public HTemplateInstruction<1> {
  public:
-  HInnerAllocatedObject(HValue* value, int offset)
-      : offset_(offset) {
+  HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
+      : offset_(offset),
+        type_(type) {
     ASSERT(value->IsAllocate());
     SetOperandAt(0, value);
     set_representation(Representation::Tagged());
@@ -5087,12 +5102,15 @@
     return Representation::Tagged();
   }
 
+  virtual HType CalculateInferredType() { return type_; }
+
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
 
  private:
   int offset_;
+  HType type_;
 };
 
 
@@ -5815,7 +5833,8 @@
     }
     return Representation::Tagged();
   }
-  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+  virtual void HandleSideEffectDominator(GVNFlag side_effect,
+                                         HValue* dominator) {
     ASSERT(side_effect == kChangesNewSpacePromotion);
     new_space_dominator_ = dominator;
   }
@@ -6017,7 +6036,8 @@
     return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
   }
 
-  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+  virtual void HandleSideEffectDominator(GVNFlag side_effect,
+                                         HValue* dominator) {
     ASSERT(side_effect == kChangesNewSpacePromotion);
     new_space_dominator_ = dominator;
   }
diff --git a/src/hydrogen-removable-simulates.cc b/src/hydrogen-removable-simulates.cc
new file mode 100644
index 0000000..f952832
--- /dev/null
+++ b/src/hydrogen-removable-simulates.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-removable-simulates.h"
+
+namespace v8 {
+namespace internal {
+
+void HMergeRemovableSimulatesPhase::Run() {
+  ZoneList<HSimulate*> mergelist(2, zone());
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    // Make sure the merge list is empty at the start of a block.
+    ASSERT(mergelist.is_empty());
+    // Nasty heuristic: Never remove the first simulate in a block. This
+    // just so happens to have a beneficial effect on register allocation.
+    bool first = true;
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* current = it.Current();
+      if (current->IsLeaveInlined()) {
+        // Never fold simulates from inlined environments into simulates
+        // in the outer environment.
+        // (Before each HEnterInlined, there is a non-foldable HSimulate
+        // anyway, so we get the barrier in the other direction for free.)
+        // Simply remove all accumulated simulates without merging.  This
+        // is safe because simulates after instructions with side effects
+        // are never added to the merge list.
+        while (!mergelist.is_empty()) {
+          mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+        }
+        continue;
+      }
+      if (current->IsReturn()) {
+        // Drop mergeable simulates in the list. This is safe because
+        // simulates after instructions with side effects are never added
+        // to the merge list.
+        while (!mergelist.is_empty()) {
+          mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+        }
+        continue;
+      }
+      // Skip the non-simulates and the first simulate.
+      if (!current->IsSimulate()) continue;
+      if (first) {
+        first = false;
+        continue;
+      }
+      HSimulate* current_simulate = HSimulate::cast(current);
+      if ((current_simulate->previous()->HasObservableSideEffects() &&
+           !current_simulate->next()->IsSimulate()) ||
+          !current_simulate->is_candidate_for_removal()) {
+        // This simulate is not suitable for folding.
+        // Fold the ones accumulated so far.
+        current_simulate->MergeWith(&mergelist);
+        continue;
+      } else {
+        // Accumulate this simulate for folding later on.
+        mergelist.Add(current_simulate, zone());
+      }
+    }
+
+    if (!mergelist.is_empty()) {
+      // Merge the accumulated simulates at the end of the block.
+      HSimulate* last = mergelist.RemoveLast();
+      last->MergeWith(&mergelist);
+    }
+  }
+}
+
+} }  // namespace v8::internal
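
Stripped of the inlining and return barriers, the control shape of the pass above is: keep the first simulate, accumulate removable ones, and fold the accumulated list into the next simulate that must stay (or into the last one at the block end). A schematic sketch of just that shape, with a stand-in type instead of HSimulate:

    #include <cstddef>
    #include <vector>

    struct Sim {
      bool removable;
      size_t absorbed = 0;  // how many earlier simulates this one merged
    };

    void MergeRemovable(std::vector<Sim*>& block) {
      std::vector<Sim*> mergelist;
      bool first = true;
      for (Sim* s : block) {
        if (first) {  // never remove the first simulate in a block
          first = false;
          continue;
        }
        if (!s->removable) {
          s->absorbed += mergelist.size();  // fold accumulated simulates
          mergelist.clear();
        } else {
          mergelist.push_back(s);  // accumulate for folding later on
        }
      }
      if (!mergelist.empty()) {  // merge leftovers at the block end
        Sim* last = mergelist.back();
        mergelist.pop_back();
        last->absorbed += mergelist.size();
      }
    }
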
diff --git a/src/hydrogen-removable-simulates.h b/src/hydrogen-removable-simulates.h
new file mode 100644
index 0000000..f5bcd6d
--- /dev/null
+++ b/src/hydrogen-removable-simulates.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#define V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HMergeRemovableSimulatesPhase : public HPhase {
+ public:
+  explicit HMergeRemovableSimulatesPhase(HGraph* graph)
+      : HPhase("H_Merge removable simulates", graph) { }
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 1b11330..842c2ca 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -34,7 +34,9 @@
 #include "full-codegen.h"
 #include "hashmap.h"
 #include "hydrogen-bce.h"
+#include "hydrogen-canonicalize.h"
 #include "hydrogen-dce.h"
+#include "hydrogen-dehoist.h"
 #include "hydrogen-environment-liveness.h"
 #include "hydrogen-escape-analysis.h"
 #include "hydrogen-infer-representation.h"
@@ -44,6 +46,7 @@
 #include "hydrogen-osr.h"
 #include "hydrogen-range-analysis.h"
 #include "hydrogen-redundant-phi.h"
+#include "hydrogen-removable-simulates.h"
 #include "hydrogen-representation-changes.h"
 #include "hydrogen-sce.h"
 #include "hydrogen-uint32-analysis.h"
@@ -1143,21 +1146,22 @@
   Zone* zone = this->zone();
   IfBuilder length_checker(this);
 
-  length_checker.If<HCompareNumericAndBranch>(length, key, Token::EQ);
+  Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
+  length_checker.If<HCompareNumericAndBranch>(key, length, token);
+
   length_checker.Then();
 
   HValue* current_capacity = AddLoadFixedArrayLength(elements);
 
   IfBuilder capacity_checker(this);
 
-  capacity_checker.If<HCompareNumericAndBranch>(length, current_capacity,
-                                                Token::EQ);
+  capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
+                                                Token::GTE);
   capacity_checker.Then();
 
   HValue* context = environment()->LookupContext();
 
-  HValue* new_capacity =
-      BuildNewElementsCapacity(context, current_capacity);
+  HValue* new_capacity = BuildNewElementsCapacity(context, key);
 
   HValue* new_elements = BuildGrowElementsCapacity(object, elements,
                                                    kind, kind, length,
@@ -1171,7 +1175,7 @@
 
   if (is_js_array) {
     HValue* new_length = AddInstruction(
-        HAdd::New(zone, context, length, graph_->GetConstant1()));
+        HAdd::New(zone, context, key, graph_->GetConstant1()));
     new_length->ClearFlag(HValue::kCanOverflow);
 
     Representation representation = IsFastElementsKind(kind)
@@ -1181,10 +1185,9 @@
   }
 
   length_checker.Else();
-
   Add<HBoundsCheck>(key, length);
-  environment()->Push(elements);
 
+  environment()->Push(elements);
   length_checker.End();
 
   return environment()->Pop();
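
The net effect of this hunk is that out-of-bounds keyed stores into holey arrays grow from the key rather than from the old length: the length check becomes key >= length, the capacity check becomes key >= capacity, and the new length is key + 1. A toy model of the revised grow path (BuildNewElementsCapacity actually applies a growth factor to the key; the resize here is simplified to key + 1):

    #include <cstdint>
    #include <vector>

    struct ToyArray {
      std::vector<int32_t> elements;  // backing store; capacity == size()
      uint32_t length = 0;
    };

    void StoreGrow(ToyArray& a, uint32_t key, int32_t value) {
      if (key >= a.length) {               // GTE length check (holey kinds)
        if (key >= a.elements.size()) {    // GTE capacity check
          a.elements.resize(key + 1);      // capacity derived from the key
        }
        a.length = key + 1;                // new length = key + 1
      }
      a.elements[key] = value;
    }
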
@@ -2090,33 +2093,6 @@
 }
 
 
-void HGraph::Canonicalize() {
-  HPhase phase("H_Canonicalize", this);
-  // Before removing no-op instructions, save their semantic value.
-  // We must be careful not to set the flag unnecessarily, because GVN
-  // cannot identify two instructions when their flag value differs.
-  for (int i = 0; i < blocks()->length(); ++i) {
-    for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
-      HInstruction* instr = it.Current();
-      if (instr->IsArithmeticBinaryOperation() &&
-          instr->representation().IsInteger32() &&
-          instr->HasAtLeastOneUseWithFlagAndNoneWithout(
-              HInstruction::kTruncatingToInt32)) {
-        instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
-      }
-    }
-  }
-  // Perform actual Canonicalization pass.
-  for (int i = 0; i < blocks()->length(); ++i) {
-    for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
-      HInstruction* instr = it.Current();
-      HValue* value = instr->Canonicalize();
-      if (value != instr) instr->DeleteAndReplaceWith(value);
-    }
-  }
-}
-
-
 // Block ordering was implemented with two mutually recursive methods,
 // HGraph::Postorder and HGraph::PostorderLoopBlocks.
 // The recursion could lead to stack overflow so the algorithm has been
@@ -2572,69 +2548,6 @@
 }
 
 
-void HGraph::MergeRemovableSimulates() {
-  HPhase phase("H_Merge removable simulates", this);
-  ZoneList<HSimulate*> mergelist(2, zone());
-  for (int i = 0; i < blocks()->length(); ++i) {
-    HBasicBlock* block = blocks()->at(i);
-    // Make sure the merge list is empty at the start of a block.
-    ASSERT(mergelist.is_empty());
-    // Nasty heuristic: Never remove the first simulate in a block. This
-    // just so happens to have a beneficial effect on register allocation.
-    bool first = true;
-    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
-      HInstruction* current = it.Current();
-      if (current->IsLeaveInlined()) {
-        // Never fold simulates from inlined environments into simulates
-        // in the outer environment.
-        // (Before each HEnterInlined, there is a non-foldable HSimulate
-        // anyway, so we get the barrier in the other direction for free.)
-        // Simply remove all accumulated simulates without merging.  This
-        // is safe because simulates after instructions with side effects
-        // are never added to the merge list.
-        while (!mergelist.is_empty()) {
-          mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
-        }
-        continue;
-      }
-      if (current->IsReturn()) {
-        // Drop mergeable simulates in the list. This is safe because
-        // simulates after instructions with side effects are never added
-        // to the merge list.
-        while (!mergelist.is_empty()) {
-          mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
-        }
-        continue;
-      }
-      // Skip the non-simulates and the first simulate.
-      if (!current->IsSimulate()) continue;
-      if (first) {
-        first = false;
-        continue;
-      }
-      HSimulate* current_simulate = HSimulate::cast(current);
-      if ((current_simulate->previous()->HasObservableSideEffects() &&
-           !current_simulate->next()->IsSimulate()) ||
-          !current_simulate->is_candidate_for_removal()) {
-        // This simulate is not suitable for folding.
-        // Fold the ones accumulated so far.
-        current_simulate->MergeWith(&mergelist);
-        continue;
-      } else {
-        // Accumulate this simulate for folding later on.
-        mergelist.Add(current_simulate, zone());
-      }
-    }
-
-    if (!mergelist.is_empty()) {
-      // Merge the accumulated simulates at the end of the block.
-      HSimulate* last = mergelist.RemoveLast();
-      last->MergeWith(&mergelist);
-    }
-  }
-}
-
-
 void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
   if (!phi->CheckFlag(HValue::kAllowUndefinedAsNaN)) return;
   phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
@@ -3137,7 +3050,7 @@
   // Remove HSimulate instructions that have turned out not to be needed
   // after all by folding them into the following HSimulate.
   // This must happen after inferring representations.
-  MergeRemovableSimulates();
+  Run<HMergeRemovableSimulatesPhase>();
 
   MarkDeoptimizeOnUndefined();
   Run<HRepresentationChangesPhase>();
@@ -3149,7 +3062,7 @@
   // zero.
   if (FLAG_opt_safe_uint32_operations) Run<HUint32AnalysisPhase>();
 
-  if (FLAG_use_canonicalizing) Canonicalize();
+  if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
 
   if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
 
@@ -3166,7 +3079,7 @@
   if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) {
     Run<HBoundsCheckEliminationPhase>();
   }
-  if (FLAG_array_index_dehoisting) DehoistSimpleArrayIndexComputations();
+  if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
   if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
 
   RestoreActualValues();
@@ -3219,77 +3132,6 @@
 }
 
 
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
-  HValue* index = array_operation->GetKey()->ActualValue();
-  if (!index->representation().IsSmiOrInteger32()) return;
-
-  HConstant* constant;
-  HValue* subexpression;
-  int32_t sign;
-  if (index->IsAdd()) {
-    sign = 1;
-    HAdd* add = HAdd::cast(index);
-    if (add->left()->IsConstant()) {
-      subexpression = add->right();
-      constant = HConstant::cast(add->left());
-    } else if (add->right()->IsConstant()) {
-      subexpression = add->left();
-      constant = HConstant::cast(add->right());
-    } else {
-      return;
-    }
-  } else if (index->IsSub()) {
-    sign = -1;
-    HSub* sub = HSub::cast(index);
-    if (sub->left()->IsConstant()) {
-      subexpression = sub->right();
-      constant = HConstant::cast(sub->left());
-    } else if (sub->right()->IsConstant()) {
-      subexpression = sub->left();
-      constant = HConstant::cast(sub->right());
-    } else {
-      return;
-    }
-  } else {
-    return;
-  }
-
-  if (!constant->HasInteger32Value()) return;
-  int32_t value = constant->Integer32Value() * sign;
-  // We limit offset values to 30 bits because we want to avoid the risk of
-  // overflows when the offset is added to the object header size.
-  if (value >= 1 << 30 || value < 0) return;
-  array_operation->SetKey(subexpression);
-  if (index->HasNoUses()) {
-    index->DeleteAndReplaceWith(NULL);
-  }
-  ASSERT(value >= 0);
-  array_operation->SetIndexOffset(static_cast<uint32_t>(value));
-  array_operation->SetDehoisted(true);
-}
-
-
-void HGraph::DehoistSimpleArrayIndexComputations() {
-  HPhase phase("H_Dehoist index computations", this);
-  for (int i = 0; i < blocks()->length(); ++i) {
-    for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
-      HInstruction* instr = it.Current();
-      ArrayInstructionInterface* array_instruction = NULL;
-      if (instr->IsLoadKeyed()) {
-        HLoadKeyed* op = HLoadKeyed::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else if (instr->IsStoreKeyed()) {
-        HStoreKeyed* op = HStoreKeyed::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else {
-        continue;
-      }
-      DehoistArrayIndex(array_instruction);
-    }
-  }
-}
-
-
 void HGraph::RestoreActualValues() {
   HPhase phase("H_Restore actual values", this);
 
@@ -6058,8 +5900,14 @@
         expr->GetStoreMode(), has_side_effects);
   } else {
     if (is_store) {
+      if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
+        AddSoftDeoptimize();
+      }
       instr = BuildStoreKeyedGeneric(obj, key, val);
     } else {
+      if (expr->AsProperty()->IsUninitialized()) {
+        AddSoftDeoptimize();
+      }
       instr = BuildLoadKeyedGeneric(obj, key);
     }
     AddInstruction(instr);
@@ -7362,12 +7210,11 @@
 
   } else {
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
-    bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
-
     if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
       return Bailout("possible direct call to eval");
     }
 
+    bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
     if (global_call) {
       Variable* var = proxy->var();
       bool known_global_function = false;
@@ -7588,6 +7435,14 @@
 
   const Runtime::Function* function = expr->function();
   ASSERT(function != NULL);
+
+  if (static_cast<int>(function->function_id) ==
+          static_cast<int>(Runtime::kNeverOptimize) &&
+      expr->arguments()->length() == 0) {
+    // %NeverOptimize() without arguments marks the caller as never optimize.
+    return Bailout("function marked itself as never optimize");
+  }
+
   if (function->intrinsic_type == Runtime::INLINE) {
     ASSERT(expr->name()->length() > 0);
     ASSERT(expr->name()->Get(0) == '_');
@@ -7691,7 +7546,7 @@
 void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
-  Handle<Type> operand_type = expr->expression()->lower_type();
+  Handle<Type> operand_type = expr->expression()->bounds().lower;
   HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::SUB);
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
@@ -7700,7 +7555,7 @@
 void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
-  Handle<Type> operand_type = expr->expression()->lower_type();
+  Handle<Type> operand_type = expr->expression()->bounds().lower;
   HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::BIT_NOT);
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
@@ -8036,9 +7891,9 @@
     HValue* left,
     HValue* right) {
   HValue* context = environment()->LookupContext();
-  Handle<Type> left_type = expr->left()->lower_type();
-  Handle<Type> right_type = expr->right()->lower_type();
-  Handle<Type> result_type = expr->lower_type();
+  Handle<Type> left_type = expr->left()->bounds().lower;
+  Handle<Type> right_type = expr->right()->bounds().lower;
+  Handle<Type> result_type = expr->bounds().lower;
   Maybe<int> fixed_right_arg = expr->fixed_right_arg();
   Representation left_rep = Representation::FromType(left_type);
   Representation right_rep = Representation::FromType(right_type);
@@ -8360,8 +8215,8 @@
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
-  Handle<Type> left_type = expr->left()->lower_type();
-  Handle<Type> right_type = expr->right()->lower_type();
+  Handle<Type> left_type = expr->left()->bounds().lower;
+  Handle<Type> right_type = expr->right()->bounds().lower;
   Handle<Type> combined_type = expr->combined_type();
   Representation combined_rep = Representation::FromType(combined_type);
   Representation left_rep = Representation::FromType(left_type);
diff --git a/src/hydrogen.h b/src/hydrogen.h
index f811867..7eb4243 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -233,14 +233,21 @@
 
 class HInstructionIterator BASE_EMBEDDED {
  public:
-  explicit HInstructionIterator(HBasicBlock* block) : instr_(block->first()) { }
+  explicit HInstructionIterator(HBasicBlock* block)
+      : instr_(block->first()) {
+    next_ = Done() ? NULL : instr_->next();
+  }
 
-  bool Done() { return instr_ == NULL; }
-  HInstruction* Current() { return instr_; }
-  void Advance() { instr_ = instr_->next(); }
+  inline bool Done() const { return instr_ == NULL; }
+  inline HInstruction* Current() { return instr_; }
+  inline void Advance() {
+    instr_ = next_;
+    next_ = Done() ? NULL : instr_->next();
+  }
 
  private:
   HInstruction* instr_;
+  HInstruction* next_;
 };
 
 
@@ -291,18 +298,13 @@
   HEnvironment* start_environment() const { return start_environment_; }
 
   void FinalizeUniqueValueIds();
-  void InsertTypeConversions();
-  void MergeRemovableSimulates();
   void MarkDeoptimizeOnUndefined();
   bool ProcessArgumentsObject();
-  void Canonicalize();
   void OrderBlocks();
   void AssignDominators();
   void SetupInformativeDefinitions();
-  void DehoistSimpleArrayIndexComputations();
   void RestoreActualValues();
   void PropagateDeoptimizingMark();
-  void AnalyzeAndPruneEnvironmentLiveness();
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
@@ -447,7 +449,6 @@
 
   void MarkAsDeoptimizingRecursively(HBasicBlock* block);
   void NullifyUnreachableInstructions();
-  void InsertTypeConversions(HInstruction* instr);
   void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
   void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
   void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
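
The HInstructionIterator change above caches the successor before each visit, so the current instruction may be unlinked mid-walk (as GVN's HandleSideEffectDominator hook can now cause) without derailing the traversal. The same pattern on a standard container, as a sketch:

    #include <cassert>
    #include <iterator>
    #include <list>

    // Erase-safe walk: capture the successor before visiting, so deleting
    // the current node cannot invalidate the step to the next one.
    template <typename T, typename Visit>
    void ForEachAllowingErase(std::list<T>& items, Visit visit) {
      for (auto it = items.begin(); it != items.end();) {
        auto next = std::next(it);  // cached up front, like next_
        visit(items, it);           // may call items.erase(it)
        it = next;
      }
    }

    int main() {
      std::list<int> xs = {1, 2, 3, 4};
      ForEachAllowingErase(xs, [](std::list<int>& l,
                                  std::list<int>::iterator it) {
        if (*it % 2 == 0) l.erase(it);  // unlink the current element
      });
      assert(xs.size() == 2);
      return 0;
    }
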
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 60b525f..e0ae006 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -65,7 +65,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     return XMMRegister::kNumAllocatableRegisters;
   } else {
-    return X87TopOfStackRegister::kNumAllocatableRegisters;
+    return X87Register::kNumAllocatableRegisters;
   }
 }
 
@@ -74,7 +74,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     return XMMRegister::kNumRegisters;
   } else {
-    return X87TopOfStackRegister::kNumRegisters;
+    return X87Register::kNumRegisters;
   }
 }
 
@@ -83,7 +83,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     return XMMRegister::AllocationIndexToString(index);
   } else {
-    return X87TopOfStackRegister::AllocationIndexToString(index);
+    return X87Register::AllocationIndexToString(index);
   }
 }
 
@@ -1784,6 +1784,12 @@
 }
 
 
+void Assembler::fmul_i(int i) {
+  EnsureSpace ensure_space(this);
+  emit_farith(0xD8, 0xC8, i);
+}
+
+
 void Assembler::fmul(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC8, i);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 353f265..8380897 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -229,30 +229,40 @@
 #define xmm7 (static_cast<const XMMRegister&>(double_register_7))
 
 
-struct X87TopOfStackRegister : IntelDoubleRegister {
-  static const int kNumAllocatableRegisters = 1;
-  static const int kNumRegisters = 1;
+struct X87Register : IntelDoubleRegister {
+  static const int kNumAllocatableRegisters = 5;
+  static const int kNumRegisters = 5;
 
-  bool is(X87TopOfStackRegister reg) const {
+  bool is(X87Register reg) const {
     return code_ == reg.code_;
   }
 
   static const char* AllocationIndexToString(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
-      "st0",
+      "stX_0", "stX_1", "stX_2", "stX_3", "stX_4"
     };
     return names[index];
   }
 
-  static int ToAllocationIndex(X87TopOfStackRegister reg) {
-    ASSERT(reg.code() == 0);
-    return 0;
+  static X87Register FromAllocationIndex(int index) {
+    STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister));
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
+    X87Register result;
+    result.code_ = index;
+    return result;
+  }
+
+  static int ToAllocationIndex(X87Register reg) {
+    return reg.code_;
   }
 };
 
-#define x87tos \
-  static_cast<const X87TopOfStackRegister&>(double_register_0)
+#define stX_0 static_cast<const X87Register&>(double_register_0)
+#define stX_1 static_cast<const X87Register&>(double_register_1)
+#define stX_2 static_cast<const X87Register&>(double_register_2)
+#define stX_3 static_cast<const X87Register&>(double_register_3)
+#define stX_4 static_cast<const X87Register&>(double_register_4)
 
 
 typedef IntelDoubleRegister DoubleRegister;
@@ -947,6 +957,7 @@
   void fadd(int i);
   void fsub(int i);
   void fmul(int i);
+  void fmul_i(int i);
   void fdiv(int i);
 
   void fisub_s(const Operand& adr);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 89f21e9..aee57dc 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -262,6 +262,17 @@
 }
 
 
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { eax, ebx, ecx, edx };
+  descriptor->register_param_count_ = 4;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 
@@ -651,131 +662,143 @@
 };
 
 
-// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
-// trashed registers.
-static void IntegerConvert(MacroAssembler* masm,
-                           Register source,
-                           bool use_sse3,
-                           Label* conversion_failure) {
-  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
-  Label done, right_exponent, normal_exponent;
-  Register scratch = ebx;
-  Register scratch2 = edi;
-  // Get exponent word.
-  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
-  // Get exponent alone in scratch2.
-  __ mov(scratch2, scratch);
-  __ and_(scratch2, HeapNumber::kExponentMask);
-  __ shr(scratch2, HeapNumber::kExponentShift);
-  __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
-  // Load ecx with zero.  We use this either for the final shift or
-  // for the answer.
-  __ xor_(ecx, ecx);
-  // If the exponent is above 83, the number contains no significant
-  // bits in the range 0..2^31, so the result is zero.
-  static const uint32_t kResultIsZeroExponent = 83;
-  __ cmp(scratch2, Immediate(kResultIsZeroExponent));
-  __ j(above, &done);
-  if (use_sse3) {
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+  Register input_reg = this->source();
+  Register final_result_reg = this->destination();
+  ASSERT(is_truncating());
+
+  Label check_negative, process_64_bits, done, done_no_stash;
+
+  int double_offset = offset();
+
+  // Account for return address and saved regs if input is esp.
+  if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
+
+  MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+  MemOperand exponent_operand(MemOperand(input_reg,
+                                         double_offset + kPointerSize));
+
+  Register scratch1;
+  {
+    Register scratch_candidates[3] = { ebx, edx, edi };
+    for (int i = 0; i < 3; i++) {
+      scratch1 = scratch_candidates[i];
+      if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+    }
+  }
+  // Since we must use ecx for shifts below, use some other register (eax)
+  // to calculate the result if ecx is the requested return register.
+  Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+  // Save ecx: if it is not the return register it is volatile anyway,
+  // and if it is the return register we must instead save the temp
+  // register (eax) that stands in for the result.
+  Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+  __ push(scratch1);
+  __ push(save_reg);
+
+  bool stash_exponent_copy = !input_reg.is(esp);
+  __ mov(scratch1, mantissa_operand);
+  if (CpuFeatures::IsSupported(SSE3)) {
     CpuFeatureScope scope(masm, SSE3);
-    // Check whether the exponent is too big for a 64 bit signed integer.
-    static const uint32_t kTooBigExponent = 63;
-    __ cmp(scratch2, Immediate(kTooBigExponent));
-    __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
-    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
-    // Reserve space for 64 bit answer.
-    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
+    __ fld_d(mantissa_operand);
+  }
+  __ mov(ecx, exponent_operand);
+  if (stash_exponent_copy) __ push(ecx);
+
+  __ and_(ecx, HeapNumber::kExponentMask);
+  __ shr(ecx, HeapNumber::kExponentShift);
+  __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
+  __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
+  __ j(below, &process_64_bits);
+
+  // The result is entirely in the lower 32 bits of the mantissa.
+  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+  if (CpuFeatures::IsSupported(SSE3)) {
+    __ fstp(0);
+  }
+  __ sub(ecx, Immediate(delta));
+  __ xor_(result_reg, result_reg);
+  __ cmp(ecx, Immediate(31));
+  __ j(above, &done);
+  __ shl_cl(scratch1);
+  __ jmp(&check_negative);
+
+  __ bind(&process_64_bits);
+  if (CpuFeatures::IsSupported(SSE3)) {
+    CpuFeatureScope scope(masm, SSE3);
+    if (stash_exponent_copy) {
+      // Already a copy of the exponent on the stack, overwrite it.
+      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+      __ sub(esp, Immediate(kDoubleSize / 2));
+    } else {
+      // Reserve space for 64 bit answer.
+      __ sub(esp, Immediate(kDoubleSize));  // Nolint.
+    }
     // Do conversion, which cannot fail because we checked the exponent.
     __ fisttp_d(Operand(esp, 0));
-    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
+    __ mov(result_reg, Operand(esp, 0));  // Load low word of answer as result
+    __ add(esp, Immediate(kDoubleSize));
+    __ jmp(&done_no_stash);
   } else {
-    // Check whether the exponent matches a 32 bit signed int that cannot be
-    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
-    // exponent is 30 (biased).  This is the exponent that we are fastest at and
-    // also the highest exponent we can handle here.
-    const uint32_t non_smi_exponent = 30;
-    __ cmp(scratch2, Immediate(non_smi_exponent));
-    // If we have a match of the int32-but-not-Smi exponent then skip some
-    // logic.
-    __ j(equal, &right_exponent, Label::kNear);
-    // If the exponent is higher than that then go to slow case.  This catches
-    // numbers that don't fit in a signed int32, infinities and NaNs.
-    __ j(less, &normal_exponent, Label::kNear);
-
-    {
-      // Handle a big exponent.  The only reason we have this code is that the
-      // >>> operator has a tendency to generate numbers with an exponent of 31.
-      const uint32_t big_non_smi_exponent = 31;
-      __ cmp(scratch2, Immediate(big_non_smi_exponent));
-      __ j(not_equal, conversion_failure);
-      // We have the big exponent, typically from >>>.  This means the number is
-      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
-      __ mov(scratch2, scratch);
-      __ and_(scratch2, HeapNumber::kMantissaMask);
-      // Put back the implicit 1.
-      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
-      // Shift up the mantissa bits to take up the space the exponent used to
-      // take. We just orred in the implicit bit so that took care of one and
-      // we want to use the full unsigned range so we subtract 1 bit from the
-      // shift distance.
-      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
-      __ shl(scratch2, big_shift_distance);
-      // Get the second half of the double.
-      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
-      // Shift down 21 bits to get the most significant 11 bits or the low
-      // mantissa word.
-      __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, scratch2);
-      // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, scratch);
-      __ j(positive, &done, Label::kNear);
-      __ neg(ecx);
-      __ jmp(&done, Label::kNear);
+    // The result must be extracted from the shifted 32-bit mantissa.
+    __ sub(ecx, Immediate(delta));
+    __ neg(ecx);
+    if (stash_exponent_copy) {
+      __ mov(result_reg, MemOperand(esp, 0));
+    } else {
+      __ mov(result_reg, exponent_operand);
     }
-
-    __ bind(&normal_exponent);
-    // Exponent word in scratch, exponent in scratch2. Zero in ecx.
-    // We know that 0 <= exponent < 30.
-    __ mov(ecx, Immediate(30));
-    __ sub(ecx, scratch2);
-
-    __ bind(&right_exponent);
-    // Here ecx is the shift, scratch is the exponent word.
-    // Get the top bits of the mantissa.
-    __ and_(scratch, HeapNumber::kMantissaMask);
-    // Put back the implicit 1.
-    __ or_(scratch, 1 << HeapNumber::kExponentShift);
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We have kExponentShift + 1 significant bits int he low end of the
-    // word.  Shift them to the top bits.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ shl(scratch, shift_distance);
-    // Get the second half of the double. For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the most significant 10 bits or the low
-    // mantissa word.
-    __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, scratch);
-    // Move down according to the exponent.
-    __ shr_cl(scratch2);
-    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
-    // we may need to fix the sign.
-    Label negative;
-    __ xor_(ecx, ecx);
-    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
-    __ j(greater, &negative, Label::kNear);
-    __ mov(ecx, scratch2);
-    __ jmp(&done, Label::kNear);
-    __ bind(&negative);
-    __ sub(ecx, scratch2);
+    __ and_(result_reg,
+            Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+    __ add(result_reg,
+           Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+    __ shrd(result_reg, scratch1);
+    __ shr_cl(result_reg);
+    __ test(ecx, Immediate(32));
+    if (CpuFeatures::IsSupported(CMOV)) {
+      CpuFeatureScope use_cmov(masm, CMOV);
+      __ cmov(not_equal, scratch1, result_reg);
+    } else {
+      Label skip_mov;
+      __ j(equal, &skip_mov, Label::kNear);
+      __ mov(scratch1, result_reg);
+      __ bind(&skip_mov);
+    }
   }
+
+  // If the double was negative, negate the integer result.
+  __ bind(&check_negative);
+  __ mov(result_reg, scratch1);
+  __ neg(result_reg);
+  if (stash_exponent_copy) {
+    __ cmp(MemOperand(esp, 0), Immediate(0));
+  } else {
+    __ cmp(exponent_operand, Immediate(0));
+  }
+  if (CpuFeatures::IsSupported(CMOV)) {
+    CpuFeatureScope use_cmov(masm, CMOV);
+    __ cmov(greater, result_reg, scratch1);
+  } else {
+    Label skip_mov;
+    __ j(less_equal, &skip_mov, Label::kNear);
+    __ mov(result_reg, scratch1);
+    __ bind(&skip_mov);
+  }
+
+  // Restore registers
   __ bind(&done);
+  if (stash_exponent_copy) {
+    __ add(esp, Immediate(kDoubleSize / 2));
+  }
+  __ bind(&done_no_stash);
+  if (!final_result_reg.is(result_reg)) {
+    ASSERT(final_result_reg.is(ecx));
+    __ mov(final_result_reg, result_reg);
+  }
+  __ pop(save_reg);
+  __ pop(scratch1);
+  __ ret(0);
 }
 
 
@@ -2396,7 +2419,9 @@
     CpuFeatureScope use_sse2(masm, SSE2);
     ConvertHeapNumberToInt32(masm, edx, conversion_failure);
   } else {
-    IntegerConvert(masm, edx, use_sse3, conversion_failure);
+    DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
+                       true);
+    __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
   }
   __ mov(edx, ecx);
 
@@ -2431,7 +2456,9 @@
     CpuFeatureScope use_sse2(masm, SSE2);
     ConvertHeapNumberToInt32(masm, eax, conversion_failure);
   } else {
-    IntegerConvert(masm, eax, use_sse3, conversion_failure);
+    DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
+                       true);
+    __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
   }
 
   __ bind(&done);
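
My reading of the DoubleToIStub constructor used in the two hunks above
(parameter names are assumptions, not taken from this patch):

// DoubleToIStub(Register source,       // base register addressing the double
//               Register destination,  // receives the converted int32
//               int offset,            // byte offset from |source| to the bits
//               bool is_truncating);   // JS ToInt32 truncation semantics
//
// With a tagged HeapNumber in edx (or eax), the offset
// HeapNumber::kValueOffset - kHeapObjectTag strips the heap-object tag so
// that |source| + |offset| points directly at the IEEE-754 payload.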
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 6af2445..505cd4f 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -566,15 +566,11 @@
   // Get the bailout id from the stack.
   __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
 
-  // Get the address of the location in the code object if possible
+  // Get the address of the location in the code object
   // and compute the fp-to-sp delta in register edx.
-  if (type() == EAGER || type() == SOFT) {
-    __ Set(ecx, Immediate(0));
-    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
-  } else {
-    __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
-    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
-  }
+  __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+  __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
   __ sub(edx, ebp);
   __ neg(edx);
 
@@ -620,12 +616,8 @@
   // and check that the generated code never deoptimizes with unbalanced stack.
   __ fnclex();
 
-  // Remove the bailout id and the double registers from the stack.
-  if (type() == EAGER || type() == SOFT) {
-    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
-  } else {
-    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
-  }
+  // Remove the bailout id, return address and the double registers.
+  __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
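
With the EAGER/SOFT special case gone, every deoptimization entry now
pushes a return address, so the prologue above can assume one fixed
layout (a sketch of my reading; offsets are relative to esp after the
double registers have been saved):

//   esp + kSavedRegistersAreaSize + 0            : bailout id
//   esp + kSavedRegistersAreaSize + kPointerSize : return address into
//                                                  the code object
//
// This is also why the epilogue now unconditionally drops
// kDoubleRegsSize + 2 * kPointerSize bytes.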
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index ac82dcf..46d0c1f 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -353,7 +353,6 @@
     instr->CompileToNative(this);
 
     if (!CpuFeatures::IsSupported(SSE2)) {
-      ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(x87_stack_depth_);
       }
@@ -365,8 +364,7 @@
 
 
 bool LCodeGen::GenerateJumpTable() {
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
+  Label needs_frame;
   if (jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
   }
@@ -382,56 +380,32 @@
     }
     if (jump_table_[i].needs_frame) {
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
-      if (type == Deoptimizer::LAZY) {
-        if (needs_frame_is_call.is_bound()) {
-          __ jmp(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-          // Push a PC inside the function so that the deopt code can find where
-          // the deopt comes from. It doesn't have to be the precise return
-          // address of a "calling" LAZY deopt, it only has to be somewhere
-          // inside the code body.
-          Label push_approx_pc;
-          __ call(&push_approx_pc);
-          __ bind(&push_approx_pc);
-          // Push the continuation which was stashed were the ebp should
-          // be. Replace it with the saved ebp.
-          __ push(MemOperand(esp, 3 * kPointerSize));
-          __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
-          __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
-          __ ret(0);  // Call the continuation without clobbering registers.
-        }
+      if (needs_frame.is_bound()) {
+        __ jmp(&needs_frame);
       } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ jmp(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-          // Push the continuation which was stashed were the ebp should
-          // be. Replace it with the saved ebp.
-          __ push(MemOperand(esp, 2 * kPointerSize));
-          __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
-          __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
-          __ ret(0);  // Call the continuation without clobbering registers.
-        }
+        __ bind(&needs_frame);
+        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+        // This variant of deopt can only be used with stubs. Since we don't
+        // have a function pointer to install in the stack frame that we're
+        // building, install a special marker there instead.
+        ASSERT(info()->IsStub());
+        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+        // Push a PC inside the function so that the deopt code can find where
+        // the deopt comes from. It doesn't have to be the precise return
+        // address of a "calling" LAZY deopt, it only has to be somewhere
+        // inside the code body.
+        Label push_approx_pc;
+        __ call(&push_approx_pc);
+        __ bind(&push_approx_pc);
+        // Push the continuation which was stashed where the ebp should
+        // be. Replace it with the saved ebp.
+        __ push(MemOperand(esp, 3 * kPointerSize));
+        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+        __ ret(0);  // Call the continuation without clobbering registers.
       }
     } else {
-      if (type == Deoptimizer::LAZY) {
-        __ call(entry, RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      }
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
   return !is_aborted();
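
A sketch of the frame the shared needs_frame trampoline above builds (my
reading of the pushes; top of stack first, as seen by the deopt entry
after ret(0) has consumed the duplicated entry address):

//   esp + 0  : approximate pc inside the code body (call &push_approx_pc)
//   esp + 4  : Smi-encoded StackFrame::STUB marker
//   esp + 8  : context
//   esp + 12 : caller's ebp   <- the new ebp points here
//
// The deopt entry address pushed first is copied to the top and consumed
// by ret(0), so the trampoline jumps into the deoptimizer without
// clobbering any general-purpose register.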
@@ -501,59 +475,166 @@
 }
 
 
+X87Register LCodeGen::ToX87Register(int index) const {
+  return X87Register::FromAllocationIndex(index);
+}
+
+
 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
   return XMMRegister::FromAllocationIndex(index);
 }
 
 
-bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
-  return op->IsDoubleRegister();
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+  ASSERT(X87StackContains(reg));
+  X87Fxch(reg);
+  x87_stack_depth_--;
 }
 
 
-void LCodeGen::ReadX87Operand(Operand dst) {
-  ASSERT(x87_stack_depth_ == 1);
+void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
+  ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
+  int i  = X87ArrayIndex(reg);
+  int st = x87_st2idx(i);
+  if (st != other_slot) {
+    int other_i = x87_st2idx(other_slot);
+    X87Register other   = x87_stack_[other_i];
+    x87_stack_[other_i] = reg;
+    x87_stack_[i]       = other;
+    if (st == 0) {
+      __ fxch(other_slot);
+    } else if (other_slot == 0) {
+      __ fxch(st);
+    } else {
+      __ fxch(st);
+      __ fxch(other_slot);
+      __ fxch(st);
+    }
+  }
+}
+
+
+int LCodeGen::x87_st2idx(int pos) {
+  return x87_stack_depth_ - pos - 1;
+}
+
+
+int LCodeGen::X87ArrayIndex(X87Register reg) {
+  for (int i = 0; i < x87_stack_depth_; i++) {
+    if (x87_stack_[i].is(reg)) return i;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+bool LCodeGen::X87StackContains(X87Register reg) {
+  for (int i = 0; i < x87_stack_depth_; i++) {
+    if (x87_stack_[i].is(reg)) return true;
+  }
+  return false;
+}
+
+
+void LCodeGen::X87Free(X87Register reg) {
+  ASSERT(X87StackContains(reg));
+  int i  = X87ArrayIndex(reg);
+  int st = x87_st2idx(i);
+  if (st > 0) {
+    // Keep track of how fstp(st) changes the order of elements.
+    int tos_i = x87_st2idx(0);
+    x87_stack_[i] = x87_stack_[tos_i];
+  }
+  x87_stack_depth_--;
+  __ fstp(st);
+}
+
+
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+  if (X87StackContains(dst)) {
+    X87Fxch(dst);
+    __ fstp(0);
+  } else {
+    ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
+    x87_stack_[x87_stack_depth_] = dst;
+    x87_stack_depth_++;
+  }
+  X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+  if (opts == kX87DoubleOperand) {
+    __ fld_d(src);
+  } else if (opts == kX87FloatOperand) {
+    __ fld_s(src);
+  } else if (opts == kX87IntOperand) {
+    __ fild_s(src);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src) {
+  X87Fxch(src);
   __ fst_d(dst);
 }
 
 
-void LCodeGen::PushX87DoubleOperand(Operand src) {
-  ASSERT(x87_stack_depth_ == 0);
-  x87_stack_depth_++;
-  __ fld_d(src);
-}
-
-
-void LCodeGen::PushX87FloatOperand(Operand src) {
-  ASSERT(x87_stack_depth_ == 0);
-  x87_stack_depth_++;
-  __ fld_s(src);
-}
-
-
-void LCodeGen::PopX87() {
-  ASSERT(x87_stack_depth_ == 1);
-  x87_stack_depth_--;
-  __ fstp(0);
-}
-
-
-void LCodeGen::CurrentInstructionReturnsX87Result() {
-  ASSERT(x87_stack_depth_ <= 1);
-  if (x87_stack_depth_ == 0) {
-    x87_stack_depth_ = 1;
+void LCodeGen::X87PrepareToWrite(X87Register reg) {
+  if (X87StackContains(reg)) {
+    X87Free(reg);
   }
+  // Mark this register as the next register to write to
+  x87_stack_[x87_stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87CommitWrite(X87Register reg) {
+  // Assert the reg is prepared to write, but not on the virtual stack yet
+  ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
+      x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
+  x87_stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+    X87Register left, X87Register right, X87Register result) {
+  // You need to use DefineSameAsFirst for x87 instructions
+  ASSERT(result.is(left));
+  X87Fxch(right, 1);
+  X87Fxch(left);
 }
 
 
 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
-  if (x87_stack_depth_ > 0) {
-    if ((instr->ClobbersDoubleRegisters() ||
-         instr->HasDoubleRegisterResult()) &&
-        !instr->HasDoubleRegisterInput()) {
-      PopX87();
+  if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+    bool double_inputs = instr->HasDoubleRegisterInput();
+
+    // Flush the stack from tos down, since X87Free() will mess with tos.
+    for (int i = x87_stack_depth_-1; i >= 0; i--) {
+      X87Register reg = x87_stack_[i];
+      // Skip registers which contain the inputs for the next instruction
+      // when flushing the stack
+      if (double_inputs && instr->IsDoubleInput(reg, this)) {
+        continue;
+      }
+      X87Free(reg);
+      if (i < x87_stack_depth_-1) i++;
     }
   }
+  if (instr->IsReturn()) {
+    while (x87_stack_depth_ > 0) {
+      __ fstp(0);
+      x87_stack_depth_--;
+    }
+  }
+}
+
+
+void LCodeGen::EmitFlushX87ForDeopt() {
+  for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
 }
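
The X87* helpers above maintain a side table mapping allocated
X87Registers to physical FPU stack slots. A minimal self-contained C++
model of that bookkeeping (a sketch: plain ints stand in for
X87Register, and no instructions are emitted):

#include <cassert>

class X87StackModel {
 public:
  // st(pos) counts down from the top of the physical stack while the
  // array grows upward, hence the depth - pos - 1 translation
  // (cf. x87_st2idx above).
  int StToIndex(int pos) const { return depth_ - pos - 1; }

  int ArrayIndex(int reg) const {
    for (int i = 0; i < depth_; i++) {
      if (stack_[i] == reg) return i;
    }
    assert(false && "register not on the virtual stack");
    return -1;
  }

  // Mirrors X87Fxch(reg, 0): bring |reg| to the top. A physical fxch(k)
  // swaps st(0) and st(k), which is why swapping two non-top slots in
  // X87Fxch above needs three exchanges.
  void BringToTop(int reg) {
    int i = ArrayIndex(reg);
    int st = StToIndex(i);
    if (st != 0) {
      int top_i = StToIndex(0);
      int top = stack_[top_i];
      stack_[top_i] = reg;
      stack_[i] = top;
      // emit: fxch(st)
    }
  }

  void Push(int reg) { stack_[depth_++] = reg; }  // emit: fld ...
  void Pop() { --depth_; }                        // emit: fstp(0)

 private:
  int stack_[8] = {0};
  int depth_ = 0;
};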
 
 
@@ -563,6 +644,12 @@
 }
 
 
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToX87Register(op->index());
+}
+
+
 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
   ASSERT(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
@@ -835,8 +922,6 @@
                             Deoptimizer::BailoutType bailout_type) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
-  // It's an error to deoptimize with the x87 fp stack in use.
-  ASSERT(x87_stack_depth_ == 0);
   int id = environment->deoptimization_index();
   ASSERT(info()->IsOptimizing() || info()->IsStub());
   Address entry =
@@ -874,22 +959,27 @@
     __ popfd();
   }
 
+  // Before instructions which can deopt, we normally flush the x87 stack. But
+  // we can have inputs or outputs of the current instruction on the stack,
+  // thus we need to flush them here from the physical stack to leave it in a
+  // consistent state.
+  if (x87_stack_depth_ > 0) {
+    Label done;
+    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+    EmitFlushX87ForDeopt();
+    __ bind(&done);
+  }
+
   if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
     Label done;
-    if (cc != no_condition) {
-      __ j(NegateCondition(cc), &done, Label::kNear);
-    }
+    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
     __ int3();
     __ bind(&done);
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
-    if (bailout_type == Deoptimizer::LAZY) {
-      __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    } else {
-      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-    }
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
@@ -1721,11 +1811,10 @@
   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
 
   if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
-    __ push(Immediate(lower));
     __ push(Immediate(upper));
-    PushX87DoubleOperand(Operand(esp, 0));
+    __ push(Immediate(lower));
+    X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
     __ add(Operand(esp), Immediate(kDoubleSize));
-    CurrentInstructionReturnsX87Result();
   } else {
     CpuFeatureScope scope1(masm(), SSE2);
     ASSERT(instr->result()->IsDoubleRegister());
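
The push order in the non-SSE2 branch matters: ia32 is little-endian and
push moves esp downward, so pushing |upper| first leaves the low word at
the lower address and fld_d sees a correctly laid-out double at esp. A
small C++ sketch of the split (illustrative, not from the source):

#include <cstdint>
#include <cstring>

void SplitDouble(double v, uint32_t* lower, uint32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  *lower = static_cast<uint32_t>(bits);        // ends up at esp + 0
  *upper = static_cast<uint32_t>(bits >> 32);  // ends up at esp + 4
}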
@@ -1990,62 +2079,67 @@
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  CpuFeatureScope scope(masm(), SSE2);
-  XMMRegister left = ToDoubleRegister(instr->left());
-  XMMRegister right = ToDoubleRegister(instr->right());
-  XMMRegister result = ToDoubleRegister(instr->result());
-  // Modulo uses a fixed result register.
-  ASSERT(instr->op() == Token::MOD || left.is(result));
-  switch (instr->op()) {
-    case Token::ADD:
-      __ addsd(left, right);
-      break;
-    case Token::SUB:
-      __ subsd(left, right);
-      break;
-    case Token::MUL:
-      __ mulsd(left, right);
-      break;
-    case Token::DIV:
-      __ divsd(left, right);
-      // Don't delete this mov. It may improve performance on some CPUs,
-      // when there is a mulsd depending on the result
-      __ movaps(left, left);
-      break;
-    case Token::MOD: {
-      // Pass two doubles as arguments on the stack.
-      __ PrepareCallCFunction(4, eax);
-      __ movdbl(Operand(esp, 0 * kDoubleSize), left);
-      __ movdbl(Operand(esp, 1 * kDoubleSize), right);
-      __ CallCFunction(
-          ExternalReference::double_fp_operation(Token::MOD, isolate()),
-          4);
+  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+    CpuFeatureScope scope(masm(), SSE2);
+    XMMRegister left = ToDoubleRegister(instr->left());
+    XMMRegister right = ToDoubleRegister(instr->right());
+    XMMRegister result = ToDoubleRegister(instr->result());
+    // Modulo uses a fixed result register.
+    ASSERT(instr->op() == Token::MOD || left.is(result));
+    switch (instr->op()) {
+      case Token::ADD:
+        __ addsd(left, right);
+        break;
+      case Token::SUB:
+        __ subsd(left, right);
+        break;
+      case Token::MUL:
+        __ mulsd(left, right);
+        break;
+      case Token::DIV:
+        __ divsd(left, right);
+        // Don't delete this mov. It may improve performance on some CPUs
+        // when there is a mulsd depending on the result.
+        __ movaps(left, left);
+        break;
+      case Token::MOD: {
+        // Pass two doubles as arguments on the stack.
+        __ PrepareCallCFunction(4, eax);
+        __ movdbl(Operand(esp, 0 * kDoubleSize), left);
+        __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+        __ CallCFunction(
+            ExternalReference::double_fp_operation(Token::MOD, isolate()),
+            4);
 
-      // Return value is in st(0) on ia32.
-      // Store it into the (fixed) result register.
-      __ sub(Operand(esp), Immediate(kDoubleSize));
-      __ fstp_d(Operand(esp, 0));
-      __ movdbl(result, Operand(esp, 0));
-      __ add(Operand(esp), Immediate(kDoubleSize));
-      break;
+        // Return value is in st(0) on ia32.
+        // Store it into the (fixed) result register.
+        __ sub(Operand(esp), Immediate(kDoubleSize));
+        __ fstp_d(Operand(esp, 0));
+        __ movdbl(result, Operand(esp, 0));
+        __ add(Operand(esp), Immediate(kDoubleSize));
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
     }
-    default:
-      UNREACHABLE();
-      break;
+  } else {
+    X87Register left = ToX87Register(instr->left());
+    X87Register right = ToX87Register(instr->right());
+    X87Register result = ToX87Register(instr->result());
+    X87PrepareBinaryOp(left, right, result);
+    switch (instr->op()) {
+      case Token::MUL:
+        __ fmul_i(1);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
   }
 }
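
For the non-SSE2 branch above, X87PrepareBinaryOp (defined earlier in
this patch) establishes a fixed stack layout before the arithmetic is
emitted; my reading of its two X87Fxch calls:

//   st(0) == left    -- also the result, hence the DefineSameAsFirst
//                       requirement asserted in X87PrepareBinaryOp
//   st(1) == right
//
// so fmul_i(1) can combine the two topmost slots directly.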
 
 
-void LCodeGen::DoNegateNoSSE2D(LNegateNoSSE2D* instr) {
-  __ push(Immediate(-1));
-  __ fild_s(Operand(esp, 0));
-  __ add(esp, Immediate(kPointerSize));
-  __ fmulp();
-  CurrentInstructionReturnsX87Result();
-}
-
-
-
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->left()).is(edx));
@@ -2963,8 +3057,7 @@
       XMMRegister result = ToDoubleRegister(instr->result());
       __ movdbl(result, FieldOperand(object, offset));
     } else {
-      PushX87DoubleOperand(FieldOperand(object, offset));
-      CurrentInstructionReturnsX87Result();
+      X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
     }
     return;
   }
@@ -3209,16 +3302,14 @@
       __ movss(result, operand);
       __ cvtss2sd(result, result);
     } else {
-      PushX87FloatOperand(operand);
-      CurrentInstructionReturnsX87Result();
+      X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
     }
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       __ movdbl(ToDoubleRegister(instr->result()), operand);
     } else {
-      PushX87DoubleOperand(operand);
-      CurrentInstructionReturnsX87Result();
+      X87Mov(ToX87Register(instr->result()), operand);
     }
   } else {
     Register result(ToRegister(instr->result()));
@@ -3289,8 +3380,7 @@
     XMMRegister result = ToDoubleRegister(instr->result());
     __ movdbl(result, double_load_operand);
   } else {
-    PushX87DoubleOperand(double_load_operand);
-    CurrentInstructionReturnsX87Result();
+    X87Mov(ToX87Register(instr->result()), double_load_operand);
   }
 }
 
@@ -4284,7 +4374,8 @@
       XMMRegister value = ToDoubleRegister(instr->value());
       __ movdbl(FieldOperand(object, offset), value);
     } else {
-      __ fstp_d(FieldOperand(object, offset));
+      X87Register value = ToX87Register(instr->value());
+      X87Mov(FieldOperand(object, offset), value);
     }
     return;
   }
@@ -4410,7 +4501,7 @@
       CpuFeatureScope scope(masm(), SSE2);
       __ movdbl(operand, ToDoubleRegister(instr->value()));
     } else {
-      __ fst_d(operand);
+      X87Mov(operand, ToX87Register(instr->value()));
     }
   } else {
     Register value = ToRegister(instr->value());
@@ -4492,7 +4583,8 @@
       __ mov(double_store_operand2, Immediate(upper));
     } else {
       Label no_special_nan_handling;
-      ASSERT(x87_stack_depth_ > 0);
+      X87Register value = ToX87Register(instr->value());
+      X87Fxch(value);
 
       if (instr->NeedsCanonicalization()) {
         __ fld(0);
@@ -4962,10 +5054,16 @@
     convert_hole = load->UsesMustHandleHole();
   }
 
+  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+  if (!use_sse2) {
+    // Put the value on top of the x87 stack.
+    X87Register src = ToX87Register(instr->value());
+    X87LoadForUsage(src);
+  }
+
   Label no_special_nan_handling;
   Label done;
   if (convert_hole) {
-    bool use_sse2 = CpuFeatures::IsSupported(SSE2);
     if (use_sse2) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -5019,12 +5117,12 @@
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  if (CpuFeatures::IsSupported(SSE2)) {
+  if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   } else {
-    __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
+    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
   __ bind(&done);
 }
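
A note on the fst_d -> fstp_d change above (my reading): X87LoadForUsage
already decremented the virtual stack depth when it brought the value to
the top, so the store must pop the physical FPU stack as well; otherwise
the physical and virtual stacks would disagree by one slot.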
@@ -5075,12 +5173,14 @@
 
 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
                                       Register temp_reg,
+                                      X87Register res_reg,
                                       bool allow_undefined_as_nan,
                                       bool deoptimize_on_minus_zero,
                                       LEnvironment* env,
                                       NumberUntagDMode mode) {
   Label load_smi, done;
 
+  X87PrepareToWrite(res_reg);
   STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
                 NUMBER_CANDIDATE_IS_ANY_TAGGED);
   if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -5141,6 +5241,7 @@
   __ pop(input_reg);
   __ SmiTag(input_reg);  // Retag smi.
   __ bind(&done);
+  X87CommitWrite(res_reg);
 }
 
 
@@ -5522,11 +5623,11 @@
   } else {
     EmitNumberUntagDNoSSE2(input_reg,
                            temp_reg,
+                           ToX87Register(instr->result()),
                            instr->hydrogen()->allow_undefined_as_nan(),
                            deoptimize_on_minus_zero,
                            instr->environment(),
                            mode);
-    CurrentInstructionReturnsX87Result();
   }
 }
 
@@ -5541,93 +5642,22 @@
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
 
+  __ cvttsd2si(result_reg, Operand(input_reg));
+
   if (instr->truncating()) {
     // Performs a truncating conversion of a floating point number as used by
     // the JS bitwise operations.
-    __ cvttsd2si(result_reg, Operand(input_reg));
+    Label fast_case_succeeded;
     __ cmp(result_reg, 0x80000000u);
-    if (CpuFeatures::IsSupported(SSE3)) {
-      // This will deoptimize if the exponent of the input in out of range.
-      CpuFeatureScope scope(masm(), SSE3);
-      Label convert, done;
-      __ j(not_equal, &done, Label::kNear);
-      __ sub(Operand(esp), Immediate(kDoubleSize));
-      __ movdbl(Operand(esp, 0), input_reg);
-      // Get exponent alone and check for too-big exponent.
-      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
-      __ and_(result_reg, HeapNumber::kExponentMask);
-      const uint32_t kTooBigExponent =
-          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
-      __ j(less, &convert, Label::kNear);
-      __ add(Operand(esp), Immediate(kDoubleSize));
-      DeoptimizeIf(no_condition, instr->environment());
-      __ bind(&convert);
-      // Do conversion, which cannot fail because we checked the exponent.
-      __ fld_d(Operand(esp, 0));
-      __ fisttp_d(Operand(esp, 0));
-      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
-      __ add(Operand(esp), Immediate(kDoubleSize));
-      __ bind(&done);
-    } else {
-      Label done;
-      Register temp_reg = ToRegister(instr->temp());
-      XMMRegister xmm_scratch = xmm0;
-
-      // If cvttsd2si succeeded, we're done. Otherwise, we attempt
-      // manual conversion.
-      __ j(not_equal, &done, Label::kNear);
-
-      // Get high 32 bits of the input in result_reg and temp_reg.
-      __ pshufd(xmm_scratch, input_reg, 1);
-      __ movd(Operand(temp_reg), xmm_scratch);
-      __ mov(result_reg, temp_reg);
-
-      // Prepare negation mask in temp_reg.
-      __ sar(temp_reg, kBitsPerInt - 1);
-
-      // Extract the exponent from result_reg and subtract adjusted
-      // bias from it. The adjustment is selected in a way such that
-      // when the difference is zero, the answer is in the low 32 bits
-      // of the input, otherwise a shift has to be performed.
-      __ shr(result_reg, HeapNumber::kExponentShift);
-      __ and_(result_reg,
-              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
-      __ sub(Operand(result_reg),
-             Immediate(HeapNumber::kExponentBias +
-                       HeapNumber::kExponentBits +
-                       HeapNumber::kMantissaBits));
-      // Don't handle big (> kMantissaBits + kExponentBits == 63) or
-      // special exponents.
-      DeoptimizeIf(greater, instr->environment());
-
-      // Zero out the sign and the exponent in the input (by shifting
-      // it to the left) and restore the implicit mantissa bit,
-      // i.e. convert the input to unsigned int64 shifted left by
-      // kExponentBits.
-      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
-      // Minus zero has the most significant bit set and the other
-      // bits cleared.
-      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
-      __ psllq(input_reg, HeapNumber::kExponentBits);
-      __ por(input_reg, xmm_scratch);
-
-      // Get the amount to shift the input right in xmm_scratch.
-      __ neg(result_reg);
-      __ movd(xmm_scratch, Operand(result_reg));
-
-      // Shift the input right and extract low 32 bits.
-      __ psrlq(input_reg, xmm_scratch);
-      __ movd(Operand(result_reg), input_reg);
-
-      // Use the prepared mask in temp_reg to negate the result if necessary.
-      __ xor_(result_reg, Operand(temp_reg));
-      __ sub(result_reg, Operand(temp_reg));
-      __ bind(&done);
-    }
+    __ j(not_equal, &fast_case_succeeded);
+    __ sub(esp, Immediate(kDoubleSize));
+    __ movdbl(MemOperand(esp, 0), input_reg);
+    DoubleToIStub stub(esp, result_reg, 0, true);
+    __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+    __ add(esp, Immediate(kDoubleSize));
+    __ bind(&fast_case_succeeded);
   } else {
     Label done;
-    __ cvttsd2si(result_reg, Operand(input_reg));
     __ cvtsi2sd(xmm0, Operand(result_reg));
     __ ucomisd(xmm0, input_reg);
     DeoptimizeIf(not_equal, instr->environment());
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 7e66ac2..1cbe4fe 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -105,7 +105,7 @@
   Operand ToOperand(LOperand* op) const;
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
-  bool IsX87TopOfStack(LOperand* op) const;
+  X87Register ToX87Register(LOperand* op) const;
 
   bool IsInteger32(LConstantOperand* op) const;
   bool IsSmi(LConstantOperand* op) const;
@@ -118,14 +118,20 @@
   double ToDouble(LConstantOperand* op) const;
 
   // Support for non-sse2 (x87) floating point stack handling.
-  // These functions maintain the depth of the stack (either 0 or 1)
-  void PushX87DoubleOperand(Operand src);
-  void PushX87FloatOperand(Operand src);
-  void ReadX87Operand(Operand dst);
-  bool X87StackNonEmpty() const { return x87_stack_depth_ > 0; }
-  void PopX87();
-  void CurrentInstructionReturnsX87Result();
-  void FlushX87StackIfNecessary(LInstruction* instr);
+  // These functions maintain the mapping of physical stack registers to our
+  // virtual registers between instructions.
+  enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+  void X87Mov(X87Register reg, Operand src,
+      X87OperandType operand = kX87DoubleOperand);
+  void X87Mov(Operand src, X87Register reg);
+
+  void X87PrepareBinaryOp(
+      X87Register left, X87Register right, X87Register result);
+
+  void X87LoadForUsage(X87Register reg);
+  void X87PrepareToWrite(X87Register reg);
+  void X87CommitWrite(X87Register reg);
 
   Handle<Object> ToHandle(LConstantOperand* op) const;
 
@@ -292,6 +298,7 @@
 
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
+  X87Register ToX87Register(int index) const;
   int ToInteger32(LConstantOperand* op) const;
 
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
@@ -331,6 +338,7 @@
   void EmitNumberUntagDNoSSE2(
       Register input,
       Register temp,
+      X87Register res_reg,
       bool allow_undefined_as_nan,
       bool deoptimize_on_minus_zero,
       LEnvironment* env,
@@ -392,6 +400,16 @@
   // register, or a stack slot operand.
   void EmitPushTaggedOperand(LOperand* operand);
 
+  void X87Fxch(X87Register reg, int other_slot = 0);
+  void X87Fld(Operand src, X87OperandType opts);
+  void X87Free(X87Register reg);
+
+  void FlushX87StackIfNecessary(LInstruction* instr);
+  void EmitFlushX87ForDeopt();
+  bool X87StackContains(X87Register reg);
+  int X87ArrayIndex(X87Register reg);
+  int x87_st2idx(int pos);
+
   Zone* zone_;
   LPlatformChunk* const chunk_;
   MacroAssembler* const masm_;
@@ -413,6 +431,7 @@
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
   bool frame_is_built_;
+  X87Register x87_stack_[X87Register::kNumAllocatableRegisters];
   int x87_stack_depth_;
 
   // Builder that keeps track of safepoints in the code. The table
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index a75ce21..e884a9d 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -332,10 +332,8 @@
       } else {
         __ push(Immediate(upper));
         __ push(Immediate(lower));
-        if (cgen_->X87StackNonEmpty()) {
-          cgen_->PopX87();
-        }
-        cgen_->PushX87DoubleOperand(MemOperand(esp, 0));
+        X87Register dst = cgen_->ToX87Register(destination);
+        cgen_->X87Mov(dst, MemOperand(esp, 0));
         __ add(esp, Immediate(kDoubleSize));
       }
     } else {
@@ -367,10 +365,10 @@
     } else {
       // load from the register onto the stack, store in destination, which must
       // be a double stack slot in the non-SSE2 case.
-      ASSERT(source->index() == 0);  // source is on top of the stack
       ASSERT(destination->IsDoubleStackSlot());
       Operand dst = cgen_->ToOperand(destination);
-      cgen_->ReadX87Operand(dst);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
     }
   } else if (source->IsDoubleStackSlot()) {
     if (CpuFeatures::IsSupported(SSE2)) {
@@ -403,10 +401,8 @@
         __ mov(dst1, tmp);
       } else {
         Operand src = cgen_->ToOperand(source);
-        if (cgen_->X87StackNonEmpty()) {
-          cgen_->PopX87();
-        }
-        cgen_->PushX87DoubleOperand(src);
+        X87Register dst = cgen_->ToX87Register(destination);
+        cgen_->X87Mov(dst, src);
       }
     }
   } else {
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index a8905b4..cd4a957 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -82,6 +82,17 @@
 }
 
 
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+  for (int i = 0; i < InputCount(); i++) {
+    LOperand* op = InputAt(i);
+    if (op != NULL && op->IsDoubleRegister()) {
+      if (cgen->ToX87Register(op).is(reg)) return true;
+    }
+  }
+  return false;
+}
+
+
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
 
@@ -494,12 +505,6 @@
 }
 
 
-LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-      X87TopOfStackRegister::ToAllocationIndex(reg));
-}
-
-
 LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
   return Use(value, ToUnallocated(fixed_register));
 }
@@ -510,11 +515,6 @@
 }
 
 
-LOperand* LChunkBuilder::UseX87TopOfStack(HValue* value) {
-  return Use(value, ToUnallocated(x87tos));
-}
-
-
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
   return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
@@ -642,13 +642,6 @@
 }
 
 
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineX87TOS(
-    LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, ToUnallocated(x87tos));
-}
-
-
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
@@ -1577,17 +1570,7 @@
     }
     return DefineSameAsFirst(mul);
   } else if (instr->representation().IsDouble()) {
-    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
-      return DoArithmeticD(Token::MUL, instr);
-    }
-    ASSERT(instr->right()->IsConstant() &&
-           static_cast<HConstant*>(instr->right())->DoubleValue() == -1);
-    // TODO(olivf) This is currently just a hack to support the UnaryOp Minus
-    // Stub. This will go away once we can use more than one X87 register,
-    // thus fully support binary instructions without SSE2.
-    LOperand* left = UseX87TopOfStack(instr->left());
-    LNegateNoSSE2D* result = new(zone()) LNegateNoSSE2D(left);
-    return DefineX87TOS(result);
+    return DoArithmeticD(Token::MUL, instr);
   } else {
     ASSERT(instr->representation().IsSmiOrTagged());
     return DoArithmeticT(Token::MUL, instr);
@@ -1937,11 +1920,7 @@
                        ? TempRegister()
                        : NULL;
       LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
-      if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
-        return AssignEnvironment(DefineAsRegister(res));
-      } else {
-        return AssignEnvironment(DefineX87TOS(res));
-      }
+      return AssignEnvironment(DefineAsRegister(res));
     } else if (to.IsSmi()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
@@ -1976,9 +1955,7 @@
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
       info()->MarkAsDeferredCalling();
-      LOperand* value = CpuFeatures::IsSupported(SSE2)
-          ? UseRegisterAtStart(instr->value())
-          : UseAtStart(instr->value());
+      LOperand* value = UseRegisterAtStart(instr->value());
       LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
 
       // Make sure that temp and result_temp are different registers.
@@ -2140,12 +2117,8 @@
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
     bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
-    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
-      LOperand* temp = value_is_zero ? NULL : TempRegister();
-      return DefineAsRegister(new(zone()) LConstantD(temp));
-    } else {
-      return DefineX87TOS(new(zone()) LConstantD(NULL));
-    }
+    LOperand* temp = value_is_zero ? NULL : TempRegister();
+    return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsTagged()) {
     return DefineAsRegister(new(zone()) LConstantT);
   } else {
@@ -2337,11 +2310,7 @@
     if (instr->value()->representation().IsDouble()) {
       LOperand* object = UseRegisterAtStart(instr->elements());
       LOperand* val = NULL;
-      if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
-        val = UseRegisterAtStart(instr->value());
-      } else if (!instr->IsConstantHoleStore()) {
-        val = UseX87TopOfStack(instr->value());
-      }
+      val = UseRegisterAtStart(instr->value());
       LOperand* key = UseRegisterOrConstantAtStart(instr->key());
       return new(zone()) LStoreKeyed(object, key, val);
     } else {
@@ -2471,11 +2440,7 @@
     val = UseTempRegister(instr->value());
   } else if (FLAG_track_double_fields &&
              instr->field_representation().IsDouble()) {
-    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
-      val = UseRegisterAtStart(instr->value());
-    } else {
-      val = UseX87TopOfStack(instr->value());
-    }
+    val = UseRegisterAtStart(instr->value());
   } else {
     val = UseRegister(instr->value());
   }
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 4efde2e..56c9688 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -141,7 +141,6 @@
   V(MathTan)                                    \
   V(ModI)                                       \
   V(MulI)                                       \
-  V(NegateNoSSE2D)                              \
   V(NumberTagD)                                 \
   V(NumberTagI)                                 \
   V(NumberTagU)                                 \
@@ -265,7 +264,11 @@
   bool ClobbersTemps() const { return is_call_; }
   bool ClobbersRegisters() const { return is_call_; }
   virtual bool ClobbersDoubleRegisters() const {
-    return is_call_ || !CpuFeatures::IsSupported(SSE2);
+    return is_call_ ||
+      (!CpuFeatures::IsSupported(SSE2) &&
+       // We only have rudimentary X87Stack tracking, thus in general we
+       // cannot handle deoptimization or phi-nodes.
+       (HasEnvironment() || IsControl()));
   }
 
   virtual bool HasResult() const = 0;
@@ -273,6 +276,7 @@
 
   bool HasDoubleRegisterResult();
   bool HasDoubleRegisterInput();
+  bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
 
   LOperand* FirstInput() { return InputAt(0); }
   LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -377,7 +381,6 @@
 class LInstructionGap: public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-  virtual bool ClobbersDoubleRegisters() const { return false; }
 
   virtual bool HasInterestingComment(LCodeGen* gen) const {
     return !IsRedundant();
@@ -659,18 +662,6 @@
 };
 
 
-class LNegateNoSSE2D: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LNegateNoSSE2D(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(NegateNoSSE2D, "negate-no-sse2-d")
-};
-
-
 class LMulI: public LTemplateInstruction<1, 2, 1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -1222,10 +1213,6 @@
     temps_[0] = temp;
   }
 
-  virtual bool ClobbersDoubleRegisters() const {
-    return false;
-  }
-
   LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -2206,9 +2193,7 @@
   LOperand* value() { return inputs_[0]; }
   LOperand* temp() { return temps_[0]; }
 
-  virtual bool ClobbersDoubleRegisters() const {
-    return false;
-  }
+  virtual bool ClobbersDoubleRegisters() const { return false; }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
   DECLARE_HYDROGEN_ACCESSOR(Change);
@@ -2852,14 +2837,13 @@
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
-  LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
+  LUnallocated* ToUnallocated(X87Register reg);
 
   // Methods for setting up define-use relationships.
   MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
   MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
   MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
                                            XMMRegister fixed_register);
-  MUST_USE_RESULT LOperand* UseX87TopOfStack(HValue* value);
 
   // A value that is guaranteed to be allocated to a register.
   // Operand created by UseRegister is guaranteed to be live until the end of
diff --git a/src/ic.cc b/src/ic.cc
index f10e748..f84b3b9 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -159,7 +159,7 @@
   JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
   // Find the function on the stack and both the active code for the
   // function and the original code.
-  JSFunction* function = JSFunction::cast(frame->function());
+  JSFunction* function = frame->function();
   Handle<SharedFunctionInfo> shared(function->shared(), isolate());
   Code* code = shared->code();
   ASSERT(Debug::HasDebugInfo(shared));
@@ -191,6 +191,17 @@
     if (Name::cast(name) != stub_name) return false;
   }
 
+  if (receiver->IsGlobalObject()) {
+    if (!name->IsName()) return false;
+    Isolate* isolate = target->GetIsolate();
+    LookupResult lookup(isolate);
+    GlobalObject* global = GlobalObject::cast(receiver);
+    global->LocalLookupRealNamedProperty(Name::cast(name), &lookup);
+    if (!lookup.IsFound()) return false;
+    PropertyCell* cell = global->GetPropertyCell(&lookup);
+    return cell->type()->IsConstant();
+  }
+
   InlineCacheHolderFlag cache_holder =
       Code::ExtractCacheHolderFromFlags(target->flags());
 
@@ -1862,7 +1873,7 @@
   KeyedAccessStoreMode old_store_mode =
       Code::GetKeyedAccessStoreMode(target()->extra_ic_state());
   Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
-  if (ic_state == MONOMORPHIC && old_store_mode == STANDARD_STORE) {
+  if (ic_state == MONOMORPHIC) {
       // If the "old" and "new" maps are in the same elements map family, stay
       // MONOMORPHIC and use the map for the most generic ElementsKind.
     Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1875,16 +1886,16 @@
       store_mode = GetNonTransitioningStoreMode(store_mode);
       return isolate()->stub_cache()->ComputeKeyedStoreElement(
           transitioned_receiver_map, strict_mode, store_mode);
-    } else if (*previous_receiver_map == receiver->map()) {
-      if (IsGrowStoreMode(store_mode) ||
-          store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-          store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
-        // A "normal" IC that handles stores can switch to a version that can
-        // grow at the end of the array, handle OOB accesses or copy COW arrays
-        // and still stay MONOMORPHIC.
-        return isolate()->stub_cache()->ComputeKeyedStoreElement(
-            receiver_map, strict_mode, store_mode);
-      }
+    } else if (*previous_receiver_map == receiver->map() &&
+               old_store_mode == STANDARD_STORE &&
+               (IsGrowStoreMode(store_mode) ||
+                store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+                store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+      // A "normal" IC that handles stores can switch to a version that can
+      // grow at the end of the array, handle OOB accesses or copy COW arrays
+      // and still stay MONOMORPHIC.
+      return isolate()->stub_cache()->ComputeKeyedStoreElement(
+          receiver_map, strict_mode, store_mode);
     }
   }
 
@@ -2467,6 +2478,24 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
+  SealHandleScope scope(isolate);
+  ASSERT(args.length() == 4);
+  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> object = args.at<Object>(3);
+  StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+  return Runtime::SetObjectProperty(isolate,
+                                    object,
+                                    key,
+                                    value,
+                                    NONE,
+                                    strict_mode);
+}
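
Of the four stub arguments above, only slots 0 (value), 2 (key) and
3 (object) are read; slot 1 appears to carry the transition map (compare
the four-register descriptor added for MIPS below) and can be skipped
here, presumably because Runtime::SetObjectProperty re-derives any
required transition on the generic path.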
+
+
 void BinaryOpIC::patch(Code* code) {
   set_target(code);
 }
diff --git a/src/ic.h b/src/ic.h
index aa867cc..c9f521f 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -858,6 +858,7 @@
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
 
diff --git a/src/icu_util.cc b/src/icu_util.cc
index aaafadc..91f4527 100644
--- a/src/icu_util.cc
+++ b/src/icu_util.cc
@@ -27,7 +27,7 @@
 
 #include "icu_util.h"
 
-#if defined(_WIN32)
+#if defined(_WIN32) && defined(ENABLE_I18N_SUPPORT)
 #include <windows.h>
 
 #include "unicode/putil.h"
@@ -39,8 +39,10 @@
 
 namespace v8 {
 
+namespace internal {
+
 bool InitializeICU() {
-#if defined(_WIN32)
+#if defined(_WIN32) && defined(ENABLE_I18N_SUPPORT)
   // We expect to find the ICU data module alongside the current module.
   HMODULE module = LoadLibraryA(ICU_UTIL_DATA_SHARED_MODULE_NAME);
   if (!module) return false;
@@ -57,4 +59,4 @@
 #endif
 }
 
-}  // namespace v8
+} }  // namespace v8::internal
diff --git a/src/icu_util.h b/src/icu_util.h
index d7961b9..478abce 100644
--- a/src/icu_util.h
+++ b/src/icu_util.h
@@ -31,10 +31,12 @@
 
 namespace v8 {
 
+namespace internal {
+
 // Call this function to load ICU's data tables for the current process.  This
 // function should be called before ICU is used.
 bool InitializeICU();
 
-}  // namespace v8
+} }  // namespace v8::internal
 
 #endif  // V8_ICU_UTIL_H_
diff --git a/src/isolate.cc b/src/isolate.cc
index cf0f3b3..8d15566 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -622,10 +622,8 @@
   // Only display JS frames.
   if (!raw_frame->is_java_script()) return false;
   JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-  Object* raw_fun = frame->function();
-  // Not sure when this can happen but skip it just in case.
-  if (!raw_fun->IsJSFunction()) return false;
-  if ((raw_fun == caller) && !(*seen_caller)) {
+  JSFunction* fun = frame->function();
+  if ((fun == caller) && !(*seen_caller)) {
     *seen_caller = true;
     return false;
   }
@@ -637,7 +635,6 @@
   // The --builtins-in-stack-traces command line flag allows including
   // internal call sites in the stack trace for debugging purposes.
   if (!FLAG_builtins_in_stack_traces) {
-    JSFunction* fun = JSFunction::cast(raw_fun);
     if (frame->receiver()->IsJSBuiltinsObject() ||
         (fun->IsBuiltin() && !fun->shared()->native())) {
       return false;
@@ -1201,7 +1198,7 @@
     int pos = frame->LookupCode()->SourcePosition(frame->pc());
     Handle<Object> pos_obj(Smi::FromInt(pos), this);
     // Fetch function and receiver.
-    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+    Handle<JSFunction> fun(frame->function());
     Handle<Object> recv(frame->receiver(), this);
     // Advance to the next JavaScript frame and determine if the
     // current frame is the top-level frame.
@@ -1225,7 +1222,7 @@
   StackTraceFrameIterator it(this);
   if (!it.done()) {
     JavaScriptFrame* frame = it.frame();
-    JSFunction* fun = JSFunction::cast(frame->function());
+    JSFunction* fun = frame->function();
     Object* script = fun->shared()->script();
     if (script->IsScript() &&
         !(Script::cast(script)->source()->IsUndefined())) {
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 31aebd6..6e414cc 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -434,6 +434,7 @@
           return UNCHANGED;
       }
     case JS_ARRAY_TYPE:
+      if (object->IsAccessCheckNeeded()) break;
       if (deferred_string_key) SerializeDeferredKey(comma, key);
       return SerializeJSArray(Handle<JSArray>::cast(object));
     case JS_VALUE_TYPE:
@@ -447,12 +448,13 @@
         SerializeString(Handle<String>::cast(object));
         return SUCCESS;
       } else if (object->IsJSObject()) {
+        if (object->IsAccessCheckNeeded()) break;
         if (deferred_string_key) SerializeDeferredKey(comma, key);
         return SerializeJSObject(Handle<JSObject>::cast(object));
-      } else {
-        return SerializeGeneric(object, key, comma, deferred_string_key);
       }
   }
+
+  return SerializeGeneric(object, key, comma, deferred_string_key);
 }
 
 
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 95f0b00..bab2e10 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1621,8 +1621,7 @@
                             LiveEdit::FunctionPatchabilityStatus status) {
   if (!frame->is_java_script()) return false;
 
-  Handle<JSFunction> function(
-      JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
+  Handle<JSFunction> function(JavaScriptFrame::cast(frame)->function());
 
   Isolate* isolate = shared_info_array->GetIsolate();
   int len = GetArrayLength(shared_info_array);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 980c039..ab5e936 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1989,6 +1989,81 @@
 }
 
 
+int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
+    NewSpace* new_space,
+    NewSpacePage* p) {
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  int survivors_size = 0;
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
+
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT(static_cast<unsigned>(cell_index) ==
+           Bitmap::IndexToCell(
+               Bitmap::CellAlignIndex(
+                   p->AddressToMarkbitIndex(cell_base))));
+
+    MarkBit::CellType current_cell = cells[cell_index];
+    if (current_cell == 0) continue;
+
+    int offset = 0;
+    while (current_cell != 0) {
+      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
+      current_cell >>= trailing_zeros;
+      offset += trailing_zeros;
+      Address address = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(address);
+
+      int size = object->Size();
+      survivors_size += size;
+
+      offset++;
+      current_cell >>= 1;
+      // Aggressively promote young survivors to the old space.
+      if (TryPromoteObject(object, size)) {
+        continue;
+      }
+
+      // Promotion failed. Just migrate object to another semispace.
+      MaybeObject* allocation = new_space->AllocateRaw(size);
+      if (allocation->IsFailure()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room.
+          UNREACHABLE();
+        }
+        allocation = new_space->AllocateRaw(size);
+        ASSERT(!allocation->IsFailure());
+      }
+      Object* target = allocation->ToObjectUnchecked();
+
+      MigrateObject(HeapObject::cast(target)->address(),
+                    object->address(),
+                    size,
+                    NEW_SPACE);
+    }
+    cells[cell_index] = 0;
+  }
+  return survivors_size;
+}
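
The inner loop above visits every set bit of a 32-bit mark-bitmap cell
with a shift-and-count scan. A self-contained C++ sketch of just that
scan (a sketch assuming the GCC/Clang __builtin_ctz intrinsic; the real
code uses CompilerIntrinsics::CountTrailingZeros):

#include <cstdint>

// Calls |visit| with the word offset of every marked object in |cell|.
template <typename Visitor>
void ForEachMarkedOffset(uint32_t cell, Visitor visit) {
  int offset = 0;
  while (cell != 0) {
    int trailing_zeros = __builtin_ctz(cell);
    cell >>= trailing_zeros;
    offset += trailing_zeros;
    visit(offset);  // object starts at cell_base + offset * kPointerSize
    offset++;
    cell >>= 1;
  }
}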
+
+
 static void DiscoverGreyObjectsInSpace(Heap* heap,
                                        MarkingDeque* marking_deque,
                                        PagedSpace* space) {
@@ -2895,45 +2970,10 @@
   // migrate live objects and write forwarding addresses.  This stage puts
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
-  SemiSpaceIterator from_it(from_bottom, from_top);
-  for (HeapObject* object = from_it.Next();
-       object != NULL;
-       object = from_it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) {
-      mark_bit.Clear();
-      // Don't bother decrementing live bytes count. We'll discard the
-      // entire page at the end.
-      int size = object->Size();
-      survivors_size += size;
-
-      // Aggressively promote young survivors to the old space.
-      if (TryPromoteObject(object, size)) {
-        continue;
-      }
-
-      // Promotion failed. Just migrate object to another semispace.
-      MaybeObject* allocation = new_space->AllocateRaw(size);
-      if (allocation->IsFailure()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room.
-          UNREACHABLE();
-        }
-        allocation = new_space->AllocateRaw(size);
-        ASSERT(!allocation->IsFailure());
-      }
-      Object* target = allocation->ToObjectUnchecked();
-
-      MigrateObject(HeapObject::cast(target)->address(),
-                    object->address(),
-                    size,
-                    NEW_SPACE);
-    } else {
-      // Mark dead objects in the new space with null in their map field.
-      Memory::Address_at(object->address()) = NULL;
-    }
+  NewSpacePageIterator it(from_bottom, from_top);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
   }
 
   heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -3354,7 +3394,8 @@
     StoreBufferRebuildScope scope(heap_,
                                   heap_->store_buffer(),
                                   &Heap::ScavengeStoreBufferCallback);
-    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
+        &UpdatePointer);
   }
 
   { GCTracer::Scope gc_scope(tracer_,
diff --git a/src/mark-compact.h b/src/mark-compact.h
index ab3711a..b3e20f7 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -919,6 +919,9 @@
   // regions to each space's free list.
   void SweepSpaces();
 
+  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
+                                           NewSpacePage* p);
+
   void EvacuateNewSpace();
 
   void EvacuateLiveObjectsFromPage(Page* p);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 29178eb..c4a4bdc 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -259,6 +259,17 @@
 }
 
 
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0, a3, a1, a2 };
+  descriptor->register_param_count_ = 4;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 6978cde..ae8453d 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -457,22 +457,12 @@
   // Get the bailout id from the stack.
   __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
 
-  // Get the address of the location in the code object if possible (a3) (return
+  // Get the address of the location in the code object (a3) (return
   // address for lazy deoptimization) and compute the fp-to-sp delta in
   // register t0.
-  if (type() == EAGER || type() == SOFT) {
-    __ mov(a3, zero_reg);
-    // Correct one word for bailout id.
-    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else if (type() == OSR) {
-    __ mov(a3, ra);
-    // Correct one word for bailout id.
-    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else {
-    __ mov(a3, ra);
-    // Correct two words for bailout id and return address.
-    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
-  }
+  __ mov(a3, ra);
+  // Correct one word for bailout id.
+  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
 
   __ Subu(t0, fp, t0);
 
@@ -521,13 +511,8 @@
     __ sdc1(f0, MemOperand(a1, dst_offset));
   }
 
-  // Remove the bailout id, eventually return address, and the saved registers
-  // from the stack.
-  if (type() == EAGER || type() == SOFT || type() == OSR) {
-    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else {
-    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
-  }
+  // Remove the bailout id and the saved registers from the stack.
+  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
 
   // Compute a pointer to the unwinding limit in register a2; that is
   // the first stack slot not part of the input frame.
@@ -628,25 +613,19 @@
 
 
 // Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 6 * Assembler::kInstrSize;
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
 
-  // Create a sequence of deoptimization entries. Note that any
-  // registers may be still live.
+  // Create a sequence of deoptimization entries.
+  // Note that registers are still live when jumping to an entry.
   Label table_start;
   __ bind(&table_start);
   for (int i = 0; i < count(); i++) {
     Label start;
     __ bind(&start);
-    if (type() != EAGER && type() != SOFT) {
-      // Emulate ia32 like call by pushing return address to stack.
-      __ addiu(sp, sp, -2 * kPointerSize);
-      __ sw(ra, MemOperand(sp, 1 * kPointerSize));
-    } else {
-      __ addiu(sp, sp, -1 * kPointerSize);
-    }
+    __ addiu(sp, sp, -1 * kPointerSize);
     // Jump over the remaining deopt entries (including this one).
     // This code is always reached by calling Jump, which puts the target (label
     // start) into t9.
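
With the EAGER/SOFT/OSR special cases gone, every table entry now emits the same one-word push plus jump sequence, which is why table_entry_size_ shrinks from 9 to 6 instructions. The constant matters because entry addresses are pure arithmetic on the table base; a sketch of the relation (the actual computation lives in the shared deoptimizer code, outside this diff):

    // Each entry must fit in exactly table_entry_size_ bytes so that
    // entry ids map linearly to addresses.
    Address GetEntryAddress(Address table_base, int entry_id) {
      return table_base + entry_id * Deoptimizer::table_entry_size_;
    }
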
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 87d64e2..56b6699 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -332,8 +332,7 @@
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Label table_start;
   __ bind(&table_start);
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
+  Label needs_frame;
   for (int i = 0; i < deopt_jump_table_.length(); i++) {
     __ bind(&deopt_jump_table_[i].label);
     Address entry = deopt_jump_table_[i].address;
@@ -346,43 +345,22 @@
     }
     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
     if (deopt_jump_table_[i].needs_frame) {
-      if (type == Deoptimizer::LAZY) {
-        if (needs_frame_is_call.is_bound()) {
-          __ Branch(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-          __ push(scratch0());
-          __ Addu(fp, sp, Operand(2 * kPointerSize));
-          __ Call(t9);
-        }
+      if (needs_frame.is_bound()) {
+        __ Branch(&needs_frame);
       } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ Branch(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-          __ push(scratch0());
-          __ Addu(fp, sp, Operand(2 * kPointerSize));
-          __ Jump(t9);
-        }
+        __ bind(&needs_frame);
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        // This variant of deopt can only be used with stubs. Since we don't
+        // have a function pointer to install in the stack frame that we're
+        // building, install a special marker there instead.
+        ASSERT(info()->IsStub());
+        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ Addu(fp, sp, Operand(2 * kPointerSize));
+        __ Call(t9);
       }
     } else {
-      if (type == Deoptimizer::LAZY) {
-        __ Call(t9);
-      } else {
-        __ Jump(t9);
-      }
+      __ Call(t9);
     }
   }
   __ RecordComment("]");
@@ -780,13 +758,8 @@
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  bool needs_lazy_deopt = info()->IsStub();
   if (cc == al && frame_is_built_) {
-    if (needs_lazy_deopt) {
-      __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
-    } else {
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
-    }
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
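
Because the deoptimizer now always needs the return address, every jump-table entry is reached via Call, and the separate needs_frame_is_call/needs_frame_not_call paths collapse into a single needs_frame label. For reference, the stub frame that label builds lays out as follows (a sketch derived from the MultiPush/push sequence above):

    //  higher addresses
    //  [ ra           ]  <- fp + kPointerSize
    //  [ caller fp    ]  <- fp   (fp was set to sp + 2 * kPointerSize)
    //  [ cp (context) ]  <- fp - kPointerSize
    //  [ STUB marker  ]  <- sp
    //  lower addresses
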
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 5697d81..9427262 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -330,7 +330,6 @@
   stream->Add("= ");
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
-  ASSERT(hydrogen()->property_cell()->value()->IsSmi());
   ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index a8d9b35..c1edcb1 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -309,6 +309,8 @@
 
 
 int main(int argc, char** argv) {
+  V8::InitializeICU();
+
   // By default, log code create information in the snapshot.
   i::FLAG_log_code = true;
 
diff --git a/src/object-observe.js b/src/object-observe.js
index ada7919..90c9a69 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -216,8 +216,10 @@
   }
 
   var objectInfo = objectInfoMap.get(object);
-  if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
-  %SetIsObserved(object, true);
+  if (IS_UNDEFINED(objectInfo)) {
+    objectInfo = CreateObjectInfo(object);
+    %SetIsObserved(object);
+  }
 
   EnsureObserverRemoved(objectInfo, callback);
 
@@ -241,12 +243,6 @@
     return object;
 
   EnsureObserverRemoved(objectInfo, callback);
-
-  if (objectInfo.changeObservers.length === 0 &&
-      objectInfo.inactiveObservers.length === 0) {
-    %SetIsObserved(object, false);
-  }
-
   return object;
 }
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f251129..091a0eb 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1335,7 +1335,7 @@
                                            ElementsKind to) {
   if (FLAG_track_allocation_sites &&
       IsFastSmiElementsKind(from) &&
-      (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) {
+      IsMoreGeneralElementsKindTransition(from, to)) {
     return TRACK_ALLOCATION_SITE;
   }
 
@@ -3745,7 +3745,8 @@
 
 
 Code::ExtraICState Code::extra_ic_state() {
-  ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+  ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
+         || ic_state() == DEBUG_STUB);
   return ExtractExtraICStateFromFlags(flags());
 }
 
diff --git a/src/objects.cc b/src/objects.cc
index df042a1..7b06b87 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -551,7 +551,9 @@
   // No accessible property found.
   *attributes = ABSENT;
   Heap* heap = name->GetHeap();
-  heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+  Isolate* isolate = heap->isolate();
+  isolate->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return heap->undefined_value();
 }
 
@@ -925,6 +927,7 @@
       Isolate* isolate = heap->isolate();
       if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
         isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
+        RETURN_IF_SCHEDULED_EXCEPTION(isolate);
         return heap->undefined_value();
       }
     }
@@ -3243,7 +3246,6 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
-    // A GlobalProxy's prototype should always be a proper JSObject.
     return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
   }
 
@@ -3365,6 +3367,7 @@
   HandleScope scope(isolate);
   Handle<Object> value_handle(value, isolate);
   isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return *value_handle;
 }
 
@@ -4015,7 +4018,7 @@
   Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
   PropertyAttributes old_attributes = ABSENT;
   bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
-  if (is_observed) {
+  if (is_observed && lookup.IsProperty()) {
     if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
     old_attributes = lookup.GetAttributes();
   }
@@ -5060,6 +5063,7 @@
   if (IsAccessCheckNeeded() &&
       !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
     isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return isolate->heap()->false_value();
   }
 
@@ -5137,6 +5141,7 @@
   if (IsAccessCheckNeeded() &&
       !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
     isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return isolate->heap()->false_value();
   }
 
@@ -5367,6 +5372,7 @@
                                isolate->heap()->undefined_value(),
                                v8::ACCESS_KEYS)) {
     isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return isolate->heap()->false_value();
   }
 
@@ -5445,6 +5451,7 @@
                                heap->undefined_value(),
                                v8::ACCESS_KEYS)) {
     isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return heap->false_value();
   }
 
@@ -5550,6 +5557,40 @@
 }
 
 
+MUST_USE_RESULT MaybeObject* JSObject::SetObserved(Isolate* isolate) {
+  if (map()->is_observed())
+    return isolate->heap()->undefined_value();
+
+  Heap* heap = isolate->heap();
+
+  if (!HasExternalArrayElements()) {
+    // Go to dictionary mode, so that we don't skip map checks.
+    MaybeObject* maybe = NormalizeElements();
+    if (maybe->IsFailure()) return maybe;
+    ASSERT(!HasFastElements());
+  }
+
+  LookupResult result(isolate);
+  map()->LookupTransition(this, heap->observed_symbol(), &result);
+
+  Map* new_map;
+  if (result.IsTransition()) {
+    new_map = result.GetTransitionTarget();
+    ASSERT(new_map->is_observed());
+  } else if (map()->CanHaveMoreTransitions()) {
+    MaybeObject* maybe_new_map = map()->CopyForObserved();
+    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+  } else {
+    MaybeObject* maybe_copy = map()->Copy();
+    if (!maybe_copy->To(&new_map)) return maybe_copy;
+    new_map->set_is_observed(true);
+  }
+  set_map(new_map);
+
+  return heap->undefined_value();
+}
+
+
 MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
   StackLimitCheck check(isolate);
   if (check.HasOverflowed()) return isolate->StackOverflow();
@@ -6258,6 +6299,7 @@
   if (IsAccessCheckNeeded() &&
       !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
     isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return isolate->heap()->undefined_value();
   }
 
@@ -6333,7 +6375,7 @@
 }
 
 
-Object* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
+MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
   Heap* heap = GetHeap();
 
   // Make sure that the top context does not change when doing callbacks or
@@ -6344,6 +6386,7 @@
   if (IsAccessCheckNeeded() &&
       !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
     heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    RETURN_IF_SCHEDULED_EXCEPTION(heap->isolate());
     return heap->undefined_value();
   }
 
@@ -6671,6 +6714,39 @@
 }
 
 
+MaybeObject* Map::CopyForObserved() {
+  ASSERT(!is_observed());
+
+  // If the map owns its own descriptors, share them and transfer their
+  // ownership to the new map.
+  Map* new_map;
+  MaybeObject* maybe_new_map;
+  if (owns_descriptors()) {
+    maybe_new_map = CopyDropDescriptors();
+  } else {
+    maybe_new_map = Copy();
+  }
+  if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+  TransitionArray* transitions;
+  MaybeObject* maybe_transitions = AddTransition(GetHeap()->observed_symbol(),
+                                                 new_map,
+                                                 FULL_TRANSITION);
+  if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+  set_transitions(transitions);
+
+  new_map->set_is_observed(true);
+
+  if (owns_descriptors()) {
+    new_map->InitializeDescriptors(instance_descriptors());
+    set_owns_descriptors(false);
+  }
+
+  new_map->SetBackPointer(this);
+  return new_map;
+}
+
+
 MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
   if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
 
@@ -9976,20 +10052,6 @@
 }
 
 
-byte Code::compare_nil_state() {
-  ASSERT(is_compare_nil_ic_stub());
-  return CompareNilICStub::ExtractTypesFromExtraICState(
-      extended_extra_ic_state());
-}
-
-
-byte Code::compare_nil_value() {
-  ASSERT(is_compare_nil_ic_stub());
-  return CompareNilICStub::ExtractNilValueFromExtraICState(
-      extended_extra_ic_state());
-}
-
-
 void Code::InvalidateRelocation() {
   set_relocation_info(GetHeap()->empty_byte_array());
 }
@@ -12080,6 +12142,7 @@
   if (IsAccessCheckNeeded()) {
     if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
       isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
       return value_raw;
     }
   }
@@ -12318,6 +12381,10 @@
   if (site->IsLiteralSite()) {
     JSArray* transition_info = JSArray::cast(site->transition_info());
     ElementsKind kind = transition_info->GetElementsKind();
+    // If kind is holey, ensure that to_kind is as well.
+    if (IsHoleyElementsKind(kind)) {
+      to_kind = GetHoleyElementsKind(to_kind);
+    }
     if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
       // If the array is huge, it's not likely to be defined in a local
       // function, so we shouldn't make new instances of it very often.
@@ -12336,6 +12403,10 @@
     }
   } else {
     ElementsKind kind = site->GetElementsKind();
+    // If kind is holey, ensure that to_kind is as well.
+    if (IsHoleyElementsKind(kind)) {
+      to_kind = GetHoleyElementsKind(to_kind);
+    }
     if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
       if (FLAG_trace_track_allocation_sites) {
         PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
@@ -12821,6 +12892,13 @@
     }
   }
 
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return false;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->HasRealElementProperty(isolate, index);
+  }
+
   return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT;
 }
 
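The two identical hunks in the AllocationSite code above coerce to_kind to a holey kind whenever the tracked kind is already holey, so a site can never appear to transition from holey back to packed. A self-contained sketch of that coercion (simplified enum; the real ElementsKind lattice is larger):

    enum Kind { PACKED_SMI, HOLEY_SMI, PACKED, HOLEY };

    static bool IsHoley(Kind k) { return k == HOLEY_SMI || k == HOLEY; }

    static Kind ToHoley(Kind k) {
      if (k == PACKED_SMI) return HOLEY_SMI;
      if (k == PACKED) return HOLEY;
      return k;  // already holey
    }

    // Mirrors the added lines: widen the requested target before asking
    // whether the transition should be tracked.
    static Kind CoerceTransition(Kind tracked, Kind to_kind) {
      return IsHoley(tracked) ? ToHoley(to_kind) : to_kind;
    }
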
diff --git a/src/objects.h b/src/objects.h
index 470d8e8..bf04934 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1944,7 +1944,7 @@
                              Handle<Object> setter,
                              PropertyAttributes attributes);
 
-  Object* LookupAccessor(Name* name, AccessorComponent component);
+  MaybeObject* LookupAccessor(Name* name, AccessorComponent component);
 
   MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
 
@@ -2333,6 +2333,10 @@
   // ES5 Object.freeze
   MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate);
 
+
+  // Called the first time an object is observed with ES7 Object.observe.
+  MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
+
   // Copy object
   MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
 
@@ -4673,10 +4677,6 @@
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
 
-  // [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in.
-  byte compare_nil_state();
-  byte compare_nil_value();
-
   // [has_function_cache]: For kind STUB tells whether a function cache
   // is passed to the stub.
   inline bool has_function_cache();
@@ -5481,8 +5481,10 @@
       int index,
       TransitionFlag flag);
   MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
+
   MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
                                                   TransitionFlag flag);
+  MUST_USE_RESULT MaybeObject* CopyForObserved();
 
   MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
                                               NormalizedMapSharingMode sharing);
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 7c3da1b..51321c7 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -574,57 +574,6 @@
 }
 
 
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class CygwinMutex : public Mutex {
- public:
-  CygwinMutex() {
-    pthread_mutexattr_t attrs;
-    memset(&attrs, 0, sizeof(attrs));
-
-    int result = pthread_mutexattr_init(&attrs);
-    ASSERT(result == 0);
-    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-    ASSERT(result == 0);
-    result = pthread_mutex_init(&mutex_, &attrs);
-    ASSERT(result == 0);
-  }
-
-  virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-
-  virtual int Lock() {
-    int result = pthread_mutex_lock(&mutex_);
-    return result;
-  }
-
-  virtual int Unlock() {
-    int result = pthread_mutex_unlock(&mutex_);
-    return result;
-  }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new CygwinMutex();
-}
-
-
 class CygwinSemaphore : public Semaphore {
  public:
   explicit CygwinSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index e2c2c42..c771cd3 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -196,27 +196,7 @@
 
 
 void OS::DumpBacktrace() {
-  void* trace[100];
-  int size = backtrace(trace, ARRAY_SIZE(trace));
-  char** symbols = backtrace_symbols(trace, size);
-  fprintf(stderr, "\n==== C stack trace ===============================\n\n");
-  if (size == 0) {
-    fprintf(stderr, "(empty)\n");
-  } else if (symbols == NULL) {
-    fprintf(stderr, "(no symbols)\n");
-  } else {
-    for (int i = 1; i < size; ++i) {
-      fprintf(stderr, "%2d: ", i);
-      char mangled[201];
-      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
-        fprintf(stderr, "%s\n", mangled);
-      } else {
-        fprintf(stderr, "??\n");
-      }
-    }
-  }
-  fflush(stderr);
-  free(symbols);
+  POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
 }
 
 
@@ -318,30 +298,7 @@
 
 
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  int frames_size = frames.length();
-  ScopedVector<void*> addresses(frames_size);
-
-  int frames_count = backtrace(addresses.start(), frames_size);
-
-  char** symbols = backtrace_symbols(addresses.start(), frames_count);
-  if (symbols == NULL) {
-    return kStackWalkError;
-  }
-
-  for (int i = 0; i < frames_count; i++) {
-    frames[i].address = addresses[i];
-    // Format a text representation of the frame based on the information
-    // available.
-    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
-             "%s",
-             symbols[i]);
-    // Make sure line termination is in place.
-    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-  }
-
-  free(symbols);
-
-  return frames_count;
+  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
 }
 
 
@@ -568,56 +525,6 @@
 }
 
 
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class FreeBSDMutex : public Mutex {
- public:
-  FreeBSDMutex() {
-    pthread_mutexattr_t attrs;
-    int result = pthread_mutexattr_init(&attrs);
-    ASSERT(result == 0);
-    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-    ASSERT(result == 0);
-    result = pthread_mutex_init(&mutex_, &attrs);
-    ASSERT(result == 0);
-    USE(result);
-  }
-
-  virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
-  virtual int Lock() {
-    int result = pthread_mutex_lock(&mutex_);
-    return result;
-  }
-
-  virtual int Unlock() {
-    int result = pthread_mutex_unlock(&mutex_);
-    return result;
-  }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new FreeBSDMutex();
-}
-
-
 class FreeBSDSemaphore : public Semaphore {
  public:
   explicit FreeBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 2c6a36c..ace4056 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -146,6 +146,9 @@
     case VFP3:
       search_string = "vfpv3";
       break;
+    case NEON:
+      search_string = "neon";
+      break;
     case ARMv7:
       search_string = "ARMv7";
       break;
@@ -200,6 +203,36 @@
 }
 
 
+CpuPart OS::GetCpuPart(CpuImplementer implementer) {
+  static bool use_cached_value = false;
+  static CpuPart cached_value = CPU_UNKNOWN;
+  if (use_cached_value) {
+    return cached_value;
+  }
+  if (implementer == ARM_IMPLEMENTER) {
+    if (CPUInfoContainsString("CPU part\t: 0xc0f")) {
+      cached_value = CORTEX_A15;
+    } else if (CPUInfoContainsString("CPU part\t: 0xc0c")) {
+      cached_value = CORTEX_A12;
+    } else if (CPUInfoContainsString("CPU part\t: 0xc09")) {
+      cached_value = CORTEX_A9;
+    } else if (CPUInfoContainsString("CPU part\t: 0xc08")) {
+      cached_value = CORTEX_A8;
+    } else if (CPUInfoContainsString("CPU part\t: 0xc07")) {
+      cached_value = CORTEX_A7;
+    } else if (CPUInfoContainsString("CPU part\t: 0xc05")) {
+      cached_value = CORTEX_A5;
+    } else {
+      cached_value = CPU_UNKNOWN;
+    }
+  } else {
+    cached_value = CPU_UNKNOWN;
+  }
+  use_cached_value = true;
+  return cached_value;
+}
+
+
 bool OS::ArmUsingHardFloat() {
   // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
   // the Floating Point ABI used (PCS stands for Procedure Call Standard).
@@ -418,32 +451,9 @@
 
 
 void OS::DumpBacktrace() {
+  // backtrace is a glibc extension.
 #if defined(__GLIBC__) && !defined(__UCLIBC__)
-  void* trace[100];
-  int size = backtrace(trace, ARRAY_SIZE(trace));
-  char** symbols = backtrace_symbols(trace, size);
-  fprintf(stderr, "\n==== C stack trace ===============================\n\n");
-  if (size == 0) {
-    fprintf(stderr, "(empty)\n");
-  } else if (symbols == NULL) {
-    fprintf(stderr, "(no symbols)\n");
-  } else {
-    for (int i = 1; i < size; ++i) {
-      fprintf(stderr, "%2d: ", i);
-      char mangled[201];
-      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
-        int status;
-        size_t length;
-        char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
-        fprintf(stderr, "%s\n", demangled ? demangled : mangled);
-        free(demangled);
-      } else {
-        fprintf(stderr, "??\n");
-      }
-    }
-  }
-  fflush(stderr);
-  free(symbols);
+  POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
 #endif
 }
 
@@ -597,33 +607,10 @@
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   // backtrace is a glibc extension.
 #if defined(__GLIBC__) && !defined(__UCLIBC__)
-  int frames_size = frames.length();
-  ScopedVector<void*> addresses(frames_size);
-
-  int frames_count = backtrace(addresses.start(), frames_size);
-
-  char** symbols = backtrace_symbols(addresses.start(), frames_count);
-  if (symbols == NULL) {
-    return kStackWalkError;
-  }
-
-  for (int i = 0; i < frames_count; i++) {
-    frames[i].address = addresses[i];
-    // Format a text representation of the frame based on the information
-    // available.
-    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
-             "%s",
-             symbols[i]);
-    // Make sure line termination is in place.
-    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-  }
-
-  free(symbols);
-
-  return frames_count;
-#else  // defined(__GLIBC__) && !defined(__UCLIBC__)
+  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
+#else
   return 0;
-#endif  // defined(__GLIBC__) && !defined(__UCLIBC__)
+#endif
 }
 
 
@@ -860,56 +847,6 @@
 }
 
 
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class LinuxMutex : public Mutex {
- public:
-  LinuxMutex() {
-    pthread_mutexattr_t attrs;
-    int result = pthread_mutexattr_init(&attrs);
-    ASSERT(result == 0);
-    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-    ASSERT(result == 0);
-    result = pthread_mutex_init(&mutex_, &attrs);
-    ASSERT(result == 0);
-    USE(result);
-  }
-
-  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
-
-  virtual int Lock() {
-    int result = pthread_mutex_lock(&mutex_);
-    return result;
-  }
-
-  virtual int Unlock() {
-    int result = pthread_mutex_unlock(&mutex_);
-    return result;
-  }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new LinuxMutex();
-}
-
-
 class LinuxSemaphore : public Semaphore {
  public:
   explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
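
GetCpuPart parses /proc/cpuinfo once and caches the answer in function-local statics, so repeated queries are cheap. A hypothetical call site (none appears in this diff) that tunes for a specific core could look like:

    // Hypothetical helper; RunningOnCortexA9 is not a V8 function.
    static bool RunningOnCortexA9() {
      return OS::GetCpuPart(OS::GetCpuImplementer()) == CORTEX_A9;
    }
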
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index f4d8b33..097691b 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -53,6 +53,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
+#include <cxxabi.h>
 
 #undef MAP_TYPE
 
@@ -189,7 +190,10 @@
 
 
 void OS::DumpBacktrace() {
-  // Currently unsupported.
+  // If the weak link to the execinfo lib failed, i.e. on 10.4, just return.
+  if (backtrace == NULL) return;
+
+  POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
 }
 
 
@@ -315,34 +319,9 @@
 
 int OS::StackWalk(Vector<StackFrame> frames) {
   // If the weak link to the execinfo lib failed, i.e. on 10.4, return early.
-  if (backtrace == NULL)
-    return 0;
+  if (backtrace == NULL) return 0;
 
-  int frames_size = frames.length();
-  ScopedVector<void*> addresses(frames_size);
-
-  int frames_count = backtrace(addresses.start(), frames_size);
-
-  char** symbols = backtrace_symbols(addresses.start(), frames_count);
-  if (symbols == NULL) {
-    return kStackWalkError;
-  }
-
-  for (int i = 0; i < frames_count; i++) {
-    frames[i].address = addresses[i];
-    // Format a text representation of the frame based on the information
-    // available.
-    SNPrintF(MutableCStrVector(frames[i].text,
-                               kStackWalkMaxTextLen),
-             "%s",
-             symbols[i]);
-    // Make sure line termination is in place.
-    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-  }
-
-  free(symbols);
-
-  return frames_count;
+  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
 }
 
 
@@ -652,45 +631,6 @@
 }
 
 
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class MacOSMutex : public Mutex {
- public:
-  MacOSMutex() {
-    pthread_mutexattr_t attr;
-    pthread_mutexattr_init(&attr);
-    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-    pthread_mutex_init(&mutex_, &attr);
-  }
-
-  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
-
-  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
-  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new MacOSMutex();
-}
-
-
 class MacOSSemaphore : public Semaphore {
  public:
   explicit MacOSSemaphore(int count) {
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 1b481f4..dd5a3dd 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -220,6 +220,11 @@
 }
 
 
+CpuPart OS::GetCpuPart(CpuImplementer implementer) {
+  UNIMPLEMENTED();
+}
+
+
 bool OS::ArmCpuHasFeature(CpuFeature feature) {
   UNIMPLEMENTED();
 }
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index f849d58..a40df48 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -607,56 +607,6 @@
 }
 
 
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class OpenBSDMutex : public Mutex {
- public:
-  OpenBSDMutex() {
-    pthread_mutexattr_t attrs;
-    int result = pthread_mutexattr_init(&attrs);
-    ASSERT(result == 0);
-    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-    ASSERT(result == 0);
-    result = pthread_mutex_init(&mutex_, &attrs);
-    ASSERT(result == 0);
-    USE(result);
-  }
-
-  virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
-  virtual int Lock() {
-    int result = pthread_mutex_lock(&mutex_);
-    return result;
-  }
-
-  virtual int Unlock() {
-    int result = pthread_mutex_unlock(&mutex_);
-    return result;
-  }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new OpenBSDMutex();
-}
-
-
 class OpenBSDSemaphore : public Semaphore {
  public:
   explicit OpenBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 6e83b04..bd8a33f 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -31,6 +31,8 @@
 
 #include "platform-posix.h"
 
+#include <pthread.h>
+#include <sched.h>  // for sched_yield
 #include <unistd.h>
 #include <errno.h>
 #include <time.h>
@@ -341,7 +343,26 @@
   (*memmove_function)(dest, src, size);
 }
 
-#endif  // V8_TARGET_ARCH_IA32
+#elif defined(V8_HOST_ARCH_ARM)
+void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest,
+                                   const uint8_t* src,
+                                   size_t chars) {
+  uint16_t *limit = dest + chars;
+  while (dest < limit) {
+    *dest++ = static_cast<uint16_t>(*src++);
+  }
+}
+
+
+OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
+OS::MemCopyUint16Uint8Function OS::memcopy_uint16_uint8_function =
+    &OS::MemCopyUint16Uint8Wrapper;
+// Defined in codegen-arm.cc.
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+    OS::MemCopyUint8Function stub);
+OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+    OS::MemCopyUint16Uint8Function stub);
+#endif
 
 
 void POSIXPostSetUp() {
@@ -350,6 +371,11 @@
   if (generated_memmove != NULL) {
     memmove_function = generated_memmove;
   }
+#elif defined(V8_HOST_ARCH_ARM)
+  OS::memcopy_uint8_function =
+      CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
+  OS::memcopy_uint16_uint8_function =
+      CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
 #endif
   init_fast_sin_function();
   init_fast_cos_function();
@@ -375,6 +401,57 @@
 
 
 // ----------------------------------------------------------------------------
+// POSIX thread support.
+//
+
+void Thread::YieldCPU() {
+  sched_yield();
+}
+
+
+class POSIXMutex : public Mutex {
+ public:
+  POSIXMutex() {
+    pthread_mutexattr_t attr;
+    memset(&attr, 0, sizeof(attr));
+    int result = pthread_mutexattr_init(&attr);
+    ASSERT(result == 0);
+    result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    ASSERT(result == 0);
+    result = pthread_mutex_init(&mutex_, &attr);
+    ASSERT(result == 0);
+    result = pthread_mutexattr_destroy(&attr);
+    ASSERT(result == 0);
+    USE(result);
+  }
+
+  virtual ~POSIXMutex() { pthread_mutex_destroy(&mutex_); }
+
+  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
+
+  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
+
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
+ private:
+  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new POSIXMutex();
+}
+
+
+// ----------------------------------------------------------------------------
 // POSIX socket support.
 //
 
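The per-OS mutex classes deleted above were line-for-line duplicates, so they collapse into the single POSIXMutex here. Note the PTHREAD_MUTEX_RECURSIVE attribute; this standalone program shows why it matters (the same thread may take the lock twice without deadlocking):

    #include <assert.h>
    #include <pthread.h>

    int main() {
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_t mu;
      pthread_mutex_init(&mu, &attr);
      pthread_mutexattr_destroy(&attr);
      assert(pthread_mutex_lock(&mu) == 0);
      assert(pthread_mutex_lock(&mu) == 0);  // re-entry succeeds
      pthread_mutex_unlock(&mu);
      pthread_mutex_unlock(&mu);
      pthread_mutex_destroy(&mu);
      return 0;
    }
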
diff --git a/src/platform-posix.h b/src/platform-posix.h
index 7a982ed..79178fd 100644
--- a/src/platform-posix.h
+++ b/src/platform-posix.h
@@ -28,12 +28,78 @@
 #ifndef V8_PLATFORM_POSIX_H_
 #define V8_PLATFORM_POSIX_H_
 
+#include <cxxabi.h>
+#include <stdio.h>
+
+#include "platform.h"
+
 namespace v8 {
 namespace internal {
 
 // Used by platform implementation files during OS::PostSetUp().
 void POSIXPostSetUp();
 
+// Used by platform implementation files during OS::DumpBacktrace()
+// and OS::StackWalk().
+template<int (*backtrace)(void**, int),
+         char** (*backtrace_symbols)(void* const*, int)>
+struct POSIXBacktraceHelper {
+  static void DumpBacktrace() {
+    void* trace[100];
+    int size = backtrace(trace, ARRAY_SIZE(trace));
+    char** symbols = backtrace_symbols(trace, size);
+    fprintf(stderr, "\n==== C stack trace ===============================\n\n");
+    if (size == 0) {
+      fprintf(stderr, "(empty)\n");
+    } else if (symbols == NULL) {
+      fprintf(stderr, "(no symbols)\n");
+    } else {
+      for (int i = 1; i < size; ++i) {
+        fprintf(stderr, "%2d: ", i);
+        char mangled[201];
+        if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {// NOLINT
+          int status;
+          size_t length;
+          char* demangled = abi::__cxa_demangle(
+              mangled, NULL, &length, &status);
+          fprintf(stderr, "%s\n", demangled != NULL ? demangled : mangled);
+          free(demangled);
+        } else {
+          fprintf(stderr, "??\n");
+        }
+      }
+    }
+    fflush(stderr);
+    free(symbols);
+  }
+
+  static int StackWalk(Vector<OS::StackFrame> frames) {
+    int frames_size = frames.length();
+    ScopedVector<void*> addresses(frames_size);
+
+    int frames_count = backtrace(addresses.start(), frames_size);
+
+    char** symbols = backtrace_symbols(addresses.start(), frames_count);
+    if (symbols == NULL) {
+      return OS::kStackWalkError;
+    }
+
+    for (int i = 0; i < frames_count; i++) {
+      frames[i].address = addresses[i];
+      // Format a text representation of the frame based on the information
+      // available.
+      OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
+                   "%s", symbols[i]);
+      // Make sure line termination is in place.
+      frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
+    }
+
+    free(symbols);
+
+    return frames_count;
+  }
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_PLATFORM_POSIX_H_
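
Passing backtrace and backtrace_symbols as template parameters lets each platform file bind whatever its libc provides, and lets macOS check the weakly linked symbols before instantiating any call. The shared pattern used by the platform files above:

    void OS::DumpBacktrace() {
      // On Mac OS X the execinfo symbols are weakly linked and may be
      // absent at runtime (10.4), so test before calling.
      if (backtrace == NULL) return;
      POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
    }
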
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 4b0094f..3c4df66 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -38,7 +38,6 @@
 #include <ucontext.h>  // walkstack(), getcontext()
 #include <dlfcn.h>     // dladdr
 #include <pthread.h>
-#include <sched.h>  // for sched_yield
 #include <semaphore.h>
 #include <time.h>
 #include <sys/time.h>  // gettimeofday(), timeradd()
@@ -539,46 +538,6 @@
 }
 
 
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class SolarisMutex : public Mutex {
- public:
-  SolarisMutex() {
-    pthread_mutexattr_t attr;
-    pthread_mutexattr_init(&attr);
-    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-    pthread_mutex_init(&mutex_, &attr);
-  }
-
-  ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
-
-  int Lock() { return pthread_mutex_lock(&mutex_); }
-
-  int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new SolarisMutex();
-}
-
-
 class SolarisSemaphore : public Semaphore {
  public:
   explicit SolarisSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform.h b/src/platform.h
index 24d21cb..211be39 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -315,6 +315,9 @@
   // Support runtime detection of Cpu implementer
   static CpuImplementer GetCpuImplementer();
 
+  // Support runtime detection of Cpu part
+  static CpuPart GetCpuPart(CpuImplementer implementer);
+
   // Support runtime detection of VFP3 on ARM CPUs.
   static bool ArmCpuHasFeature(CpuFeature feature);
 
@@ -343,7 +346,42 @@
   static void MemCopy(void* dest, const void* src, size_t size) {
     MemMove(dest, src, size);
   }
-#else  // V8_TARGET_ARCH_IA32
+#elif defined(V8_HOST_ARCH_ARM)
+  typedef void (*MemCopyUint8Function)(uint8_t* dest,
+                                       const uint8_t* src,
+                                       size_t size);
+  static MemCopyUint8Function memcopy_uint8_function;
+  static void MemCopyUint8Wrapper(uint8_t* dest,
+                                  const uint8_t* src,
+                                  size_t chars) {
+    memcpy(dest, src, chars);
+  }
+  // For values < 16, the assembler function is slower than the inlined C code.
+  static const int kMinComplexMemCopy = 16;
+  static void MemCopy(void* dest, const void* src, size_t size) {
+    (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+                              reinterpret_cast<const uint8_t*>(src),
+                              size);
+  }
+  static void MemMove(void* dest, const void* src, size_t size) {
+    memmove(dest, src, size);
+  }
+
+  typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
+                                             const uint8_t* src,
+                                             size_t size);
+  static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
+  static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
+                                        const uint8_t* src,
+                                        size_t chars);
+  // For values < 12, the assembler function is slower than the inlined C code.
+  static const int kMinComplexConvertMemCopy = 12;
+  static void MemCopyUint16Uint8(uint16_t* dest,
+                                 const uint8_t* src,
+                                 size_t size) {
+    (*memcopy_uint16_uint8_function)(dest, src, size);
+  }
+#else
   // Copy memory area to disjoint memory area.
   static void MemCopy(void* dest, const void* src, size_t size) {
     memcpy(dest, src, size);
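
On ARM, MemCopy now dispatches through a function pointer that starts at a portable C fallback and may be repointed to an assembler-generated stub during start-up (see the POSIXPostSetUp hunk earlier). A stand-in model of the scheme, with placeholder names rather than V8's:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef void (*CopyFn)(uint8_t* dest, const uint8_t* src, size_t n);

    static void FallbackCopy(uint8_t* dest, const uint8_t* src, size_t n) {
      memcpy(dest, src, n);  // portable default
    }

    static CopyFn copy_fn = &FallbackCopy;

    // Called once at start-up; 'generated' is NULL when no stub was built.
    static void InstallGeneratedCopy(CopyFn generated) {
      if (generated != NULL) copy_fn = generated;
    }
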
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index bd02a69..ff41432 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -247,7 +247,7 @@
        frame_count++ < frame_count_limit && !it.done();
        it.Advance()) {
     JavaScriptFrame* frame = it.frame();
-    JSFunction* function = JSFunction::cast(frame->function());
+    JSFunction* function = frame->function();
 
     if (!FLAG_watch_ic_patching) {
       // Adjust threshold each time we have processed
diff --git a/src/runtime.cc b/src/runtime.cc
index 6e04560..3bc9439 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1456,6 +1456,7 @@
                                  isolate->heap()->proto_string(),
                                  v8::ACCESS_GET)) {
       isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
       return isolate->heap()->undefined_value();
     }
     obj = obj->GetPrototype(isolate);
@@ -1560,28 +1561,20 @@
 };
 
 
-static AccessCheckResult CheckElementAccess(
-    JSObject* obj,
-    uint32_t index,
-    v8::AccessType access_type) {
-  // TODO(1095): we should traverse hidden prototype hierachy as well.
-  if (CheckGenericAccess(
-          obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
-    return ACCESS_ALLOWED;
-  }
-
-  obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
-  return ACCESS_FORBIDDEN;
-}
-
-
 static AccessCheckResult CheckPropertyAccess(
     JSObject* obj,
     Name* name,
     v8::AccessType access_type) {
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    return CheckElementAccess(obj, index, access_type);
+    // TODO(1095): we should traverse hidden prototype hierarchy as well.
+    if (CheckGenericAccess(
+            obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
+      return ACCESS_ALLOWED;
+    }
+
+    obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+    return ACCESS_FORBIDDEN;
   }
 
   LookupResult lookup(obj->GetIsolate());
@@ -1641,14 +1634,21 @@
   Heap* heap = isolate->heap();
   // Due to some WebKit tests, we want to make sure that we do not log
   // more than one access failure here.
-  switch (CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS)) {
+  AccessCheckResult access_check_result =
+      CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS);
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+  switch (access_check_result) {
     case ACCESS_FORBIDDEN: return heap->false_value();
     case ACCESS_ALLOWED: break;
     case ACCESS_ABSENT: return heap->undefined_value();
   }
 
   PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
-  if (attrs == ABSENT) return heap->undefined_value();
+  if (attrs == ABSENT) {
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+    return heap->undefined_value();
+  }
+  ASSERT(!isolate->has_scheduled_exception());
   AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
   Handle<AccessorPair> accessors(raw_accessors, isolate);
 
@@ -1669,10 +1669,16 @@
     Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
     Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
     if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
+      ASSERT(!isolate->has_scheduled_exception());
       elms->set(GETTER_INDEX, getter);
+    } else {
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     }
     if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
+      ASSERT(!isolate->has_scheduled_exception());
       elms->set(SETTER_INDEX, setter);
+    } else {
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     }
   }
 
@@ -2797,7 +2803,7 @@
 
   JavaScriptFrameIterator it(isolate);
   JavaScriptFrame* frame = it.frame();
-  JSFunction* function = JSFunction::cast(frame->function());
+  JSFunction* function = frame->function();
   RUNTIME_ASSERT(function->shared()->is_generator());
 
   JSGeneratorObject* generator;
@@ -2826,8 +2832,8 @@
 
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
-  RUNTIME_ASSERT(JSFunction::cast(frame->function())->shared()->is_generator());
-  ASSERT_EQ(JSFunction::cast(frame->function()), generator_object->function());
+  RUNTIME_ASSERT(frame->function()->shared()->is_generator());
+  ASSERT_EQ(frame->function(), generator_object->function());
 
   // The caller should have saved the context and continuation already.
   ASSERT_EQ(generator_object->context(), Context::cast(frame->context()));
@@ -4813,6 +4819,7 @@
 
   bool fast = obj->HasFastProperties();
   JSObject::DefineAccessor(obj, name, getter, setter, attr);
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   if (fast) JSObject::TransformToFastProperties(obj, 0);
   return isolate->heap()->undefined_value();
 }
@@ -5339,9 +5346,9 @@
 }
 
 
-static Object* HasLocalPropertyImplementation(Isolate* isolate,
-                                              Handle<JSObject> object,
-                                              Handle<Name> key) {
+static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
+                                                   Handle<JSObject> object,
+                                                   Handle<Name> key) {
   if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
   // Handle hidden prototypes.  If there's a hidden prototype above this thing
   // then we have to check it for properties, because they are supposed to
@@ -5353,6 +5360,7 @@
                                           Handle<JSObject>::cast(proto),
                                           key);
   }
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return isolate->heap()->false_value();
 }
 
@@ -5372,8 +5380,12 @@
     // Fast case: either the key is a real named property or it is not
     // an array index and there are no interceptors or hidden
     // prototypes.
-    if (object->HasRealNamedProperty(isolate, key))
+    if (object->HasRealNamedProperty(isolate, key)) {
+      ASSERT(!isolate->has_scheduled_exception());
       return isolate->heap()->true_value();
+    } else {
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+    }
     Map* map = object->map();
     if (!key_is_array_index &&
         !map->has_named_interceptor() &&
@@ -5403,6 +5415,7 @@
   CONVERT_ARG_CHECKED(Name, key, 1);
 
   bool result = receiver->HasProperty(key);
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   if (isolate->has_pending_exception()) return Failure::Exception();
   return isolate->heap()->ToBoolean(result);
 }
@@ -5415,6 +5428,7 @@
   CONVERT_SMI_ARG_CHECKED(index, 1);
 
   bool result = receiver->HasElement(index);
+  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   if (isolate->has_pending_exception()) return Failure::Exception();
   return isolate->heap()->ToBoolean(result);
 }
@@ -5428,7 +5442,12 @@
   CONVERT_ARG_CHECKED(Name, key, 1);
 
   PropertyAttributes att = object->GetLocalPropertyAttribute(key);
-  return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
+  if (att == ABSENT || (att & DONT_ENUM) != 0) {
+    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+    return isolate->heap()->false_value();
+  }
+  ASSERT(!isolate->has_scheduled_exception());
+  return isolate->heap()->true_value();
 }
 
 
@@ -5506,6 +5525,7 @@
                                  isolate->heap()->undefined_value(),
                                  v8::ACCESS_KEYS)) {
       isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
       return *isolate->factory()->NewJSArray(0);
     }
     obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
@@ -5525,6 +5545,7 @@
                                  isolate->heap()->undefined_value(),
                                  v8::ACCESS_KEYS)) {
       isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
       return *isolate->factory()->NewJSArray(0);
     }
     int n;
@@ -5651,6 +5672,7 @@
         !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
                              v8::ACCESS_KEYS)) {
       isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+      RETURN_IF_SCHEDULED_EXCEPTION(isolate);
       return *isolate->factory()->NewJSArray(0);
     }
 
@@ -5732,9 +5754,8 @@
   // Handle special arguments properties.
   if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
   if (key->Equals(isolate->heap()->callee_string())) {
-    Object* function = frame->function();
-    if (function->IsJSFunction() &&
-        !JSFunction::cast(function)->shared()->is_classic_mode()) {
+    JSFunction* function = frame->function();
+    if (!function->shared()->is_classic_mode()) {
       return isolate->Throw(*isolate->factory()->NewTypeError(
           "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
     }
@@ -8221,7 +8242,7 @@
 
   JavaScriptFrame* frame = it.frame();
   RUNTIME_ASSERT(frame->function()->IsJSFunction());
-  Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
+  Handle<JSFunction> function(frame->function(), isolate);
   Handle<Code> optimized_code(function->code());
   RUNTIME_ASSERT((type != Deoptimizer::EAGER &&
                   type != Deoptimizer::SOFT) || function->IsOptimized());
@@ -8237,7 +8258,7 @@
   bool has_other_activations = false;
   while (!it.done()) {
     JavaScriptFrame* frame = it.frame();
-    JSFunction* other_function = JSFunction::cast(frame->function());
+    JSFunction* other_function = frame->function();
     if (frame->is_optimized() && other_function->code() == function->code()) {
       has_other_activations = true;
       break;
@@ -8346,6 +8367,27 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimize) {
+  HandleScope scope(isolate);
+
+  if (args.length() == 0) {
+    // Disable optimization for the calling function.
+    JavaScriptFrameIterator it(isolate);
+    if (!it.done()) {
+      it.frame()->function()->shared()->set_optimization_disabled(true);
+    }
+    return isolate->heap()->undefined_value();
+  }
+
+  // Disable optimization for the functions passed.
+  for (int i = 0; i < args.length(); i++) {
+    CONVERT_ARG_CHECKED(JSFunction, function, i);
+    function->shared()->set_optimization_disabled(true);
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompleteOptimization) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -11213,7 +11255,7 @@
     return false;
   }
 
-  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<JSFunction> function(frame->function());
   Handle<SharedFunctionInfo> shared(function->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
 
@@ -11460,7 +11502,7 @@
     : isolate_(isolate),
       frame_(frame),
       inlined_jsframe_index_(inlined_jsframe_index),
-      function_(JSFunction::cast(frame->function())),
+      function_(frame->function()),
       context_(Context::cast(frame->context())),
       nested_scope_chain_(4),
       failed_(false) {
@@ -11841,7 +11883,7 @@
   JavaScriptFrame* frame = frame_it.frame();
 
   Handle<SharedFunctionInfo> shared =
-      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+      Handle<SharedFunctionInfo>(frame->function()->shared());
   Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
 
   int len = 0;
@@ -13744,9 +13786,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
-  CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
   if (obj->IsJSGlobalProxy()) {
     Object* proto = obj->GetPrototype();
     if (proto->IsNull()) return isolate->heap()->undefined_value();
@@ -13755,21 +13796,8 @@
   }
   ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
            JSObject::cast(obj)->HasFastElements()));
-  if (obj->map()->is_observed() != is_observed) {
-    if (is_observed && obj->IsJSObject() &&
-        !JSObject::cast(obj)->HasExternalArrayElements()) {
-      // Go to dictionary mode, so that we don't skip map checks.
-      MaybeObject* maybe = JSObject::cast(obj)->NormalizeElements();
-      if (maybe->IsFailure()) return maybe;
-      ASSERT(!JSObject::cast(obj)->HasFastElements());
-    }
-    MaybeObject* maybe = obj->map()->Copy();
-    Map* map;
-    if (!maybe->To(&map)) return maybe;
-    map->set_is_observed(is_observed);
-    obj->set_map(map);
-  }
-  return isolate->heap()->undefined_value();
+  ASSERT(obj->IsJSObject());
+  return JSObject::cast(obj)->SetObserved(isolate);
 }
 
 
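The recurring fix in this file and in objects.cc is the same one: ReportFailedAccessCheck invokes an embedder callback that may schedule an exception, and the old code silently swallowed it by returning undefined or false anyway. RETURN_IF_SCHEDULED_EXCEPTION re-raises it first; assuming the usual isolate.h definition, the macro expands along these lines (a sketch, not the verbatim source):

    #define RETURN_IF_SCHEDULED_EXCEPTION(isolate)           \
      do {                                                   \
        Isolate* cur_isolate = (isolate);                    \
        if (cur_isolate->has_scheduled_exception())          \
          return cur_isolate->PromoteScheduledException();   \
      } while (false)
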
diff --git a/src/runtime.h b/src/runtime.h
index 70568f9..f730827 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -97,6 +97,7 @@
   F(RunningInSimulator, 0, 1) \
   F(IsParallelRecompilationSupported, 0, 1) \
   F(OptimizeFunctionOnNextCall, -1, 1) \
+  F(NeverOptimize, -1, 1) \
   F(CompleteOptimization, 1, 1) \
   F(GetOptimizationStatus, 1, 1) \
   F(GetOptimizationCount, 1, 1) \
@@ -351,7 +352,7 @@
   \
   /* Harmony observe */ \
   F(IsObserved, 1, 1) \
-  F(SetIsObserved, 2, 1) \
+  F(SetIsObserved, 1, 1) \
   F(SetObserverDeliveryPending, 0, 1) \
   F(GetObservationState, 0, 1) \
   F(ObservationWeakMapCreate, 0, 1) \
diff --git a/src/store-buffer-inl.h b/src/store-buffer-inl.h
index dd65cbc..bb386db 100644
--- a/src/store-buffer-inl.h
+++ b/src/store-buffer-inl.h
@@ -74,6 +74,14 @@
 }
 
 
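+// A dead new-space object still has its map word pointing into the map
+// space, whereas a promoted object's map word has been overwritten with
+// a forwarding pointer, which never points there. Hence the containment
+// check below identifies exactly the dead objects.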
+void StoreBuffer::ClearDeadObject(HeapObject* object) {
+  Address& map_field = Memory::Address_at(object->address());
+  if (heap_->map_space()->Contains(map_field)) {
+    map_field = NULL;
+  }
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 0386280..9705b60 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -364,7 +364,8 @@
         reinterpret_cast<PagedSpace*>(page->owner()),
         page,
         region_callback,
-        &DummyScavengePointer);
+        &DummyScavengePointer,
+        false);
   }
 }
 
@@ -412,7 +413,10 @@
 
 
 void StoreBuffer::FindPointersToNewSpaceInRegion(
-    Address start, Address end, ObjectSlotCallback slot_callback) {
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback,
+    bool clear_maps) {
   for (Address slot_address = start;
        slot_address < end;
        slot_address += kPointerSize) {
@@ -420,6 +424,9 @@
     if (heap_->InNewSpace(*slot)) {
       HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
       ASSERT(object->IsHeapObject());
+      // A new-space object that still contains a map pointer was not
+      // promoted, i.e. it is dead. Clear its map field lazily here.
+      if (clear_maps) ClearDeadObject(object);
       slot_callback(reinterpret_cast<HeapObject**>(slot), object);
       if (heap_->InNewSpace(*slot)) {
         EnterDirectlyIntoStoreBuffer(slot_address);
@@ -446,7 +453,8 @@
 void StoreBuffer::FindPointersToNewSpaceInMaps(
     Address start,
     Address end,
-    ObjectSlotCallback slot_callback) {
+    ObjectSlotCallback slot_callback,
+    bool clear_maps) {
   ASSERT(MapStartAlign(start) == start);
   ASSERT(MapEndAlign(end) == end);
 
@@ -460,7 +468,8 @@
 
     FindPointersToNewSpaceInRegion(pointer_fields_start,
                                    pointer_fields_end,
-                                   slot_callback);
+                                   slot_callback,
+                                   clear_maps);
     map_address += Map::kSize;
   }
 }
@@ -469,7 +478,8 @@
 void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
     Address start,
     Address end,
-    ObjectSlotCallback slot_callback) {
+    ObjectSlotCallback slot_callback,
+    bool clear_maps) {
   Address map_aligned_start = MapStartAlign(start);
   Address map_aligned_end   = MapEndAlign(end);
 
@@ -478,7 +488,8 @@
 
   FindPointersToNewSpaceInMaps(map_aligned_start,
                                map_aligned_end,
-                               slot_callback);
+                               slot_callback,
+                               clear_maps);
 }
 
 
@@ -500,7 +511,8 @@
     PagedSpace* space,
     Page* page,
     RegionCallback region_callback,
-    ObjectSlotCallback slot_callback) {
+    ObjectSlotCallback slot_callback,
+    bool clear_maps) {
   Address visitable_start = page->area_start();
   Address end_of_page = page->area_end();
 
@@ -520,7 +532,8 @@
         // After calling this the special garbage section may have moved.
         (this->*region_callback)(visitable_start,
                                  visitable_end,
-                                 slot_callback);
+                                 slot_callback,
+                                 clear_maps);
         if (visitable_end >= space->top() && visitable_end < space->limit()) {
           visitable_end = space->limit();
           visitable_start = visitable_end;
@@ -551,13 +564,15 @@
   if (visitable_start != visitable_end) {
     (this->*region_callback)(visitable_start,
                              visitable_end,
-                             slot_callback);
+                             slot_callback,
+                             clear_maps);
   }
 }
 
 
 void StoreBuffer::IteratePointersInStoreBuffer(
-    ObjectSlotCallback slot_callback) {
+    ObjectSlotCallback slot_callback,
+    bool clear_maps) {
   Address* limit = old_top_;
   old_top_ = old_start_;
   {
@@ -570,6 +585,9 @@
       Object* object = *slot;
       if (heap_->InFromSpace(object)) {
         HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        // The new-space object was not promoted if it still contains a map
+        // pointer. Lazily clear the map field now.
+        if (clear_maps) ClearDeadObject(heap_object);
         slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
         if (heap_->InNewSpace(*slot)) {
           EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
@@ -582,6 +600,18 @@
 
 
 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+  IteratePointersToNewSpace(slot_callback, false);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
+    ObjectSlotCallback slot_callback) {
+  IteratePointersToNewSpace(slot_callback, true);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
+                                            bool clear_maps) {
   // We do not sort or remove duplicated entries from the store buffer because
   // we expect that callback will rebuild the store buffer thus removing
   // all duplicates and pointers to old space.
@@ -590,7 +620,7 @@
   // TODO(gc): we want to skip slots on evacuation candidates
   // but we can't simply figure that out from slot address
   // because slot can belong to a large object.
-  IteratePointersInStoreBuffer(slot_callback);
+  IteratePointersInStoreBuffer(slot_callback, clear_maps);
 
   // We are done scanning all the pointers that were in the store buffer, but
   // there may be some pages marked scan_on_scavenge that have pointers to new
@@ -619,7 +649,7 @@
           ASSERT(array->IsFixedArray());
           Address start = array->address();
           Address end = start + array->Size();
-          FindPointersToNewSpaceInRegion(start, end, slot_callback);
+          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
           PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
@@ -629,7 +659,8 @@
               (owner == heap_->map_space() ?
                  &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                  &StoreBuffer::FindPointersToNewSpaceInRegion),
-              slot_callback);
+              slot_callback,
+              clear_maps);
         }
       }
     }
diff --git a/src/store-buffer.h b/src/store-buffer.h
index 520cbc0..01e7cbe 100644
--- a/src/store-buffer.h
+++ b/src/store-buffer.h
@@ -43,8 +43,10 @@
 
 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
-typedef void (StoreBuffer::*RegionCallback)(
-    Address start, Address end, ObjectSlotCallback slot_callback);
+typedef void (StoreBuffer::*RegionCallback)(Address start,
+                                            Address end,
+                                            ObjectSlotCallback slot_callback,
+                                            bool clear_maps);
 
 // Used to implement the write barrier by collecting addresses of pointers
 // between spaces.
@@ -83,6 +85,10 @@
   // surviving old-to-new pointers into the store buffer to rebuild it.
   void IteratePointersToNewSpace(ObjectSlotCallback callback);
 
+  // Same as IteratePointersToNewSpace but additionally clears maps in objects
+  // referenced from the store buffer that do not contain a forwarding pointer.
+  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
+
   static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
@@ -164,9 +170,15 @@
   void Uniq();
   void ExemptPopularPages(int prime_sample_step, int threshold);
 
+  // Set the map field of the object to NULL if it contains a map.
+  inline void ClearDeadObject(HeapObject* object);
+
+  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+
   void FindPointersToNewSpaceInRegion(Address start,
                                       Address end,
-                                      ObjectSlotCallback slot_callback);
+                                      ObjectSlotCallback slot_callback,
+                                      bool clear_maps);
 
   // For each region of pointers on a page in use from an old space call
   // visit_pointer_region callback.
@@ -182,20 +194,24 @@
   void FindPointersToNewSpaceInMaps(
     Address start,
     Address end,
-    ObjectSlotCallback slot_callback);
+    ObjectSlotCallback slot_callback,
+    bool clear_maps);
 
   void FindPointersToNewSpaceInMapsRegion(
     Address start,
     Address end,
-    ObjectSlotCallback slot_callback);
+    ObjectSlotCallback slot_callback,
+    bool clear_maps);
 
   void FindPointersToNewSpaceOnPage(
     PagedSpace* space,
     Page* page,
     RegionCallback region_callback,
-    ObjectSlotCallback slot_callback);
+    ObjectSlotCallback slot_callback,
+    bool clear_maps);
 
-  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+                                    bool clear_maps);
 
 #ifdef VERIFY_HEAP
   void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
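
Note how the header keeps the existing IteratePointersToNewSpace signature
and exposes the new behavior under an explicit name rather than a bare bool
at call sites. A reduced sketch of that wrapper pattern (class name and
output hypothetical):

    #include <cstdio>

    class StoreBufferSketch {
     public:
      // The existing entry point keeps its signature and default behavior.
      void IteratePointersToNewSpace() { IteratePointersToNewSpace(false); }
      // The new behavior gets a descriptive name of its own.
      void IteratePointersToNewSpaceAndClearMaps() {
        IteratePointersToNewSpace(true);
      }

     private:
      void IteratePointersToNewSpace(bool clear_maps) {
        std::printf("iterating, clear_maps=%d\n", clear_maps);
      }
    };

    int main() {
      StoreBufferSketch buffer;
      buffer.IteratePointersToNewSpace();              // clear_maps=0
      buffer.IteratePointersToNewSpaceAndClearMaps();  // clear_maps=1
      return 0;
    }
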
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index a687d81..47ef8f5 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1997,7 +1997,6 @@
           elements_kind,
           transitioned_map->elements_kind(),
           is_js_array,
-          strict_mode(),
           store_mode_).GetCode(isolate());
     } else {
       if (FLAG_compiled_keyed_stores &&
diff --git a/src/type-info.cc b/src/type-info.cc
index 4a10caf..b905a74 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -141,7 +141,7 @@
 bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
   Handle<Object> map_or_code = GetInfo(ast_id);
   if (map_or_code->IsMap()) return false;
-  if (!map_or_code->IsCode()) return true;
+  if (!map_or_code->IsCode()) return false;
   Handle<Code> code = Handle<Code>::cast(map_or_code);
   return code->ic_state() == UNINITIALIZED;
 }
@@ -377,12 +377,9 @@
     CompareIC::StubInfoToType(
         stub_minor_key, left_type, right_type, combined_type, map, isolate());
   } else if (code->is_compare_nil_ic_stub()) {
-    CompareNilICStub::State state(code->compare_nil_state());
-    *combined_type = CompareNilICStub::StateToType(isolate_, state, map);
-    Handle<Type> nil_type = handle(code->compare_nil_value() == kNullValue
-        ? Type::Null() : Type::Undefined(), isolate_);
-    *left_type = *right_type =
-        handle(Type::Union(*combined_type, nil_type), isolate_);
+    CompareNilICStub stub(code->extended_extra_ic_state());
+    *combined_type = stub.GetType(isolate_, map);
+    *left_type = *right_type = stub.GetInputType(isolate_, map);
   }
 }
 
@@ -394,7 +391,7 @@
   }
   Handle<Code> code = Handle<Code>::cast(object);
   ASSERT(code->is_unary_op_stub());
-  return UnaryOpStub(code->extra_ic_state()).GetType(isolate());
+  return UnaryOpStub(code->extended_extra_ic_state()).GetType(isolate());
 }
 
 
diff --git a/src/typedarray.js b/src/typedarray.js
index 57d0c60..ee1fa9d 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -154,7 +154,7 @@
 
   var l = obj.length;
   if (IS_UNDEFINED(l)) {
-    throw MakeTypeError("invalid_argument");
+    return;
   }
   if (intOffset + l > this.length) {
     throw MakeRangeError("typed_array_set_source_too_large");
@@ -259,6 +259,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getInt8', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetInt8(this,
                           ToPositiveDataViewOffset(offset),
                           !!little_endian);
@@ -269,6 +272,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setInt8', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetInt8(this,
                    ToPositiveDataViewOffset(offset),
                    TO_NUMBER_INLINE(value),
@@ -280,6 +286,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getUint8', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetUint8(this,
                            ToPositiveDataViewOffset(offset),
                            !!little_endian);
@@ -290,6 +299,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setUint8', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetUint8(this,
                    ToPositiveDataViewOffset(offset),
                    TO_NUMBER_INLINE(value),
@@ -301,6 +313,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getInt16', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetInt16(this,
                            ToPositiveDataViewOffset(offset),
                            !!little_endian);
@@ -311,6 +326,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setInt16', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetInt16(this,
                     ToPositiveDataViewOffset(offset),
                     TO_NUMBER_INLINE(value),
@@ -322,6 +340,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getUint16', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetUint16(this,
                             ToPositiveDataViewOffset(offset),
                             !!little_endian);
@@ -332,6 +353,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setUint16', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetUint16(this,
                      ToPositiveDataViewOffset(offset),
                      TO_NUMBER_INLINE(value),
@@ -343,6 +367,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getInt32', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetInt32(this,
                            ToPositiveDataViewOffset(offset),
                            !!little_endian);
@@ -353,6 +380,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setInt32', this]);
   }
+  if (%_ArgumentsLength() < 2) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetInt32(this,
                     ToPositiveDataViewOffset(offset),
                     TO_NUMBER_INLINE(value),
@@ -364,6 +394,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getUint32', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetUint32(this,
                             ToPositiveDataViewOffset(offset),
                             !!little_endian);
@@ -374,6 +407,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setUint32', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetUint32(this,
                      ToPositiveDataViewOffset(offset),
                      TO_NUMBER_INLINE(value),
@@ -385,6 +421,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getFloat32', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   return %DataViewGetFloat32(this,
                              ToPositiveDataViewOffset(offset),
                              !!little_endian);
@@ -395,6 +434,9 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setFloat32', this]);
   }
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
+  }
   %DataViewSetFloat32(this,
                       ToPositiveDataViewOffset(offset),
                       TO_NUMBER_INLINE(value),
@@ -406,9 +448,8 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.getFloat64', this]);
   }
-  offset = TO_INTEGER(offset);
-  if (offset < 0) {
-    throw MakeRangeError("invalid_data_view_accessor_offset");
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
   }
   return %DataViewGetFloat64(this,
                              ToPositiveDataViewOffset(offset),
@@ -420,9 +461,8 @@
     throw MakeTypeError('incompatible_method_reciever',
                         ['DataView.setFloat64', this]);
   }
-  offset = TO_INTEGER(offset);
-  if (offset < 0) {
-    throw MakeRangeError("invalid_data_view_accessor_offset");
+  if (%_ArgumentsLength() < 1) {
+    throw MakeTypeError('invalid_argument');
   }
   %DataViewSetFloat64(this,
                       ToPositiveDataViewOffset(offset),
diff --git a/src/types.cc b/src/types.cc
index e88f9f6..ff96b5a 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -515,9 +515,9 @@
     }
     PrintF(out, "}");
   } else if (is_constant()) {
-    PrintF(out, "Constant(%p)", reinterpret_cast<void*>(*as_constant()));
+    PrintF(out, "Constant(%p)", static_cast<void*>(*as_constant()));
   } else if (is_class()) {
-    PrintF(out, "Class(%p)", reinterpret_cast<void*>(*as_class()));
+    PrintF(out, "Class(%p)", static_cast<void*>(*as_class()));
   } else if (is_union()) {
     PrintF(out, "{");
     Handle<Unioned> unioned = as_union();
diff --git a/src/types.h b/src/types.h
index bc6e580..b2eb60c 100644
--- a/src/types.h
+++ b/src/types.h
@@ -291,6 +291,40 @@
   }
 };
 
+
+// A simple struct to represent a pair of lower/upper type bounds.
+struct Bounds {
+  Handle<Type> lower;
+  Handle<Type> upper;
+
+  Bounds() {}
+  Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {}
+  Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {}
+  explicit Bounds(Handle<Type> t) : lower(t), upper(t) {}
+  Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {}
+
+  // Meet: both b1 and b2 are known to hold.
+  static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
+    return Bounds(
+        handle(Type::Union(b1.lower, b2.lower), isl),
+        handle(Type::Intersect(b1.upper, b2.upper), isl));
+  }
+
+  // Join: either b1 or b2 is known to hold.
+  static Bounds Either(Bounds b1, Bounds b2, Isolate* isl) {
+    return Bounds(
+        handle(Type::Intersect(b1.lower, b2.lower), isl),
+        handle(Type::Union(b1.upper, b2.upper), isl));
+  }
+
+  static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+    return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
+  }
+  static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
+    return Bounds(b.lower, handle(Type::Intersect(b.upper, t), isl));
+  }
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_TYPES_H_
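
Bounds keeps a lower and an upper approximation of an expression's type,
with lower <= actual <= upper. As a toy illustration only, plain integer
intervals can stand in for V8's Type lattice here, with an inverted interval
playing the role of Type::None:

    #include <algorithm>
    #include <cstdio>

    // Toy lattice: closed integer intervals; lo > hi represents None.
    struct Interval { int lo, hi; };
    Interval Union(Interval a, Interval b) {
      return {std::min(a.lo, b.lo), std::max(a.hi, b.hi)};
    }
    Interval Intersect(Interval a, Interval b) {
      return {std::max(a.lo, b.lo), std::min(a.hi, b.hi)};
    }

    struct ToyBounds {
      Interval lower, upper;  // invariant: lower <= actual <= upper
      // Meet: both bounds hold, so lowers union and uppers intersect.
      static ToyBounds Both(ToyBounds a, ToyBounds b) {
        return {Union(a.lower, b.lower), Intersect(a.upper, b.upper)};
      }
      // Join: either bound holds, so lowers intersect and uppers union.
      static ToyBounds Either(ToyBounds a, ToyBounds b) {
        return {Intersect(a.lower, b.lower), Union(a.upper, b.upper)};
      }
    };

    int main() {
      ToyBounds then_branch = {{0, 0}, {0, 10}};
      ToyBounds else_branch = {{5, 5}, {0, 100}};
      // A conditional expression takes Either of its branches' bounds.
      ToyBounds merged = ToyBounds::Either(then_branch, else_branch);
      std::printf("lower=[%d,%d] upper=[%d,%d]\n",
                  merged.lower.lo, merged.lower.hi,
                  merged.upper.lo, merged.upper.hi);
      return 0;
    }
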
diff --git a/src/typing.cc b/src/typing.cc
index 4645950..4220d21 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -248,12 +248,9 @@
 
   expr->condition()->RecordToBooleanTypeFeedback(oracle());
 
-  MergeLowerType(expr, Type::Intersect(
-      expr->then_expression()->lower_type(),
-      expr->else_expression()->lower_type()));
-  MergeUpperType(expr, Type::Union(
-      expr->then_expression()->upper_type(),
-      expr->else_expression()->upper_type()));
+  NarrowType(expr, Bounds::Either(
+      expr->then_expression()->bounds(),
+      expr->else_expression()->bounds(), isolate_));
 }
 
 
@@ -264,14 +261,12 @@
 
 void AstTyper::VisitLiteral(Literal* expr) {
   Type* type = Type::Constant(expr->value(), isolate_);
-  MergeLowerType(expr, type);
-  MergeUpperType(expr, type);
+  NarrowType(expr, Bounds(type, isolate_));
 }
 
 
 void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
-  MergeLowerType(expr, Type::RegExp());
-  MergeUpperType(expr, Type::RegExp());
+  NarrowType(expr, Bounds(Type::RegExp(), isolate_));
 }
 
 
@@ -290,8 +285,7 @@
     }
   }
 
-  MergeLowerType(expr, Type::Object());
-  MergeUpperType(expr, Type::Object());
+  NarrowType(expr, Bounds(Type::Object(), isolate_));
 }
 
 
@@ -302,8 +296,7 @@
     RECURSE(Visit(value));
   }
 
-  MergeLowerType(expr, Type::Array());
-  MergeUpperType(expr, Type::Array());
+  NarrowType(expr, Bounds(Type::Array(), isolate_));
 }
 
 
@@ -320,6 +313,8 @@
         expr->RecordTypeFeedback(oracle(), zone());
       }
     }
+
+    NarrowType(expr, expr->binary_operation()->bounds());
   } else {
     RECURSE(Visit(expr->target()));
     RECURSE(Visit(expr->value()));
@@ -328,8 +323,7 @@
       expr->RecordTypeFeedback(oracle(), zone());
     }
 
-    MergeLowerType(expr, expr->value()->lower_type());
-    MergeUpperType(expr, expr->value()->upper_type());
+    NarrowType(expr, expr->value()->bounds());
   }
   // TODO(rossberg): handle target variables
 }
@@ -346,8 +340,7 @@
 void AstTyper::VisitThrow(Throw* expr) {
   RECURSE(Visit(expr->exception()));
 
-  // Lower type is None already.
-  MergeUpperType(expr, Type::None());
+  NarrowType(expr, Bounds(Type::None(), isolate_));
 }
 
 
@@ -412,7 +405,7 @@
 
   // Collect type feedback.
   Handle<Type> op_type = oracle()->UnaryType(expr->UnaryOperationFeedbackId());
-  MergeLowerType(expr->expression(), op_type);
+  NarrowLowerType(expr->expression(), op_type);
   if (expr->op() == Token::NOT) {
     // TODO(rossberg): only do in test or value context.
     expr->expression()->RecordToBooleanTypeFeedback(oracle());
@@ -421,27 +414,23 @@
   switch (expr->op()) {
     case Token::NOT:
     case Token::DELETE:
-      MergeLowerType(expr, Type::Boolean());
-      MergeUpperType(expr, Type::Boolean());
+      NarrowType(expr, Bounds(Type::Boolean(), isolate_));
       break;
     case Token::VOID:
-      MergeLowerType(expr, Type::Undefined());
-      MergeUpperType(expr, Type::Undefined());
+      NarrowType(expr, Bounds(Type::Undefined(), isolate_));
       break;
     case Token::ADD:
     case Token::SUB: {
-      MergeLowerType(expr, Type::Smi());
-      Type* upper = *expr->expression()->upper_type();
-      MergeUpperType(expr, upper->Is(Type::Number()) ? upper : Type::Number());
+      Type* upper = *expr->expression()->bounds().upper;
+      if (!upper->Is(Type::Number())) upper = Type::Number();
+      NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
       break;
     }
     case Token::BIT_NOT:
-      MergeLowerType(expr, Type::Smi());
-      MergeUpperType(expr, Type::Signed32());
+      NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
       break;
     case Token::TYPEOF:
-      MergeLowerType(expr, Type::InternalizedString());
-      MergeUpperType(expr, Type::InternalizedString());
+      NarrowType(expr, Bounds(Type::InternalizedString(), isolate_));
       break;
     default:
       UNREACHABLE();
@@ -458,8 +447,7 @@
     prop->RecordTypeFeedback(oracle(), zone());
   }
 
-  MergeLowerType(expr, Type::Smi());
-  MergeUpperType(expr, Type::Number());
+  NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
 }
 
 
@@ -472,9 +460,9 @@
   Maybe<int> fixed_right_arg;
   oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
       &left_type, &right_type, &type, &fixed_right_arg);
-  MergeLowerType(expr, type);
-  MergeLowerType(expr->left(), left_type);
-  MergeLowerType(expr->right(), right_type);
+  NarrowLowerType(expr, type);
+  NarrowLowerType(expr->left(), left_type);
+  NarrowLowerType(expr->right(), right_type);
   expr->set_fixed_right_arg(fixed_right_arg);
   if (expr->op() == Token::OR || expr->op() == Token::AND) {
     expr->left()->RecordToBooleanTypeFeedback(oracle());
@@ -482,56 +470,50 @@
 
   switch (expr->op()) {
     case Token::COMMA:
-      MergeLowerType(expr, expr->right()->lower_type());
-      MergeUpperType(expr, expr->right()->upper_type());
+      NarrowType(expr, expr->right()->bounds());
       break;
     case Token::OR:
     case Token::AND:
-      MergeLowerType(expr, Type::Intersect(
-          expr->left()->lower_type(), expr->right()->lower_type()));
-      MergeUpperType(expr, Type::Union(
-          expr->left()->upper_type(), expr->right()->upper_type()));
+      NarrowType(expr, Bounds::Either(
+          expr->left()->bounds(), expr->right()->bounds(), isolate_));
       break;
     case Token::BIT_OR:
     case Token::BIT_AND: {
-      MergeLowerType(expr, Type::Smi());
-      Type* upper =
-          Type::Union(expr->left()->upper_type(), expr->right()->upper_type());
-      MergeUpperType(expr,
-          upper->Is(Type::Signed32()) ? upper : Type::Signed32());
+      Type* upper = Type::Union(
+          expr->left()->bounds().upper, expr->right()->bounds().upper);
+      if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
+      NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
       break;
     }
     case Token::BIT_XOR:
     case Token::SHL:
     case Token::SAR:
-      MergeLowerType(expr, Type::Smi());
-      MergeUpperType(expr, Type::Signed32());
+      NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
       break;
     case Token::SHR:
-      MergeLowerType(expr, Type::Smi());
-      MergeUpperType(expr, Type::Unsigned32());
+      NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_));
       break;
     case Token::ADD: {
-      Handle<Type> l = expr->left()->lower_type();
-      Handle<Type> r = expr->right()->lower_type();
-      MergeLowerType(expr,
-          l->Is(Type::Number()) && r->Is(Type::Number()) ? Type::Smi() :
-          l->Is(Type::String()) || r->Is(Type::String()) ? Type::String() :
-              Type::None());
-      l = expr->left()->upper_type();
-      r = expr->right()->upper_type();
-      MergeUpperType(expr,
-          l->Is(Type::Number()) && r->Is(Type::Number()) ? Type::Number() :
-          l->Is(Type::String()) || r->Is(Type::String()) ? Type::String() :
-              Type::NumberOrString());
+      Bounds l = expr->left()->bounds();
+      Bounds r = expr->right()->bounds();
+      Type* lower =
+          l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
+              Type::Smi() :
+          l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
+              Type::String() : Type::None();
+      Type* upper =
+          l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
+              Type::Number() :
+          l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
+              Type::String() : Type::NumberOrString();
+      NarrowType(expr, Bounds(lower, upper, isolate_));
       break;
     }
     case Token::SUB:
     case Token::MUL:
     case Token::DIV:
     case Token::MOD:
-      MergeLowerType(expr, Type::Smi());
-      MergeUpperType(expr, Type::Number());
+      NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
       break;
     default:
       UNREACHABLE();
@@ -547,12 +529,11 @@
   Handle<Type> left_type, right_type, combined_type;
   oracle()->CompareType(expr->CompareOperationFeedbackId(),
       &left_type, &right_type, &combined_type);
-  MergeLowerType(expr->left(), left_type);
-  MergeLowerType(expr->right(), right_type);
+  NarrowLowerType(expr->left(), left_type);
+  NarrowLowerType(expr->right(), right_type);
   expr->set_combined_type(combined_type);
 
-  MergeLowerType(expr, Type::Boolean());
-  MergeUpperType(expr, Type::Boolean());
+  NarrowType(expr, Bounds(Type::Boolean(), isolate_));
 }
 
 
diff --git a/src/typing.h b/src/typing.h
index b37a0cb..ceef984 100644
--- a/src/typing.h
+++ b/src/typing.h
@@ -34,6 +34,7 @@
 #include "ast.h"
 #include "compiler.h"
 #include "type-info.h"
+#include "types.h"
 #include "zone.h"
 #include "scopes.h"
 
@@ -62,17 +63,11 @@
   TypeFeedbackOracle* oracle() { return &oracle_; }
   Zone* zone() const { return info_->zone(); }
 
-  void MergeLowerType(Expression* e, Handle<Type> t) {
-    e->set_lower_type(handle(Type::Union(e->lower_type(), t), isolate_));
+  void NarrowType(Expression* e, Bounds b) {
+    e->set_bounds(Bounds::Both(e->bounds(), b, isolate_));
   }
-  void MergeUpperType(Expression* e, Handle<Type> t) {
-    e->set_upper_type(handle(Type::Intersect(e->upper_type(), t), isolate_));
-  }
-  void MergeLowerType(Expression* e, Type* t) {
-    MergeLowerType(e, handle(t, isolate_));
-  }
-  void MergeUpperType(Expression* e, Type* t) {
-    MergeUpperType(e, handle(t, isolate_));
+  void NarrowLowerType(Expression* e, Handle<Type> t) {
+    e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
   }
 
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/src/v8globals.h b/src/v8globals.h
index 4932da9..c3f1f01 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -418,6 +418,17 @@
 };
 
 
+enum CpuPart {
+  CPU_UNKNOWN,
+  CORTEX_A15,
+  CORTEX_A12,
+  CORTEX_A9,
+  CORTEX_A8,
+  CORTEX_A7,
+  CORTEX_A5
+};
+
+
 // Feature flags bit positions. They are mostly based on the CPUID spec.
 // (We assign CPUID itself to one of the currently reserved bits --
 // feel free to change this if needed.)
@@ -434,6 +445,7 @@
                   UNALIGNED_ACCESSES = 4,  // ARM
                   MOVW_MOVT_IMMEDIATE_LOADS = 5,  // ARM
                   VFP32DREGS = 6,  // ARM
+                  NEON = 7,    // ARM
                   SAHF = 0,    // x86
                   FPU = 1};    // MIPS
 
diff --git a/src/v8utils.h b/src/v8utils.h
index ff9f8f2..fd3f4a5 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -317,6 +317,11 @@
 INLINE(static void CopyCharsUnsigned(sinkchar* dest,
                                      const sourcechar* src,
                                      int chars));
+#if defined(V8_HOST_ARCH_ARM)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#endif
 
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
@@ -375,6 +380,105 @@
 }
 
 
+#if defined(V8_HOST_ARCH_ARM)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+  switch (static_cast<unsigned>(chars)) {
+    case 0:
+      break;
+    case 1:
+      *dest = *src;
+      break;
+    case 2:
+      memcpy(dest, src, 2);
+      break;
+    case 3:
+      memcpy(dest, src, 3);
+      break;
+    case 4:
+      memcpy(dest, src, 4);
+      break;
+    case 5:
+      memcpy(dest, src, 5);
+      break;
+    case 6:
+      memcpy(dest, src, 6);
+      break;
+    case 7:
+      memcpy(dest, src, 7);
+      break;
+    case 8:
+      memcpy(dest, src, 8);
+      break;
+    case 9:
+      memcpy(dest, src, 9);
+      break;
+    case 10:
+      memcpy(dest, src, 10);
+      break;
+    case 11:
+      memcpy(dest, src, 11);
+      break;
+    case 12:
+      memcpy(dest, src, 12);
+      break;
+    case 13:
+      memcpy(dest, src, 13);
+      break;
+    case 14:
+      memcpy(dest, src, 14);
+      break;
+    case 15:
+      memcpy(dest, src, 15);
+      break;
+    default:
+      OS::MemCopy(dest, src, chars);
+      break;
+  }
+}
+
+
+void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
+  if (chars >= OS::kMinComplexConvertMemCopy) {
+    OS::MemCopyUint16Uint8(dest, src, chars);
+  } else {
+    OS::MemCopyUint16Uint8Wrapper(dest, src, chars);
+  }
+}
+
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+  switch (static_cast<unsigned>(chars)) {
+    case 0:
+      break;
+    case 1:
+      *dest = *src;
+      break;
+    case 2:
+      memcpy(dest, src, 4);
+      break;
+    case 3:
+      memcpy(dest, src, 6);
+      break;
+    case 4:
+      memcpy(dest, src, 8);
+      break;
+    case 5:
+      memcpy(dest, src, 10);
+      break;
+    case 6:
+      memcpy(dest, src, 12);
+      break;
+    case 7:
+      memcpy(dest, src, 14);
+      break;
+    default:
+      OS::MemCopy(dest, src, chars * sizeof(*dest));
+      break;
+  }
+}
+#endif
+
+
 class StringBuilder : public SimpleStringBuilder {
  public:
   explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
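
The ARM-specific CopyCharsUnsigned overloads above lean on the fact that a
memcpy whose size is a compile-time constant gets expanded inline by the
compiler, so only large copies pay for a real call. A reduced sketch of the
dispatch, with plain memcpy standing in for OS::MemCopy:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Small counts branch to fixed-size memcpy calls that compilers expand
    // inline; anything larger falls through to one general copy.
    static void CopySmallOrLarge(uint8_t* dest, const uint8_t* src, int chars) {
      switch (static_cast<unsigned>(chars)) {
        case 0: break;
        case 1: *dest = *src; break;
        case 2: memcpy(dest, src, 2); break;
        case 3: memcpy(dest, src, 3); break;
        case 4: memcpy(dest, src, 4); break;
        default: memcpy(dest, src, chars); break;  // stands in for OS::MemCopy
      }
    }

    int main() {
      const uint8_t src[] = "abcd";
      uint8_t dest[5] = {0};
      CopySmallOrLarge(dest, src, 4);
      std::printf("%s\n", reinterpret_cast<const char*>(dest));  // abcd
      return 0;
    }
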
diff --git a/src/version.cc b/src/version.cc
index 5241654..3ec15f2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     20
-#define BUILD_NUMBER      4
-#define PATCH_LEVEL       2
+#define BUILD_NUMBER      5
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0c0c272..0b74ace 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -258,6 +258,17 @@
 }
 
 
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rax, rbx, rcx, rdx };
+  descriptor->register_param_count_ = 4;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index f2f7ed0..d7a73d7 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -451,16 +451,11 @@
   // Get the bailout id from the stack.
   __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
 
-  // Get the address of the location in the code object if possible
+  // Get the address of the location in the code object
   // and compute the fp-to-sp delta in register arg5.
-  if (type() == EAGER || type() == SOFT) {
-    __ Set(arg_reg_4, 0);
-    __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
-  } else {
-    __ movq(arg_reg_4,
-            Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
-    __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
-  }
+  __ movq(arg_reg_4,
+          Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+  __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
 
   __ subq(arg5, rbp);
   __ neg(arg5);
@@ -503,12 +498,8 @@
     __ pop(Operand(rbx, dst_offset));
   }
 
-  // Remove the bailout id from the stack.
-  if (type() == EAGER || type() == SOFT) {
-    __ addq(rsp, Immediate(kPointerSize));
-  } else {
-    __ addq(rsp, Immediate(2 * kPointerSize));
-  }
+  // Remove the bailout id and return address from the stack.
+  __ addq(rsp, Immediate(2 * kPointerSize));
 
   // Compute a pointer to the unwinding limit in register rcx; that is
   // the first stack slot not part of the input frame.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 9dfc972..170474e 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -281,8 +281,7 @@
 
 
 bool LCodeGen::GenerateJumpTable() {
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
+  Label needs_frame;
   if (jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
   }
@@ -298,47 +297,24 @@
     }
     if (jump_table_[i].needs_frame) {
       __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
-      if (type == Deoptimizer::LAZY) {
-        if (needs_frame_is_call.is_bound()) {
-          __ jmp(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ push(rbp);
-          __ movq(rbp, rsp);
-          __ push(rsi);
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
-          __ push(rsi);
-          __ movq(rsi, MemOperand(rsp, kPointerSize));
-          __ call(kScratchRegister);
-        }
+      if (needs_frame.is_bound()) {
+        __ jmp(&needs_frame);
       } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ jmp(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ push(rbp);
-          __ movq(rbp, rsp);
-          __ push(rsi);
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
-          __ push(rsi);
-          __ movq(rsi, MemOperand(rsp, kPointerSize));
-          __ jmp(kScratchRegister);
-        }
+        __ bind(&needs_frame);
+        __ push(rbp);
+        __ movq(rbp, rsp);
+        __ push(rsi);
+        // This variant of deopt can only be used with stubs. Since we don't
+        // have a function pointer to install in the stack frame that we're
+        // building, install a special marker there instead.
+        ASSERT(info()->IsStub());
+        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+        __ push(rsi);
+        __ movq(rsi, MemOperand(rsp, kPointerSize));
+        __ call(kScratchRegister);
       }
     } else {
-      if (type == Deoptimizer::LAZY) {
-        __ call(entry, RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      }
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
   return !is_aborted();
@@ -689,13 +665,8 @@
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  bool needs_lazy_deopt = info()->IsStub();
   if (cc == no_condition && frame_is_built_) {
-    if (needs_lazy_deopt) {
-      __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    } else {
-      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-    }
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index adb1afa..71db17c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -205,16 +205,12 @@
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
       uint64_t int_val = BitCast<uint64_t, double>(v);
-      int32_t lower = static_cast<int32_t>(int_val);
-      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
       XMMRegister dst = cgen_->ToDoubleRegister(destination);
       if (int_val == 0) {
         __ xorps(dst, dst);
       } else {
-        __ push(Immediate(upper));
-        __ push(Immediate(lower));
-        __ movsd(dst, Operand(rsp, 0));
-        __ addq(rsp, Immediate(kDoubleSize));
+        __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+        __ movq(dst, kScratchRegister);
       }
     } else {
       ASSERT(destination->IsStackSlot());
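
The gap-resolver change above materializes a double constant by moving its
raw 64-bit pattern through kScratchRegister instead of staging two 32-bit
halves on the stack. A small illustration of the immediate such a movq
carries (example value assumed):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);  // BitCast<uint64_t, double>(v)
      // This is the immediate loaded into the scratch register before the
      // final movq into the XMM destination.
      std::printf("%.1f -> 0x%016llx\n", v, (unsigned long long)bits);
      return 0;
    }
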
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 4fa0683..478e98a 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -328,7 +328,6 @@
   stream->Add("= ");
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
-  ASSERT(hydrogen()->property_cell()->value()->IsSmi());
   ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 4e5d649..ad78606 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -54,7 +54,6 @@
         'test-bignum.cc',
         'test-bignum-dtoa.cc',
         'test-circular-queue.cc',
-        'test-compare-nil-ic-stub.cc',
         'test-compiler.cc',
         'test-conversions.cc',
         'test-cpu-profiler.cc',
@@ -111,6 +110,7 @@
         ['v8_target_arch=="ia32"', {
           'sources': [
             'test-assembler-ia32.cc',
+            'test-code-stubs-ia32.cc',
             'test-disasm-ia32.cc',
             'test-log-stack-tracer.cc'
           ],
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index f8f1930..43a243d 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -2340,6 +2340,16 @@
 }
 
 
+THREADED_TEST(GlobalObjectHasRealIndexedProperty) {
+  LocalContext env;
+  v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+  v8::Local<v8::Object> global = env->Global();
+  global->Set(0, v8::String::New("value"));
+  CHECK(global->HasRealIndexedProperty(0));
+}
+
+
 static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
                                                void* value) {
   CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
@@ -19705,4 +19715,180 @@
   ThreadInterruptTest().RunTest();
 }
 
+
+static bool NamedAccessAlwaysBlocked(Local<v8::Object> global,
+                                     Local<Value> name,
+                                     v8::AccessType type,
+                                     Local<Value> data) {
+  i::PrintF("Named access blocked.\n");
+  return false;
+}
+
+
+static bool IndexAccessAlwaysBlocked(Local<v8::Object> global,
+                                     uint32_t key,
+                                     v8::AccessType type,
+                                     Local<Value> data) {
+  i::PrintF("Indexed access blocked.\n");
+  return false;
+}
+
+
+void UnreachableCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  CHECK(false);
+}
+
+
+TEST(JSONStringifyAccessCheck) {
+  v8::V8::Initialize();
+  v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+  // Create an ObjectTemplate for global objects and install access
+  // check callbacks that will block access.
+  v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+  global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
+                                           IndexAccessAlwaysBlocked);
+
+  // Create a context and set an x property on its global object.
+  LocalContext context0(NULL, global_template);
+  v8::Handle<v8::Object> global0 = context0->Global();
+  global0->Set(v8_str("x"), v8_num(42));
+  ExpectString("JSON.stringify(this)", "{\"x\":42}");
+
+  for (int i = 0; i < 2; i++) {
+    if (i == 1) {
+      // Install a toJSON function on the second run.
+      v8::Handle<v8::FunctionTemplate> toJSON =
+          v8::FunctionTemplate::New(UnreachableCallback);
+
+      global0->Set(v8_str("toJSON"), toJSON->GetFunction());
+    }
+    // Create a context with a different security token so that the
+    // failed access check callback will be called on each access.
+    LocalContext context1(NULL, global_template);
+    context1->Global()->Set(v8_str("other"), global0);
+
+    ExpectString("JSON.stringify(other)", "{}");
+    ExpectString("JSON.stringify({ 'a' : other, 'b' : ['c'] })",
+                 "{\"a\":{},\"b\":[\"c\"]}");
+    ExpectString("JSON.stringify([other, 'b', 'c'])",
+                 "[{},\"b\",\"c\"]");
+
+    v8::Handle<v8::Array> array = v8::Array::New(2);
+    array->Set(0, v8_str("a"));
+    array->Set(1, v8_str("b"));
+    context1->Global()->Set(v8_str("array"), array);
+    ExpectString("JSON.stringify(array)", "[\"a\",\"b\"]");
+    array->TurnOnAccessCheck();
+    ExpectString("JSON.stringify(array)", "[]");
+    ExpectString("JSON.stringify([array])", "[[]]");
+    ExpectString("JSON.stringify({'a' : array})", "{\"a\":[]}");
+  }
+}
+
+
+bool access_check_fail_thrown = false;
+bool catch_callback_called = false;
+
+
+// Failed access check callback that throws an error on each invocation.
+void FailedAccessCheckThrows(Local<v8::Object> target,
+                             v8::AccessType type,
+                             Local<v8::Value> data) {
+  access_check_fail_thrown = true;
+  i::PrintF("Access check failed. Error thrown.\n");
+  v8::ThrowException(v8::Exception::Error(v8_str("cross context")));
+}
+
+
+void CatcherCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  for (int i = 0; i < args.Length(); i++) {
+    i::PrintF("%s\n", *String::Utf8Value(args[i]));
+  }
+  catch_callback_called = true;
+}
+
+
+void HasOwnPropertyCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  args[0]->ToObject()->HasOwnProperty(args[1]->ToString());
+}
+
+
+void CheckCorrectThrow(const char* script) {
+  // Test that the script, when wrapped into a try-catch, triggers the catch
+  // clause due to failed access check throwing an exception.
+  // The subsequent try-catch should run without any exception.
+  access_check_fail_thrown = false;
+  catch_callback_called = false;
+  i::ScopedVector<char> source(1024);
+  i::OS::SNPrintF(source, "try { %s; } catch (e) { catcher(e); }", script);
+  CompileRun(source.start());
+  CHECK(access_check_fail_thrown);
+  CHECK(catch_callback_called);
+
+  access_check_fail_thrown = false;
+  catch_callback_called = false;
+  CompileRun("try { [1, 2, 3].sort(); } catch (e) { catcher(e) };");
+  CHECK(!access_check_fail_thrown);
+  CHECK(!catch_callback_called);
+}
+
+
+TEST(AccessCheckThrows) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::V8::Initialize();
+  v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
+  v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+  // Create an ObjectTemplate for global objects and install access
+  // check callbacks that will block access.
+  v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+  global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
+                                           IndexAccessAlwaysBlocked);
+
+  // Create a context and set an x property on its global object.
+  LocalContext context0(NULL, global_template);
+  context0->Global()->Set(v8_str("x"), v8_num(42));
+  v8::Handle<v8::Object> global0 = context0->Global();
+
+  // Create a context with a different security token so that the
+  // failed access check callback will be called on each access.
+  LocalContext context1(NULL, global_template);
+  context1->Global()->Set(v8_str("other"), global0);
+
+  v8::Handle<v8::FunctionTemplate> catcher_fun =
+      v8::FunctionTemplate::New(CatcherCallback);
+  context1->Global()->Set(v8_str("catcher"), catcher_fun->GetFunction());
+
+  v8::Handle<v8::FunctionTemplate> has_own_property_fun =
+      v8::FunctionTemplate::New(HasOwnPropertyCallback);
+  context1->Global()->Set(v8_str("has_own_property"),
+                          has_own_property_fun->GetFunction());
+
+  { v8::TryCatch try_catch;
+    access_check_fail_thrown = false;
+    CompileRun("other.x;");
+    CHECK(access_check_fail_thrown);
+    CHECK(try_catch.HasCaught());
+  }
+
+  CheckCorrectThrow("other.x");
+  CheckCorrectThrow("other[1]");
+  CheckCorrectThrow("JSON.stringify(other)");
+  CheckCorrectThrow("has_own_property(other, 'x')");
+  CheckCorrectThrow("%GetProperty(other, 'x')");
+  CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 1, 0)");
+  CheckCorrectThrow("%IgnoreAttributesAndSetProperty(other, 'x', 'foo')");
+  CheckCorrectThrow("%DeleteProperty(other, 'x', 0)");
+  CheckCorrectThrow("%DeleteProperty(other, '1', 0)");
+  CheckCorrectThrow("%HasLocalProperty(other, 'x')");
+  CheckCorrectThrow("%HasProperty(other, 'x')");
+  CheckCorrectThrow("%HasElement(other, 1)");
+  CheckCorrectThrow("%IsPropertyEnumerable(other, 'x')");
+  CheckCorrectThrow("%GetPropertyNames(other)");
+  CheckCorrectThrow("%GetLocalPropertyNames(other, true)");
+  CheckCorrectThrow("%DefineOrRedefineAccessorProperty("
+                        "other, 'x', null, null, 1)");
+}
+
 #endif  // WIN32
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index cb44ddc..c79e740 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -1227,4 +1227,186 @@
   CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
 }
 
+
+TEST(15) {
+  // Test the Neon instructions.
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    uint32_t src0;
+    uint32_t src1;
+    uint32_t src2;
+    uint32_t src3;
+    uint32_t src4;
+    uint32_t src5;
+    uint32_t src6;
+    uint32_t src7;
+    uint32_t dst0;
+    uint32_t dst1;
+    uint32_t dst2;
+    uint32_t dst3;
+    uint32_t dst4;
+    uint32_t dst5;
+    uint32_t dst6;
+    uint32_t dst7;
+    uint32_t srcA0;
+    uint32_t srcA1;
+    uint32_t dstA0;
+    uint32_t dstA1;
+    uint32_t dstA2;
+    uint32_t dstA3;
+  } T;
+  T t;
+
+  // Create a function that accepts &t, and uses NEON loads and stores to
+  // move and widen the data.
+  Assembler assm(isolate, NULL, 0);
+
+
+  if (CpuFeatures::IsSupported(NEON)) {
+    CpuFeatureScope scope(&assm, NEON);
+
+    __ stm(db_w, sp, r4.bit() | lr.bit());
+    // Move 32 bytes with NEON.
+    __ add(r4, r0, Operand(OFFSET_OF(T, src0)));
+    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
+    __ add(r4, r0, Operand(OFFSET_OF(T, dst0)));
+    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
+
+    // Expand 8 bytes into 8 words (16 bits each).
+    __ add(r4, r0, Operand(OFFSET_OF(T, srcA0)));
+    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r4));
+    __ vmovl(NeonU8, q0, d0);
+    __ add(r4, r0, Operand(OFFSET_OF(T, dstA0)));
+    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));
+
+    __ ldm(ia_w, sp, r4.bit() | pc.bit());
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Object* code = isolate->heap()->CreateCode(
+        desc,
+        Code::ComputeFlags(Code::STUB),
+        Handle<Code>())->ToObjectChecked();
+    CHECK(code->IsCode());
+#ifdef DEBUG
+    Code::cast(code)->Print();
+#endif
+    F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+    t.src0 = 0x01020304;
+    t.src1 = 0x11121314;
+    t.src2 = 0x21222324;
+    t.src3 = 0x31323334;
+    t.src4 = 0x41424344;
+    t.src5 = 0x51525354;
+    t.src6 = 0x61626364;
+    t.src7 = 0x71727374;
+    t.dst0 = 0;
+    t.dst1 = 0;
+    t.dst2 = 0;
+    t.dst3 = 0;
+    t.dst4 = 0;
+    t.dst5 = 0;
+    t.dst6 = 0;
+    t.dst7 = 0;
+    t.srcA0 = 0x41424344;
+    t.srcA1 = 0x81828384;
+    t.dstA0 = 0;
+    t.dstA1 = 0;
+    t.dstA2 = 0;
+    t.dstA3 = 0;
+    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+    USE(dummy);
+    CHECK_EQ(0x01020304, t.dst0);
+    CHECK_EQ(0x11121314, t.dst1);
+    CHECK_EQ(0x21222324, t.dst2);
+    CHECK_EQ(0x31323334, t.dst3);
+    CHECK_EQ(0x41424344, t.dst4);
+    CHECK_EQ(0x51525354, t.dst5);
+    CHECK_EQ(0x61626364, t.dst6);
+    CHECK_EQ(0x71727374, t.dst7);
+    CHECK_EQ(0x00430044, t.dstA0);
+    CHECK_EQ(0x00410042, t.dstA1);
+    CHECK_EQ(0x00830084, t.dstA2);
+    CHECK_EQ(0x00810082, t.dstA3);
+  }
+}
+
+
+TEST(16) {
+  // Test the pkh, uxtb, uxtab and uxtb16 instructions.
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    uint32_t src0;
+    uint32_t src1;
+    uint32_t src2;
+    uint32_t dst0;
+    uint32_t dst1;
+    uint32_t dst2;
+    uint32_t dst3;
+    uint32_t dst4;
+  } T;
+  T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the integer words.
+  Assembler assm(isolate, NULL, 0);
+
+  __ stm(db_w, sp, r4.bit() | lr.bit());
+
+  __ mov(r4, Operand(r0));
+  __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src0)));
+  __ ldr(r1, MemOperand(r4, OFFSET_OF(T, src1)));
+
+  __ pkhbt(r2, r0, Operand(r1, LSL, 8));
+  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst0)));
+
+  __ pkhtb(r2, r0, Operand(r1, ASR, 8));
+  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst1)));
+
+  __ uxtb16(r2, Operand(r0, ROR, 8));
+  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst2)));
+
+  __ uxtb(r2, Operand(r0, ROR, 8));
+  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst3)));
+
+  __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src2)));
+  __ uxtab(r2, r0, Operand(r1, ROR, 8));
+  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst4)));
+
+  __ ldm(ia_w, sp, r4.bit() | pc.bit());
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = isolate->heap()->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Code>())->ToObjectChecked();
+  CHECK(code->IsCode());
+#ifdef DEBUG
+  Code::cast(code)->Print();
+#endif
+  F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+  t.src0 = 0x01020304;
+  t.src1 = 0x11121314;
+  t.src2 = 0x11121300;
+  t.dst0 = 0;
+  t.dst1 = 0;
+  t.dst2 = 0;
+  t.dst3 = 0;
+  t.dst4 = 0;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+  CHECK_EQ(0x12130304, t.dst0);
+  CHECK_EQ(0x01021213, t.dst1);
+  CHECK_EQ(0x00010003, t.dst2);
+  CHECK_EQ(0x00000003, t.dst3);
+  CHECK_EQ(0x11121313, t.dst4);
+}
+
 #undef __
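
The expected values in TEST(16) can be reproduced with plain bit arithmetic.
The following reflects a reading of the ARM pkhbt/pkhtb/uxtb16/uxtb/uxtab
semantics, not generated code:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t src0 = 0x01020304, src1 = 0x11121314, src2 = 0x11121300;
      // pkhbt: bottom halfword from Rn, top halfword from the shifted operand.
      uint32_t pkhbt = (src0 & 0xFFFFu) | ((src1 << 8) & 0xFFFF0000u);
      // pkhtb: top halfword from Rn, bottom halfword from the shifted operand
      // (ASR and a logical shift agree here because src1 is positive).
      uint32_t pkhtb = (src0 & 0xFFFF0000u) | ((src1 >> 8) & 0xFFFFu);
      // Operand(rX, ROR, 8): rotate right by eight bits before extraction.
      uint32_t r0ror8 = (src0 >> 8) | (src0 << 24);
      uint32_t r1ror8 = (src1 >> 8) | (src1 << 24);
      uint32_t uxtb16 = r0ror8 & 0x00FF00FFu;     // zero-extend bytes 0 and 2
      uint32_t uxtb   = r0ror8 & 0x000000FFu;     // zero-extend byte 0
      uint32_t uxtab  = src2 + (r1ror8 & 0xFFu);  // accumulate extended byte
      std::printf("%08x %08x %08x %08x %08x\n",
                  pkhbt, pkhtb, uxtb16, uxtb, uxtab);
      // Prints the five expected dst values:
      // 12130304 01021213 00010003 00000003 11121313
      return 0;
    }
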
diff --git a/test/cctest/test-code-stubs-ia32.cc b/test/cctest/test-code-stubs-ia32.cc
new file mode 100644
index 0000000..8e599b3
--- /dev/null
+++ b/test/cctest/test-code-stubs-ia32.cc
@@ -0,0 +1,278 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+#if __GNUC__
+#define STDCALL  __attribute__((stdcall))
+#else
+#define STDCALL  __stdcall
+#endif
+
+using namespace v8::internal;
+
+
+typedef int32_t STDCALL ConvertDToIFuncType(double input);
+typedef ConvertDToIFuncType* ConvertDToIFunc;
+
+
+int STDCALL ConvertDToICVersion(double d) {
+  Address double_ptr = reinterpret_cast<Address>(&d);
+  uint32_t exponent_bits = Memory::uint32_at(double_ptr + kDoubleSize / 2);
+  int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
+  int32_t exponent = (((exponent_bits & shifted_mask) >>
+                       (Double::kPhysicalSignificandSize - 32)) -
+                      HeapNumber::kExponentBias);
+  uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
+  int result = 0;
+  uint32_t max_exponent =
+    static_cast<uint32_t>(Double::kPhysicalSignificandSize);
+  if (unsigned_exponent >= max_exponent) {
+    if ((exponent - Double::kPhysicalSignificandSize) < 32) {
+      result = Memory::uint32_at(double_ptr) <<
+        (exponent - Double::kPhysicalSignificandSize);
+    }
+  } else {
+    uint64_t big_result =
+        (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
+    big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
+    result = static_cast<uint32_t>(big_result);
+  }
+  if (static_cast<int32_t>(exponent_bits) < 0) {
+    return (0 - result);
+  } else {
+    return result;
+  }
+}
+
+
+void RunOneTruncationTestWithTest(ConvertDToIFunc func,
+                                  double from,
+                                  double raw) {
+  uint64_t to = static_cast<int64_t>(raw);
+  int result = (*func)(from);
+  CHECK_EQ(static_cast<int>(to), result);
+}
+
+
+// #define NaN and Infinity so that it's possible to cut-and-paste these tests
+// directly to a .js file and run them.
+#define NaN (OS::nan_value())
+#define Infinity (std::numeric_limits<double>::infinity())
+#define RunOneTruncationTest(p1, p2) RunOneTruncationTestWithTest(func, p1, p2)
+
+void RunAllTruncationTests(ConvertDToIFunc func) {
+  RunOneTruncationTest(0, 0);
+  RunOneTruncationTest(0.5, 0);
+  RunOneTruncationTest(-0.5, 0);
+  RunOneTruncationTest(1.5, 1);
+  RunOneTruncationTest(-1.5, -1);
+  RunOneTruncationTest(5.5, 5);
+  RunOneTruncationTest(-5.0, -5);
+  RunOneTruncationTest(NaN, 0);
+  RunOneTruncationTest(Infinity, 0);
+  RunOneTruncationTest(-NaN, 0);
+  RunOneTruncationTest(-Infinity, 0);
+
+  RunOneTruncationTest(4.5036e+15, 0x1635E000);
+  RunOneTruncationTest(-4.5036e+15, -372629504);
+
+  RunOneTruncationTest(4503603922337791.0, -1);
+  RunOneTruncationTest(-4503603922337791.0, 1);
+  RunOneTruncationTest(4503601774854143.0, 2147483647);
+  RunOneTruncationTest(-4503601774854143.0, -2147483647);
+  RunOneTruncationTest(9007207844675582.0, -2);
+  RunOneTruncationTest(-9007207844675582.0, 2);
+  RunOneTruncationTest(2.4178527921507624e+24, -536870912);
+  RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
+  RunOneTruncationTest(2.417853945072267e+24, -536870912);
+  RunOneTruncationTest(-2.417853945072267e+24, 536870912);
+
+  RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
+  RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
+  RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
+  RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+
+  RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
+  RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
+  RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
+  RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+}
+
+#undef NaN
+#undef Infinity
+#undef RunOneTruncationTest
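
The expected results above follow ECMA-262 ToInt32 semantics: truncate
toward zero, wrap modulo 2^32, and reinterpret as signed, with NaN and the
infinities mapping to zero. A compact reference implementation of those
semantics (not V8 code):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    static int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;        // NaN, +/-Infinity -> 0
      double t = std::trunc(d);               // round toward zero
      double m = std::fmod(t, 4294967296.0);  // wrap into (-2^32, 2^32)
      if (m < 0) m += 4294967296.0;           // normalize to [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      std::printf("%d\n", ToInt32(4503603922337791.0));   // -1
      std::printf("%d\n", ToInt32(-4503603922337791.0));  // 1
      std::printf("%d\n", ToInt32(9007207844675582.0));   // -2
      return 0;
    }
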
+
+#define __ assm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+                                              Register source_reg,
+                                              Register destination_reg) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                   &actual_size,
+                                                   true));
+  CHECK(buffer);
+  HandleScope handles(isolate);
+  MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+  assm.set_allow_stub_calls(false);
+  int offset =
+    source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+  DoubleToIStub stub(source_reg, destination_reg, offset, true);
+  byte* start = stub.GetCode(isolate)->instruction_start();
+
+  __ push(ebx);
+  __ push(ecx);
+  __ push(edx);
+  __ push(esi);
+  __ push(edi);
+
+  if (!source_reg.is(esp)) {
+    __ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
+  }
+
+  int param_offset = 7 * kPointerSize;
+  // Save registers to make sure they don't get clobbered.
+  int reg_num = 0;
+  for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+    Register reg = Register::from_code(reg_num);
+    if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+      __ push(reg);
+      param_offset += kPointerSize;
+    }
+  }
+
+  // Re-push the double argument (a double occupies two words on ia32).
+  __ push(MemOperand(esp, param_offset));
+  __ push(MemOperand(esp, param_offset));
+
+  // Call through to the actual stub.
+  __ call(start, RelocInfo::EXTERNAL_REFERENCE);
+
+  __ add(esp, Immediate(kDoubleSize));
+
+  // Make sure no registers have been unexpectedly clobbered.
+  for (--reg_num; reg_num >= 0; --reg_num) {
+    Register reg = Register::from_code(reg_num);
+    if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+      __ cmp(reg, MemOperand(esp, 0));
+      __ Assert(equal, "register was clobbered");
+      __ add(esp, Immediate(kPointerSize));
+    }
+  }
+
+  __ mov(eax, destination_reg);
+
+  __ pop(edi);
+  __ pop(esi);
+  __ pop(edx);
+  __ pop(ecx);
+  __ pop(ebx);
+
+  __ ret(kDoubleSize);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  return reinterpret_cast<ConvertDToIFunc>(
+      reinterpret_cast<intptr_t>(buffer));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+  return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+TEST(ConvertDToI) {
+  CcTest::InitializeVM();
+  LocalContext context;
+  Isolate* isolate = GetIsolateFrom(&context);
+  HandleScope scope(isolate);
+
+#ifdef DEBUG
+  // Verify that the tests actually work with the C version. In release
+  // builds the compiler constant-folds the whole computation away, and gcc
+  // gets the folding wrong, triggering an assert.
+  RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
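+  // Exercise every (source, destination) register pairing so the stub's
+  // save/restore logic is covered for each combination.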
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, esi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, esi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, esi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, esi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, esi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, esi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, eax));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, ebx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, ecx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, edx));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, edi));
+  RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, esi));
+}
diff --git a/test/cctest/test-compare-nil-ic-stub.cc b/test/cctest/test-compare-nil-ic-stub.cc
deleted file mode 100644
index 78bb6fd..0000000
--- a/test/cctest/test-compare-nil-ic-stub.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2006-2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-#include "cctest.h"
-#include "code-stubs.h"
-
-
-using namespace v8::internal;
-
-typedef CompareNilICStub::State State;
-
-TEST(StateConstructors) {
-  State state;
-  state.Add(CompareNilICStub::MONOMORPHIC_MAP);
-  State state2(state);
-  CHECK_EQ(state.ToIntegral(), state2.ToIntegral());
-}
-
-
-TEST(ExternalICStateParsing) {
-  State state;
-  state.Add(CompareNilICStub::UNDEFINED);
-  CompareNilICStub stub(kUndefinedValue, state);
-  CompareNilICStub stub2(stub.GetExtraICState());
-  CHECK_EQ(stub.GetNilValue(), stub2.GetNilValue());
-  CHECK_EQ(stub.GetState().ToIntegral(), stub2.GetState().ToIntegral());
-}
-
-
-TEST(SettingState) {
-  State state;
-  CHECK(state.IsEmpty());
-  state.Add(CompareNilICStub::NULL_TYPE);
-  CHECK(!state.IsEmpty());
-  CHECK(state.Contains(CompareNilICStub::NULL_TYPE));
-  CHECK(!state.Contains(CompareNilICStub::UNDEFINED));
-  CHECK(!state.Contains(CompareNilICStub::UNDETECTABLE));
-  state.Add(CompareNilICStub::UNDEFINED);
-  CHECK(state.Contains(CompareNilICStub::UNDEFINED));
-  CHECK(state.Contains(CompareNilICStub::NULL_TYPE));
-  CHECK(!state.Contains(CompareNilICStub::UNDETECTABLE));
-}
-
-
-TEST(ClearState) {
-  State state;
-  state.Add(CompareNilICStub::NULL_TYPE);
-  state.RemoveAll();
-  CHECK(state.IsEmpty());
-}
-
-
-TEST(Generic) {
-  State state;
-  CHECK(State::Generic() != state);
-  state.Add(CompareNilICStub::UNDEFINED);
-  CHECK(state != State::Generic());
-  state.Add(CompareNilICStub::NULL_TYPE);
-  CHECK(state != State::Generic());
-  state.Add(CompareNilICStub::UNDETECTABLE);
-  CHECK(state != State::Generic());
-  state.Add(CompareNilICStub::GENERIC);
-  CHECK(state == State::Generic());
-}
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 85b472d..9d6623e 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -405,6 +405,17 @@
             "e6ff3f94       usat r3, #31, r4, lsl #31");
     COMPARE(usat(r8, 0, Operand(r5, ASR, 17)),
             "e6e088d5       usat r8, #0, r5, asr #17");
+
+    COMPARE(pkhbt(r3, r4, Operand(r5, LSL, 17)),
+            "e6843895       pkhbt r3, r4, r5, lsl #17");
+    COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
+            "e68438d5       pkhtb r3, r4, r5, asr #17");
+    COMPARE(uxtb(r3, Operand(r4, ROR, 8)),
+            "e6ef3474       uxtb r3, r4, ror #8");
+    COMPARE(uxtab(r3, r4, Operand(r5, ROR, 8)),
+            "e6e43475       uxtab r3, r4, r5, ror #8");
+    COMPARE(uxtb16(r3, Operand(r4, ROR, 8)),
+            "e6cf3474       uxtb16 r3, r4, ror #8");
   }
 
   VERIFY_RUN();
@@ -662,6 +673,23 @@
 }
 
 
+TEST(Neon) {
+  SET_UP();
+
+  if (CpuFeatures::IsSupported(NEON)) {
+    CpuFeatureScope scope(&assm, NEON);
+      COMPARE(vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(r1)),
+              "f421420f       vld1.8 {d4, d5, d6, d7}, [r1]");
+      COMPARE(vst1(Neon16, NeonListOperand(d17, 4), NeonMemOperand(r9)),
+              "f449124f       vst1.16 {d17, d18, d19, d20}, [r9]");
+      COMPARE(vmovl(NeonU8, q4, d2),
+              "f3884a12       vmovl.u8 q4, d2");
+  }
+
+  VERIFY_RUN();
+}
+
+
 TEST(LoadStore) {
   SET_UP();
 
@@ -858,6 +886,11 @@
             "e1eba7ff       strd r10, [fp, #+127]!");
     COMPARE(strd(ip, sp, MemOperand(sp, -127, PreIndex)),
             "e16dc7ff       strd ip, [sp, #-127]!");
+
+    COMPARE(pld(MemOperand(r1, 0)),
+            "f5d1f000       pld [r1]");
+    COMPARE(pld(MemOperand(r2, 128)),
+            "f5d2f080       pld [r2, #+128]");
   }
 
   VERIFY_RUN();
diff --git a/test/mjsunit/allocation-folding.js b/test/mjsunit/allocation-folding.js
new file mode 100644
index 0000000..a730bf1
--- /dev/null
+++ b/test/mjsunit/allocation-folding.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse-osr
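+
+// The garbage-allocating loops provoke GC activity around the (potentially
+// folded) allocations in f; the final check verifies elem2 survived intact.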
+function f() {
+  var elem1 = [1,2,3];
+  for (var i=0; i < 100000; i++) {
+    var bar = [1];
+  }
+  var elem2 = [1,2,3];
+  return elem2;
+}
+
+f(); f(); f();
+%OptimizeFunctionOnNextCall(f);
+var result = f();
+
+for (var i=0; i < 100000; i++) {
+  var bar = [1];
+}
+
+assertEquals(3, result[2]);
diff --git a/test/mjsunit/allocation-site-info.js b/test/mjsunit/allocation-site-info.js
index 72df772..442d108 100644
--- a/test/mjsunit/allocation-site-info.js
+++ b/test/mjsunit/allocation-site-info.js
@@ -175,6 +175,20 @@
   obj = fastliteralcase_smifast(2);
   assertKind(elements_kind.fast, obj);
 
+  // Case: make sure transitions from packed to holey are tracked
+  function fastliteralcase_smiholey(index, value) {
+    var literal = [1, 2, 3, 4];
+    literal[index] = value;
+    return literal;
+  }
+
+  obj = fastliteralcase_smiholey(5, 1);
+  assertKind(elements_kind.fast_smi_only, obj);
+  assertHoley(obj);
+  obj = fastliteralcase_smiholey(0, 1);
+  assertKind(elements_kind.fast_smi_only, obj);
+  assertHoley(obj);
+
   function newarraycase_smidouble(value) {
     var a = new Array();
     a[0] = value;
@@ -268,6 +282,32 @@
   obj = newarraycase_list_smiobj(2);
   assertKind(elements_kind.fast, obj);
 
+  // Case: array constructor calls with out-of-date feedback.
+  // The boilerplate should incorporate all feedback, but the input array
+  // should be minimally transitioned based on immediate need.
+  (function() {
+    function foo(i) {
+      // We have two cases, one for literals one for constructed arrays.
+      var a = (i == 0)
+        ? [1, 2, 3]
+        : new Array(1, 2, 3);
+      return a;
+    }
+
+    for (i = 0; i < 2; i++) {
+      a = foo(i);
+      b = foo(i);
+      b[5] = 1;  // boilerplate goes holey
+      assertHoley(foo(i));
+      a[0] = 3.5;  // boilerplate goes holey double
+      assertKind(elements_kind.fast_double, a);
+      assertNotHoley(a);
+      c = foo(i);
+      assertKind(elements_kind.fast_double, c);
+      assertHoley(c);
+    }
+  })();
+
   function newarraycase_onearg(len, value) {
     var a = new Array(len);
     a[0] = value;
diff --git a/test/mjsunit/compiler/inline-arguments.js b/test/mjsunit/compiler/inline-arguments.js
index 75d01b5..7de412d 100644
--- a/test/mjsunit/compiler/inline-arguments.js
+++ b/test/mjsunit/compiler/inline-arguments.js
@@ -117,7 +117,7 @@
 // Test arguments access from the inlined function.
 function uninlinable(v) {
   assertEquals(0, v);
-  try { } catch (e) { }
+  %NeverOptimize();
   return 0;
 }
 
diff --git a/test/mjsunit/double-truncation.js b/test/mjsunit/double-truncation.js
new file mode 100644
index 0000000..b43e1e6
--- /dev/null
+++ b/test/mjsunit/double-truncation.js
@@ -0,0 +1,78 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
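+// x | 0 applies ECMAScript ToInt32: the integral part of x is reduced
+// modulo 2^32 and reinterpreted as a signed 32-bit integer, which is what
+// the expected values below encode.
+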
+function RunOneTruncationTest(a, b) {
+  var temp = a | 0;
+  assertEquals(b, temp);
+}
+
+function RunAllTruncationTests() {
+  RunOneTruncationTest(0, 0);
+  RunOneTruncationTest(0.5, 0);
+  RunOneTruncationTest(-0.5, 0);
+  RunOneTruncationTest(1.5, 1);
+  RunOneTruncationTest(-1.5, -1);
+  RunOneTruncationTest(5.5, 5);
+  RunOneTruncationTest(-5.0, -5);
+  RunOneTruncationTest(NaN, 0);
+  RunOneTruncationTest(Infinity, 0);
+  RunOneTruncationTest(-NaN, 0);
+  RunOneTruncationTest(-Infinity, 0);
+
+  RunOneTruncationTest(4.5036e+15, 0x1635E000);
+  RunOneTruncationTest(-4.5036e+15, -372629504);
+
+  RunOneTruncationTest(4503603922337791.0, -1);
+  RunOneTruncationTest(-4503603922337791.0, 1);
+  RunOneTruncationTest(4503601774854143.0, 2147483647);
+  RunOneTruncationTest(-4503601774854143.0, -2147483647);
+  RunOneTruncationTest(9007207844675582.0, -2);
+  RunOneTruncationTest(-9007207844675582.0, 2);
+
+  RunOneTruncationTest(2.4178527921507624e+24, -536870912);
+  RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
+  RunOneTruncationTest(2.417853945072267e+24, -536870912);
+  RunOneTruncationTest(-2.417853945072267e+24, 536870912);
+
+  RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
+  RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
+  RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
+  RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+
+  RunOneTruncationTest(9.6714111686030497e+24, -2147483648);
+  RunOneTruncationTest(-9.6714111686030497e+24, -2147483648);
+  RunOneTruncationTest(9.6714157802890681e+24, -2147483648);
+  RunOneTruncationTest(-9.6714157802890681e+24, -2147483648);
+}
+
+RunAllTruncationTests();
+RunAllTruncationTests();
+%OptimizeFunctionOnNextCall(RunOneTruncationTest);
+RunAllTruncationTests();
+RunAllTruncationTests();
diff --git a/test/mjsunit/elements-kind.js b/test/mjsunit/elements-kind.js
index 247aa89..90ec7ab 100644
--- a/test/mjsunit/elements-kind.js
+++ b/test/mjsunit/elements-kind.js
@@ -171,21 +171,21 @@
 
 if (support_smi_only_arrays) {
   function construct_smis() {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     var a = [0, 0, 0];
     a[0] = 0;  // Send the COW array map to the steak house.
     assertKind(elements_kind.fast_smi_only, a);
     return a;
   }
   function construct_doubles() {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     var a = construct_smis();
     a[0] = 1.5;
     assertKind(elements_kind.fast_double, a);
     return a;
   }
   function construct_objects() {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     var a = construct_smis();
     a[0] = "one";
     assertKind(elements_kind.fast, a);
@@ -194,7 +194,7 @@
 
   // Test crankshafted transition SMI->DOUBLE.
   function convert_to_double(array) {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     array[1] = 2.5;
     assertKind(elements_kind.fast_double, array);
     assertEquals(2.5, array[1]);
@@ -206,7 +206,7 @@
   convert_to_double(smis);
   // Test crankshafted transitions SMI->FAST and DOUBLE->FAST.
   function convert_to_fast(array) {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     array[1] = "two";
     assertKind(elements_kind.fast, array);
     assertEquals("two", array[1]);
@@ -223,7 +223,7 @@
   // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
   // transition to FAST directly).
   function convert_mixed(array, value, kind) {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     array[1] = value;
     assertKind(kind, array);
     assertEquals(value, array[1]);
diff --git a/test/mjsunit/elide-double-hole-check-9.js b/test/mjsunit/elide-double-hole-check-9.js
index 4d277af..773f6d9 100644
--- a/test/mjsunit/elide-double-hole-check-9.js
+++ b/test/mjsunit/elide-double-hole-check-9.js
@@ -30,7 +30,7 @@
 var do_set = false;
 
 function set_proto_elements() {
-  try {} catch (e) {}  // Don't optimize or inline
+  %NeverOptimize();
   if (do_set) Array.prototype[1] = 1.5;
 }
 
diff --git a/test/mjsunit/external-array-no-sse2.js b/test/mjsunit/external-array-no-sse2.js
index cffcab8..9a008fb 100644
--- a/test/mjsunit/external-array-no-sse2.js
+++ b/test/mjsunit/external-array-no-sse2.js
@@ -606,8 +606,10 @@
 assertArrayPrefix([1, 12], a61)
 
 // Invalid source
-assertThrows(function() { a.set(0) })
-assertThrows(function() { a.set({}) })
+a.set(0); // does not throw
+assertArrayPrefix([1,2,3,4,5,6], a);
+a.set({}); // does not throw
+assertArrayPrefix([1,2,3,4,5,6], a);
 
 
 // Test arraybuffer.slice
diff --git a/test/mjsunit/external-array.js b/test/mjsunit/external-array.js
index deb3c86..81788d4 100644
--- a/test/mjsunit/external-array.js
+++ b/test/mjsunit/external-array.js
@@ -605,8 +605,10 @@
 assertArrayPrefix([1, 12], a61)
 
 // Invalid source
-assertThrows(function() { a.set(0) })
-assertThrows(function() { a.set({}) })
+a.set(0); // does not throw
+assertArrayPrefix([1,2,3,4,5,6], a);
+a.set({}); // does not throw
+assertArrayPrefix([1,2,3,4,5,6], a);
 
 
 // Test arraybuffer.slice
diff --git a/test/mjsunit/generated-transition-stub.js b/test/mjsunit/generated-transition-stub.js
index 072ce9c..6299a22 100644
--- a/test/mjsunit/generated-transition-stub.js
+++ b/test/mjsunit/generated-transition-stub.js
@@ -27,7 +27,7 @@
 
 // Flags: --allow-natives-syntax --compiled_transitions
 
-try {} catch (e) {}
+%NeverOptimize();
 
 var iteration_count = 1;
 
diff --git a/test/mjsunit/harmony/array-iterator.js b/test/mjsunit/harmony/array-iterator.js
new file mode 100644
index 0000000..f3a2627
--- /dev/null
+++ b/test/mjsunit/harmony/array-iterator.js
@@ -0,0 +1,195 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-iteration --allow-natives-syntax
+
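+// With --harmony-iteration, Array.prototype gains non-enumerable entries,
+// values and keys methods, each returning a fresh Array Iterator over the
+// receiver.
+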
+function TestArrayPrototype() {
+  assertTrue(Array.prototype.hasOwnProperty('entries'));
+  assertTrue(Array.prototype.hasOwnProperty('values'));
+  assertTrue(Array.prototype.hasOwnProperty('keys'));
+
+  assertFalse(Array.prototype.propertyIsEnumerable('entries'));
+  assertFalse(Array.prototype.propertyIsEnumerable('values'));
+  assertFalse(Array.prototype.propertyIsEnumerable('keys'));
+}
+TestArrayPrototype();
+
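+// Iterator results follow the { value, done } protocol: done flips to true
+// once the iterator is exhausted and stays true on subsequent next() calls.
+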
+function assertIteratorResult(value, done, result) {
+  assertEquals({ value: value, done: done}, result);
+}
+
+function TestValues() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.values();
+  assertIteratorResult('a', false, iterator.next());
+  assertIteratorResult('b', false, iterator.next());
+  assertIteratorResult('c', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+
+  array.push('d');
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestValues();
+
+function TestValuesMutate() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.values();
+  assertIteratorResult('a', false, iterator.next());
+  assertIteratorResult('b', false, iterator.next());
+  assertIteratorResult('c', false, iterator.next());
+  array.push('d');
+  assertIteratorResult('d', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestValuesMutate();
+
+function TestKeys() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.keys();
+  assertIteratorResult('0', false, iterator.next());
+  assertIteratorResult('1', false, iterator.next());
+  assertIteratorResult('2', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+
+  array.push('d');
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestKeys();
+
+function TestKeysMutate() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.keys();
+  assertIteratorResult('0', false, iterator.next());
+  assertIteratorResult('1', false, iterator.next());
+  assertIteratorResult('2', false, iterator.next());
+  array.push('d');
+  assertIteratorResult('3', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestKeysMutate();
+
+function TestEntries() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.entries();
+  assertIteratorResult(['0', 'a'], false, iterator.next());
+  assertIteratorResult(['1', 'b'], false, iterator.next());
+  assertIteratorResult(['2', 'c'], false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+
+  array.push('d');
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestEntries();
+
+function TestEntriesMutate() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.entries();
+  assertIteratorResult(['0', 'a'], false, iterator.next());
+  assertIteratorResult(['1', 'b'], false, iterator.next());
+  assertIteratorResult(['2', 'c'], false, iterator.next());
+  array.push('d');
+  assertIteratorResult(['3', 'd'], false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestEntriesMutate();
+
+function TestArrayIteratorPrototype() {
+  var array = [];
+  var iterator = array.values();
+
+  var ArrayIterator = iterator.constructor;
+  assertEquals(ArrayIterator.prototype, array.values().__proto__);
+  assertEquals(ArrayIterator.prototype, array.keys().__proto__);
+  assertEquals(ArrayIterator.prototype, array.entries().__proto__);
+
+  assertEquals(Object.prototype, ArrayIterator.prototype.__proto__);
+
+  assertEquals('Array Iterator', %_ClassOf(array.values()));
+  assertEquals('Array Iterator', %_ClassOf(array.keys()));
+  assertEquals('Array Iterator', %_ClassOf(array.entries()));
+
+  var prototypeDescriptor =
+      Object.getOwnPropertyDescriptor(ArrayIterator, 'prototype');
+  assertFalse(prototypeDescriptor.configurable);
+  assertFalse(prototypeDescriptor.enumerable);
+  assertFalse(prototypeDescriptor.writable);
+}
+TestArrayIteratorPrototype();
+
+function TestForArrayValues() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var value of array.values()) {
+    buffer[i++] = value;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length - 1; i++) {
+    assertEquals(array[i], buffer[i]);
+  }
+  assertTrue(isNaN(buffer[buffer.length - 1]));
+}
+TestForArrayValues();
+
+function TestForArrayKeys() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var key of array.keys()) {
+    buffer[i++] = key;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertEquals(String(i), buffer[i]);
+  }
+}
+TestForArrayKeys();
+
+function TestForArrayEntries() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var entry of array.entries()) {
+    buffer[i++] = entry;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length - 1; i++) {
+    assertEquals(array[i], buffer[i][1]);
+  }
+  assertTrue(isNaN(buffer[buffer.length - 1][1]));
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertEquals(String(i), buffer[i][0]);
+  }
+}
+TestForArrayEntries();
diff --git a/test/mjsunit/harmony/dataview-accessors.js b/test/mjsunit/harmony/dataview-accessors.js
index 9dd8fe3..b655a41 100644
--- a/test/mjsunit/harmony/dataview-accessors.js
+++ b/test/mjsunit/harmony/dataview-accessors.js
@@ -62,7 +62,10 @@
 
 function checkGet(func, index, expected, littleEndian) {
   function doGet() {
-    return view["get" + func](index, littleEndian);
+    if (littleEndian != undefined)
+      return view["get" + func](index, littleEndian);
+    else
+      return view["get" + func](index);
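+    // Calling with fewer arguments, rather than passing undefined explicitly,
+    // also exercises the accessor's missing-argument path.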
   }
   if (index >=0 && index + getElementSize(func) - 1 < view.byteLength)
       assertSame(expected, doGet());
@@ -72,7 +75,10 @@
 
 function checkSet(func, index, value, littleEndian) {
   function doSet() {
-    view["set" + func](index, value, littleEndian);
+    if (littleEndian != undefined)
+      view["set" + func](index, value, littleEndian);
+    else
+      view["set" + func](index, value);
   }
   if (index >= 0 &&
       index + getElementSize(func) - 1 < view.byteLength) {
@@ -105,39 +111,46 @@
   createDataView(array, 0, true, start, length);
 
   test(isTestingGet, "Int8", 0, 0);
+  test(isTestingGet, "Int8", undefined, 0);
   test(isTestingGet, "Int8", 8, -128);
   test(isTestingGet, "Int8", 15, -1);
 
   test(isTestingGet, "Uint8", 0, 0);
+  test(isTestingGet, "Uint8", undefined, 0);
   test(isTestingGet, "Uint8", 8, 128);
   test(isTestingGet, "Uint8", 15, 255);
 
   // Little endian.
   test(isTestingGet, "Int16", 0, 256, true);
+  test(isTestingGet, "Int16", undefined, 256, true);
   test(isTestingGet, "Int16", 5, 26213, true);
   test(isTestingGet, "Int16", 9, -32127, true);
   test(isTestingGet, "Int16", 14, -2, true);
 
   // Big endian.
   test(isTestingGet, "Int16", 0, 1);
+  test(isTestingGet, "Int16", undefined, 1);
   test(isTestingGet, "Int16", 5, 25958);
   test(isTestingGet, "Int16", 9, -32382);
   test(isTestingGet, "Int16", 14, -257);
 
   // Little endian.
   test(isTestingGet, "Uint16", 0, 256, true);
+  test(isTestingGet, "Uint16", undefined, 256, true);
   test(isTestingGet, "Uint16", 5, 26213, true);
   test(isTestingGet, "Uint16", 9, 33409, true);
   test(isTestingGet, "Uint16", 14, 65534, true);
 
   // Big endian.
   test(isTestingGet, "Uint16", 0, 1);
+  test(isTestingGet, "Uint16", undefined, 1);
   test(isTestingGet, "Uint16", 5, 25958);
   test(isTestingGet, "Uint16", 9, 33154);
   test(isTestingGet, "Uint16", 14, 65279);
 
   // Little endian.
   test(isTestingGet, "Int32", 0, 50462976, true);
+  test(isTestingGet, "Int32", undefined, 50462976, true);
   test(isTestingGet, "Int32", 3, 1717920771, true);
   test(isTestingGet, "Int32", 6, -2122291354, true);
   test(isTestingGet, "Int32", 9, -58490239, true);
@@ -145,6 +158,7 @@
 
   // Big endian.
   test(isTestingGet, "Int32", 0, 66051);
+  test(isTestingGet, "Int32", undefined, 66051);
   test(isTestingGet, "Int32", 3, 56911206);
   test(isTestingGet, "Int32", 6, 1718059137);
   test(isTestingGet, "Int32", 9, -2122152964);
@@ -152,6 +166,7 @@
 
   // Little endian.
   test(isTestingGet, "Uint32", 0, 50462976, true);
+  test(isTestingGet, "Uint32", undefined, 50462976, true);
   test(isTestingGet, "Uint32", 3, 1717920771, true);
   test(isTestingGet, "Uint32", 6, 2172675942, true);
   test(isTestingGet, "Uint32", 9, 4236477057, true);
@@ -159,6 +174,7 @@
 
   // Big endian.
   test(isTestingGet, "Uint32", 0, 66051);
+  test(isTestingGet, "Uint32", undefined, 66051);
   test(isTestingGet, "Uint32", 3, 56911206);
   test(isTestingGet, "Uint32", 6, 1718059137);
   test(isTestingGet, "Uint32", 9, 2172814332);
@@ -169,6 +185,7 @@
   // Little endian.
   createDataView(array, 0, true, start);
   test(isTestingGet, func, 0, expected, true);
+  test(isTestingGet, func, undefined, expected, true);
   createDataView(array, 3, true, start);
   test(isTestingGet, func, 3, expected, true);
   createDataView(array, 7, true, start);
@@ -179,6 +196,7 @@
   // Big endian.
   createDataView(array, 0, false);
   test(isTestingGet, func, 0, expected, false);
+  test(isTestingGet, func, undefined, expected, false);
   createDataView(array, 3, false);
   test(isTestingGet, func, 3, expected, false);
   createDataView(array, 7, false);
@@ -286,8 +304,10 @@
   var a = new DataView(new ArrayBuffer(256));
   function CheckAccessor(name) {
     var f = a[name];
+    assertThrows(function() { f(); }, TypeError);
     f.call(a, 0, 0); // should not throw
     assertThrows(function() { f.call({}, 0, 0); }, TypeError);
+    assertThrows(function() { f.call(a); }, TypeError);
   }
   CheckAccessor("getUint8");
   CheckAccessor("setUint8");
diff --git a/test/mjsunit/harmony/typedarrays.js b/test/mjsunit/harmony/typedarrays.js
index 99364c8..8e8ec57 100644
--- a/test/mjsunit/harmony/typedarrays.js
+++ b/test/mjsunit/harmony/typedarrays.js
@@ -453,8 +453,15 @@
 
   // Invalid source
   var a = new Uint16Array(50);
-  assertThrows(function() { a.set(0) }, TypeError);
-  assertThrows(function() { a.set({}) }, TypeError);
+  var expected = [];
+  for (i = 0; i < 50; i++) {
+    a[i] = i;
+    expected.push(i);
+  }
+  a.set(0);
+  assertArrayPrefix(expected, a);
+  a.set({});
+  assertArrayPrefix(expected, a);
   assertThrows(function() { a.set.call({}) }, TypeError);
   assertThrows(function() { a.set.call([]) }, TypeError);
 }
diff --git a/test/mjsunit/never-optimize.js b/test/mjsunit/never-optimize.js
new file mode 100644
index 0000000..553cd17
--- /dev/null
+++ b/test/mjsunit/never-optimize.js
@@ -0,0 +1,68 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
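+// %GetOptimizationStatus codes used below: 1 optimized, 2 not optimized,
+// 3 optimized (always-opt), 4 optimization disabled, 5 lazy recompile
+// requested.
+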
+function o1() {
+}
+
+if (%GetOptimizationStatus(o1) != 4) {
+  // 4 == optimization disabled.
+  o1(); o1();
+  %OptimizeFunctionOnNextCall(o1);
+  o1();
+
+  // check that the given function was optimized.
+  var o1_status = %GetOptimizationStatus(o1);
+  assertTrue(o1_status == 1    // optimized
+          || o1_status == 3    // optimized (always opt)
+          || o1_status == 5);  // lazy recompile requested
+
+  // Test the %NeverOptimize runtime call.
+  function u1() {
+    %NeverOptimize();
+  }
+
+  function u2() {
+  }
+
+  %NeverOptimize(u2);
+
+  u1(); u1();
+  u2(); u2();
+
+  %OptimizeFunctionOnNextCall(u1);
+  %OptimizeFunctionOnNextCall(u2);
+
+  u1(); u1();
+  u2(); u2();
+
+  // 2 => not optimized.
+  assertEquals(2, %GetOptimizationStatus(u1));
+  assertEquals(2, %GetOptimizationStatus(u2));
+
+}
\ No newline at end of file
diff --git a/test/mjsunit/opt-elements-kind.js b/test/mjsunit/opt-elements-kind.js
index 3df1d9b..b0f94f2 100644
--- a/test/mjsunit/opt-elements-kind.js
+++ b/test/mjsunit/opt-elements-kind.js
@@ -109,7 +109,7 @@
 }
 
 function construct_smis() {
-  try {} catch (e) {} // TODO(titzer): DisableOptimization
+  %NeverOptimize();
   var a = [0, 0, 0];
   a[0] = 0;  // Send the COW array map to the steak house.
   assertKind(elements_kind.fast_smi_only, a);
@@ -117,7 +117,7 @@
 }
 
 function construct_doubles() {
-  try {} catch (e) {} // TODO(titzer): DisableOptimization
+  %NeverOptimize();
   var a = construct_smis();
   a[0] = 1.5;
   assertKind(elements_kind.fast_double, a);
@@ -125,7 +125,7 @@
 }
 
 function convert_mixed(array, value, kind) {
-  try {} catch (e) {} // TODO(titzer): DisableOptimization
+  %NeverOptimize();
   array[1] = value;
   assertKind(kind, array);
   assertEquals(value, array[1]);
diff --git a/test/mjsunit/osr-elements-kind.js b/test/mjsunit/osr-elements-kind.js
index 9b0f506..e59262e 100644
--- a/test/mjsunit/osr-elements-kind.js
+++ b/test/mjsunit/osr-elements-kind.js
@@ -113,14 +113,14 @@
 
 if (support_smi_only_arrays) {
   function construct_smis() {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     var a = [0, 0, 0];
     a[0] = 0;  // Send the COW array map to the steak house.
     assertKind(elements_kind.fast_smi_only, a);
     return a;
   }
   function construct_doubles() {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     var a = construct_smis();
     a[0] = 1.5;
     assertKind(elements_kind.fast_double, a);
@@ -130,7 +130,7 @@
   // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
   // transition to FAST directly).
   function convert_mixed(array, value, kind) {
-    try {} catch (e) {} // TODO(titzer): DisableOptimization
+    %NeverOptimize();
     array[1] = value;
     assertKind(kind, array);
     assertEquals(value, array[1]);
diff --git a/test/mjsunit/regress/regress-1713b.js b/test/mjsunit/regress/regress-1713b.js
new file mode 100644
index 0000000..2e35fa4
--- /dev/null
+++ b/test/mjsunit/regress/regress-1713b.js
@@ -0,0 +1,127 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-compact --expose-gc
+
+var O = { get f() { return 0; } };
+
+var CODE = [];
+
+var R = [];
+
+function Allocate4Kb(N) {
+  var arr = [];
+  do {arr.push(new Array(1024));} while (--N > 0);
+  return arr;
+}
+
+function AllocateXMb(X) {
+  return Allocate4Kb((1024 * X) / 4);
+}
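+
+// Allocating megabytes of short-lived arrays churns the heap, forcing
+// collections while --always-compact keeps the compactor busy.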
+
+function Node(v, next) { this.v = v; this.next = next; }
+
+Node.prototype.execute = function (O) {
+  var n = this;
+  while (n.next !== null) n = n.next;
+  n.v(O);
+};
+
+function LongList(N, x) {
+  if (N == 0) return new Node(x, null);
+  return new Node(new Array(1024), LongList(N - 1, x));
+}
+
+var L = LongList(1024, function (O) {
+  for (var i = 0; i < 5; i++) O.f;
+});
+
+
+
+function Incremental(O, x) {
+  if (!x) {
+    return;
+  }
+  function CreateCode(i) {
+    var f = new Function("return O.f_" + i);
+    CODE.push(f);
+    f(); // compile
+    f(); // compile
+    f(); // compile
+  }
+
+  for (var i = 0; i < 1e4; i++) CreateCode(i);
+  gc();
+  gc();
+  gc();
+
+  print(">>> 1 <<<");
+
+  L.execute(O);
+
+  %NeverOptimize();
+
+  L = null;
+  print(">>> 2 <<<");
+  AllocateXMb(8);
+ //rint("1");
+ //llocateXMb(8);
+ //rint("1");
+ //llocateXMb(8);
+
+}
+
+function foo(O, x) {
+  Incremental(O, x);
+
+  print('f');
+
+  for (var i = 0; i < 5; i++) O.f;
+
+
+  print('g');
+
+  bar(x);
+}
+
+function bar(x) {
+  if (!x) return;
+  %DeoptimizeFunction(foo);
+  AllocateXMb(8);
+  AllocateXMb(8);
+}
+
+var O1 = {};
+var O2 = {};
+var O3 = {};
+var O4 = {f:0};
+
+foo(O1, false);
+foo(O2, false);
+foo(O3, false);
+%OptimizeFunctionOnNextCall(foo);
+foo(O4, true);
diff --git a/test/mjsunit/regress/regress-97116b.js b/test/mjsunit/regress/regress-97116b.js
new file mode 100644
index 0000000..e8b7331
--- /dev/null
+++ b/test/mjsunit/regress/regress-97116b.js
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+// Check that we are not flushing code for inlined functions that
+// have a pending lazy deoptimization on the stack.
+
+function deopt() {
+  %NeverOptimize();
+  %DeoptimizeFunction(outer);
+  for (var i = 0; i < 10; i++) gc();  // Force code flushing.
+}
+
+function outer(should_deopt) {
+  inner(should_deopt);
+}
+
+function inner(should_deopt) {
+  if (should_deopt) deopt();
+}
+
+outer(false);
+outer(false);
+%OptimizeFunctionOnNextCall(outer);
+outer(true);
diff --git a/test/mjsunit/regress/regress-crbug-173907b.js b/test/mjsunit/regress/regress-crbug-173907b.js
new file mode 100644
index 0000000..698527b
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-173907b.js
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var X = 1.1;
+var K = 0.5;
+
+var O = 0;
+var result = new Float64Array(2);
+
+function spill() {
+  %NeverOptimize();
+}
+
+function buggy() {
+  var v = X;
+  var phi1 = v + K;
+  var phi2 = v - K;
+
+  spill();  // At this point initial values for phi1 and phi2 are spilled.
+
+  var xmm1 = v;
+  var xmm2 = v*v*v;
+  var xmm3 = v*v*v*v;
+  var xmm4 = v*v*v*v*v;
+  var xmm5 = v*v*v*v*v*v;
+  var xmm6 = v*v*v*v*v*v*v;
+  var xmm7 = v*v*v*v*v*v*v*v;
+  var xmm8 = v*v*v*v*v*v*v*v*v;
+
+  // All registers are blocked and phis for phi1 and phi2 are spilled because
+  // their left (incoming) value is spilled, there are no free registers,
+  // and phis themselves have only ANY-policy uses.
+
+  for (var x = 0; x < 2; x++) {
+    xmm1 += xmm1 * xmm6;
+    xmm2 += xmm1 * xmm5;
+    xmm3 += xmm1 * xmm4;
+    xmm4 += xmm1 * xmm3;
+    xmm5 += xmm1 * xmm2;
+
+    // Now swap values of phi1 and phi2 to create cycle between phis.
+    var t = phi1;
+    phi1 = phi2;
+    phi2 = t;
+  }
+
+  // Now we want to get values of phi1 and phi2. However we would like to
+  // do it in a way that does not produce any uses of phi1 & phi2 that have
+  // a register-beneficial policy. How? We just hide these uses behind phis.
+  result[0] = (O === 0) ? phi1 : phi2;
+  result[1] = (O !== 0) ? phi1 : phi2;
+}
+
+function test() {
+  buggy();
+  assertArrayEquals([X + K, X - K], result);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(buggy);
+test();
diff --git a/test/mjsunit/regress/regress-deopt-gcb.js b/test/mjsunit/regress/regress-deopt-gcb.js
new file mode 100644
index 0000000..95bb450
--- /dev/null
+++ b/test/mjsunit/regress/regress-deopt-gcb.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// This tests that we can correctly handle a GC immediately after a function
+// has been deoptimized, even when we have an activation of this function on
+// the stack.
+
+// Ensure that there are code objects before the code for the opt_me function.
+(function() { var a = 10; a++; })();
+
+function opt_me() {
+  deopt();
+}
+
+function deopt() {
+  // Make sure we don't inline this function.
+  %NeverOptimize();
+  %DeoptimizeFunction(opt_me);
+  gc();
+}
+
+
+opt_me();
diff --git a/test/mjsunit/regress/regress-mul-canoverflowb.js b/test/mjsunit/regress/regress-mul-canoverflowb.js
new file mode 100644
index 0000000..2332e94
--- /dev/null
+++ b/test/mjsunit/regress/regress-mul-canoverflowb.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function boom(a) {
+  return ((a | 0) * (a | 0)) | 0;
+}
+function boom_unoptimized(a) {
+  %NeverOptimize();
+  return ((a | 0) * (a | 0)) | 0;
+}
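+
+// 0x5F00000F squared overflows the int32 range; the optimized multiply must
+// produce the same result as the generic, never-optimized version.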
+
+boom(1);
+boom(2);
+
+%OptimizeFunctionOnNextCall(boom);
+var big_int = 0x5F00000F;
+var expected = boom_unoptimized(big_int);
+var actual = boom(big_int);
+assertEquals(expected, actual);
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 19a1bc4..33358a3 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -337,8 +337,12 @@
         '../../src/heap.h',
         '../../src/hydrogen-bce.cc',
         '../../src/hydrogen-bce.h',
+        '../../src/hydrogen-canonicalize.cc',
+        '../../src/hydrogen-canonicalize.h',
         '../../src/hydrogen-dce.cc',
         '../../src/hydrogen-dce.h',
+        '../../src/hydrogen-dehoist.cc',
+        '../../src/hydrogen-dehoist.h',
         '../../src/hydrogen-environment-liveness.cc',
         '../../src/hydrogen-environment-liveness.h',
         '../../src/hydrogen-escape-analysis.cc',
@@ -359,6 +363,8 @@
         '../../src/hydrogen-range-analysis.h',
         '../../src/hydrogen-redundant-phi.cc',
         '../../src/hydrogen-redundant-phi.h',
+        '../../src/hydrogen-removable-simulates.cc',
+        '../../src/hydrogen-removable-simulates.h',
         '../../src/hydrogen-representation-changes.cc',
         '../../src/hydrogen-representation-changes.h',
         '../../src/hydrogen-sce.cc',
@@ -367,6 +373,8 @@
         '../../src/hydrogen-uint32-analysis.h',
         '../../src/hydrogen-osr.cc',
         '../../src/hydrogen-osr.h',
+        '../../src/icu_util.cc',
+        '../../src/icu_util.h',
         '../../src/ic-inl.h',
         '../../src/ic.cc',
         '../../src/ic.h',
@@ -827,9 +835,15 @@
             '../../src/extensions/i18n/number-format.h',
           ],
           'dependencies': [
-            '<(DEPTH)/third_party/icu/icu.gyp:*',
+            '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
+            '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
           ]
         }],
+        ['OS=="win" and v8_enable_i18n_support==1', {
+          'dependencies': [
+            '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+          ],
+        }],
       ],
     },
     {
@@ -888,7 +902,8 @@
           '../../src/object-observe.js',
           '../../src/arraybuffer.js',
           '../../src/typedarray.js',
-          '../../src/generator.js'
+          '../../src/generator.js',
+          '../../src/array-iterator.js'
         ],
         'i18n_library_files': [
           '../../src/extensions/i18n/header.js',