Version 3.1.2.

Added better security checks when accessing properties via Object.getOwnPropertyDescriptor.

Fixed a bug in Object.defineProperty and related access bugs (issues 992, 1083 and 1092).

Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease copyright notice generation for embedders.


git-svn-id: http://v8.googlecode.com/svn/trunk@6662 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/.gitignore b/.gitignore
index d85ef64..c68dadb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@
 shell
 shell_g
 /obj/
+/test/sputnik/sputniktests/
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug
diff --git a/AUTHORS b/AUTHORS
index da86488..1b756ca 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -26,6 +26,7 @@
 Matt Hanselman <mjhanselman@gmail.com>
 Martyn Capewell <martyn.capewell@arm.com>
 Michael Smith <mike@w3.org>
+Mike Gilbert <floppymaster@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>
 Patrick Gansterer <paroga@paroga.com>
 Rafal Krypa <rafal@krypa.net>
@@ -35,4 +36,4 @@
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
 Vlad Burlik <vladbph@gmail.com>
-Mike Gilbert <floppymaster@gmail.com>
+Zaheer Ahmad <zahmad@codeaurora.org>
diff --git a/ChangeLog b/ChangeLog
index f143a40..d48ded8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2011-02-07: Version 3.1.2
+
+        Added better security checks when accessing properties via
+        Object.getOwnPropertyDescriptor.
+
+        Fixed a bug in Object.defineProperty and related access bugs (issues
+        992, 1083 and 1092).
+
+        Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease
+        copyright notice generation for embedders.
+
+
 2011-02-02: Version 3.1.1
 
         Perform security checks before fetching the value in
diff --git a/src/third_party/strongtalk/LICENSE b/LICENSE.strongtalk
similarity index 78%
rename from src/third_party/strongtalk/LICENSE
rename to LICENSE.strongtalk
index 7473a7b..9bd62e4 100644
--- a/src/third_party/strongtalk/LICENSE
+++ b/LICENSE.strongtalk
@@ -6,15 +6,15 @@
 met:
 
 - Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
+this list of conditions and the following disclaimer.
 
 - Redistribution in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
 
 - Neither the name of Sun Microsystems or the names of contributors may
-  be used to endorse or promote products derived from this software without
-  specific prior written permission.
+be used to endorse or promote products derived from this software without
+specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
diff --git a/LICENSE.v8 b/LICENSE.v8
new file mode 100644
index 0000000..933718a
--- /dev/null
+++ b/LICENSE.v8
@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of Google Inc. nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE.valgrind b/LICENSE.valgrind
new file mode 100644
index 0000000..fd8ebaf
--- /dev/null
+++ b/LICENSE.valgrind
@@ -0,0 +1,45 @@
+----------------------------------------------------------------
+
+Notice that the following BSD-style license applies to this one
+file (valgrind.h) only.  The rest of Valgrind is licensed under the
+terms of the GNU General Public License, version 2, unless
+otherwise indicated.  See the COPYING file in the source
+distribution for details.
+
+----------------------------------------------------------------
+
+This file is part of Valgrind, a dynamic binary instrumentation
+framework.
+
+Copyright (C) 2000-2007 Julian Seward.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must 
+   not claim that you wrote the original software.  If you use this 
+   software in a product, an acknowledgment in the product 
+   documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+   not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote 
+   products derived from this software without specific prior written 
+   permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index ebf040d..243ba49 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -2124,7 +2124,7 @@
                         const int dst_code,
                         const VFPType src_type,
                         const int src_code,
-                        Assembler::ConversionMode mode,
+                        VFPConversionMode mode,
                         const Condition cond) {
   ASSERT(src_type != dst_type);
   int D, Vd, M, Vm;
@@ -2167,7 +2167,7 @@
 
 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
@@ -2176,7 +2176,7 @@
 
 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
@@ -2185,7 +2185,7 @@
 
 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
@@ -2194,7 +2194,7 @@
 
 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
@@ -2203,7 +2203,7 @@
 
 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
@@ -2212,7 +2212,7 @@
 
 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                              const SwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
@@ -2221,7 +2221,7 @@
 
 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
-                             ConversionMode mode,
+                             VFPConversionMode mode,
                              const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index a6edf66..fc826c7 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -942,37 +942,33 @@
   void vmov(const Register dst,
             const SwVfpRegister src,
             const Condition cond = al);
-  enum ConversionMode {
-    FPSCRRounding = 0,
-    RoundToZero = 1
-  };
   void vcvt_f64_s32(const DwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f32_s32(const SwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f64_u32(const DwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_s32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_u32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f64_f32(const DwVfpRegister dst,
                     const SwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
   void vcvt_f32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
-                    ConversionMode mode = RoundToZero,
+                    VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
 
   void vabs(const DwVfpRegister dst,
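
The vcvt_* changes above swap the assembler-local ConversionMode enum for the
shared VFPConversionMode defined in constants-arm.h. A minimal sketch of a
caller after this change (assuming a MacroAssembler and the usual __ shorthand;
illustrative only, not code from this patch):

    // Truncating conversion -- the default, kDefaultRoundToZero.
    __ vcvt_s32_f64(s0, d0);
    // Conversion that honors whatever rounding mode the FPSCR currently holds.
    __ vcvt_s32_f64(s0, d0, kFPSCRRounding);
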
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 590d8ce..437dfd2 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -396,6 +396,19 @@
                            Register scratch1,
                            Register scratch2,
                            Label* not_number);
+
+  // Loads the number from object into dst as a 32-bit integer if possible. If
+  // the object is not a 32-bit integer, control continues at the label
+  // not_int32. If VFP is supported, double_scratch is used but not scratch2.
+  static void LoadNumberAsInteger(MacroAssembler* masm,
+                                  Register object,
+                                  Register dst,
+                                  Register heap_number_map,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  DwVfpRegister double_scratch,
+                                  Label* not_int32);
+
  private:
   static void LoadNumber(MacroAssembler* masm,
                          FloatingPointHelper::Destination destination,
@@ -461,15 +474,21 @@
 
 
 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
-                                       Destination destination,
-                                       Register object,
-                                       DwVfpRegister dst,
-                                       Register dst1,
-                                       Register dst2,
-                                       Register heap_number_map,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Label* not_number) {
+                                     Destination destination,
+                                     Register object,
+                                     DwVfpRegister dst,
+                                     Register dst1,
+                                     Register dst2,
+                                     Register heap_number_map,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
   Label is_smi, done;
 
   __ JumpIfSmi(object, &is_smi);
@@ -514,6 +533,34 @@
 }
 
 
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+                                              Register object,
+                                              Register dst,
+                                              Register heap_number_map,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              DwVfpRegister double_scratch,
+                                              Label* not_int32) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi, done;
+  __ JumpIfSmi(object, &is_smi);
+  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ cmp(scratch1, heap_number_map);
+  __ b(ne, not_int32);
+  __ ConvertToInt32(
+      object, dst, scratch1, scratch2, double_scratch, not_int32);
+  __ jmp(&done);
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
+
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1676,7 +1723,7 @@
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
@@ -1687,7 +1734,7 @@
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
@@ -2529,6 +2576,18 @@
       __ and_(right, left, Operand(scratch1));
       __ Ret();
       break;
+    case Token::BIT_OR:
+      __ orr(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ eor(right, left, Operand(right));
+      __ Ret();
+      break;
     default:
       UNREACHABLE();
   }
@@ -2545,90 +2604,179 @@
   Register scratch1 = r7;
   Register scratch2 = r9;
 
-  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
-  // on whether VFP3 is available.
-  FloatingPointHelper::Destination destination =
-      CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
-      FloatingPointHelper::kVFPRegisters :
-      FloatingPointHelper::kCoreRegisters;
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
 
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  // Allocate new heap number for result.
-  Register result = r5;
-  __ AllocateHeapNumber(
-      result, scratch1, scratch2, heap_number_map, gc_required);
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+      // depending on whether VFP3 is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+          FloatingPointHelper::kVFPRegisters :
+          FloatingPointHelper::kCoreRegisters;
 
-  // Load the operands.
-  if (smi_operands) {
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
-  } else {
-    FloatingPointHelper::LoadOperands(masm,
-                                      destination,
-                                      heap_number_map,
-                                      scratch1,
-                                      scratch2,
-                                      not_numbers);
-  }
+      // Allocate new heap number for result.
+      Register result = r5;
+      __ AllocateHeapNumber(
+          result, scratch1, scratch2, heap_number_map, gc_required);
 
-  // Calculate the result.
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    // Using VFP registers:
-    // d6: Left value
-    // d7: Right value
-    CpuFeatures::Scope scope(VFP3);
-    switch (op_) {
-      case Token::ADD:
-        __ vadd(d5, d6, d7);
-        break;
-      case Token::SUB:
-        __ vsub(d5, d6, d7);
-        break;
-      case Token::MUL:
-        __ vmul(d5, d6, d7);
-        break;
-      case Token::DIV:
-        __ vdiv(d5, d6, d7);
-        break;
-      default:
-        UNREACHABLE();
-    }
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
 
-    __ sub(r0, result, Operand(kHeapObjectTag));
-    __ vstr(d5, r0, HeapNumber::kValueOffset);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-    __ Ret();
-  } else {
-    // Using core registers:
-    // r0: Left value (least significant part of mantissa).
-    // r1: Left value (sign, exponent, top of mantissa).
-    // r2: Right value (least significant part of mantissa).
-    // r3: Right value (sign, exponent, top of mantissa).
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        // Using VFP registers:
+        // d6: Left value
+        // d7: Right value
+        CpuFeatures::Scope scope(VFP3);
+        switch (op_) {
+          case Token::ADD:
+            __ vadd(d5, d6, d7);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d6, d7);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d6, d7);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d6, d7);
+            break;
+          default:
+            UNREACHABLE();
+        }
 
-    __ push(lr);  // For later.
-    __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
-    // Call C routine that may not cause GC or other trouble. r5 is callee
-    // save.
-    __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
-    // Store answer in the overwritable heap number.
+        __ sub(r0, result, Operand(kHeapObjectTag));
+        __ vstr(d5, r0, HeapNumber::kValueOffset);
+        __ add(r0, r0, Operand(kHeapObjectTag));
+        __ Ret();
+      } else {
+        // Using core registers:
+        // r0: Left value (least significant part of mantissa).
+        // r1: Left value (sign, exponent, top of mantissa).
+        // r2: Right value (least significant part of mantissa).
+        // r3: Right value (sign, exponent, top of mantissa).
+
+        // Push the current return address before the C call. Return will be
+        // through pop(pc) below.
+        __ push(lr);
+        __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
+        // Call C routine that may not cause GC or other trouble. r5 is callee
+        // save.
+        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+        // Store answer in the overwritable heap number.
 #if !defined(USE_ARM_EABI)
-    // Double returned in fp coprocessor register 0 and 1, encoded as
-    // register cr8.  Offsets must be divisible by 4 for coprocessor so we
-    // need to substract the tag from r5.
-    __ sub(scratch1, result, Operand(kHeapObjectTag));
-    __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+        // Double returned in fp coprocessor register 0 and 1, encoded as
+        // register cr8.  Offsets must be divisible by 4 for coprocessor so we
+        // need to subtract the tag from r5.
+        __ sub(scratch1, result, Operand(kHeapObjectTag));
+        __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
 #else
-    // Double returned in registers 0 and 1.
-    __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+        // Double returned in registers 0 and 1.
+        __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
 #endif
-    __ mov(r0, Operand(result));
-    // And we are done.
-    __ pop(pc);
+        // Place the result in r0 and return to the pushed return address.
+        __ mov(r0, Operand(result));
+        __ pop(pc);
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      if (smi_operands) {
+        __ SmiUntag(r3, left);
+        __ SmiUntag(r2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in r2 and left in r3.
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 left,
+                                                 r3,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 right,
+                                                 r2,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+      }
+      switch (op_) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      Label result_not_a_smi;
+      // Check that the *signed* result fits in a smi.
+      __ add(r3, r2, Operand(0x40000000), SetCC);
+      __ b(mi, &result_not_a_smi);
+      __ SmiTag(r0, r2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      __ AllocateHeapNumber(
+          r5, scratch1, scratch2, heap_number_map, gc_required);
+
+      // r2: Answer as signed int32.
+      // r5: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to r0, which is the
+      // result.
+      __ mov(r0, Operand(r5));
+
+      if (CpuFeatures::IsSupported(VFP3)) {
+        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+        CpuFeatures::Scope scope(VFP3);
+        __ vmov(s0, r2);
+        __ vcvt_f64_s32(d0, s0);
+        __ sub(r3, r0, Operand(kHeapObjectTag));
+        __ vstr(d0, r3, HeapNumber::kValueOffset);
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in r2 to the heap number in r0, using
+        // r3 as scratch. r0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
   }
 }
 
@@ -2646,7 +2794,10 @@
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Register left = r1;
   Register right = r0;
@@ -2678,7 +2829,10 @@
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
       result_type_ == TRBinaryOpIC::SMI) {
@@ -2714,7 +2868,10 @@
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
 
@@ -2727,7 +2884,10 @@
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label not_numbers, call_runtime;
   ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@@ -2747,7 +2907,10 @@
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label call_runtime;
 
@@ -2812,6 +2975,15 @@
     case Token::MOD:
       __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
       break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      break;
     default:
       UNREACHABLE();
   }
@@ -3037,7 +3209,7 @@
     __ b(ne, &slow);
 
     // Convert the heap number is r0 to an untagged integer in r1.
-    __ ConvertToInt32(r0, r1, r2, r3, &slow);
+    __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
 
     // Do the bitwise operation (move negated) and check if the result
     // fits in a smi.
@@ -3329,9 +3501,17 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
+  // Compute the argv pointer in a callee-saved register.
+  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+  __ sub(r6, r6, Operand(kPointerSize));
+
   // Enter the exit frame that transitions from JavaScript to C++.
   __ EnterExitFrame(save_doubles_);
 
+  // Set up argc and the builtin function in callee-saved registers.
+  __ mov(r4, Operand(r0));
+  __ mov(r5, Operand(r1));
+
   // r4: number of arguments (C callee-saved)
   // r5: pointer to builtin function (C callee-saved)
   // r6: pointer to first argument (C callee-saved)
@@ -5734,6 +5914,90 @@
 }
 
 
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  __ ldr(pc, MemOperand(sp, 0));
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    ApiFunction *function) {
+  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                     RelocInfo::CODE_TARGET));
+  // Push return address (accessible to GC through exit frame pc).
+  __ mov(r2,
+         Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+  __ str(pc, MemOperand(sp, 0));
+  __ Jump(r2);  // Call the api function.
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - set to be the receiver's elements on exit.
+  //
+  // elements_map - set to be the map of the receiver's elements
+  //            on exit.
+  //
+  // result   - holds the result of the pixel array load on exit,
+  //            tagged as a smi if successful.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used as a scratch register in the map check; if the map
+  //            check is successful, contains the length of the
+  //            pixel array, the pointer to external elements and
+  //            the untagged result.
+  //
+  // scratch2 - holds the untagged key.
+
+  // Some callers have already verified that the key is a smi.  key_not_smi
+  // is set to NULL as a sentinel for that case.  Otherwise, an explicit
+  // check that the key is a smi is generated.
+  if (key_not_smi != NULL) {
+    __ JumpIfNotSmi(key, key_not_smi);
+  } else {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key);
+    }
+  }
+  __ SmiUntag(scratch2, key);
+
+  // Verify that the receiver has pixel array elements.
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
+              not_pixel_array, true);
+
+  // Key must be in range of the pixel array.
+  __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
+  __ cmp(scratch2, scratch1);
+  __ b(hs, out_of_range);  // unsigned check handles negative keys.
+
+  // Perform the indexed load and tag the result as a smi.
+  __ ldr(scratch1,
+         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+  __ ldrb(scratch1, MemOperand(scratch1, scratch2));
+  __ SmiTag(r0, scratch1);
+  __ Ret();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
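
The smi check in the new bitwise path above (add(r3, r2, Operand(0x40000000),
SetCC) followed by b(mi, &result_not_a_smi)) uses a range-shift trick: a
32-bit value is a valid smi payload iff it lies in [-2^30, 2^30), and adding
0x40000000 maps exactly that range onto the non-negative 32-bit integers. A
small C++ sketch of the same predicate (illustrative only, not code from this
patch):

    #include <cstdint>

    // True iff v fits in a smi.  The unsigned add mirrors the ARM 'add'
    // above; the sign bit of the sum is set exactly when v is out of range.
    bool FitsInSmi(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }
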
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 9e9047e..bf7d635 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -571,6 +571,45 @@
 };
 
 
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects) we need
+// to keep the code that called into native code pinned in memory. Currently
+// the simplest approach is to generate such a stub early enough that it can
+// never be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+  DirectCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+  void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+
+ private:
+  Major MajorKey() { return DirectCEntry; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "DirectCEntryStub"; }
+};
+
+
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// the generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key,
+// so that the smi check is not generated. If key is not a valid index within
+// the bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODE_STUBS_ARM_H_
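
For context on DirectCEntryStub declared above: its entire body is a single
ldr pc, [sp] (see Generate in code-stubs-arm.cc), and GenerateCall stores the
return address into that stack slot before jumping to the native function, so
the GC can always locate the return point through the exit frame. A hedged
usage sketch (assuming a MacroAssembler* masm and an ApiFunction* function
are already set up; this mirrors GenerateCall rather than adding behavior):

    DirectCEntryStub stub;
    // Calls 'function' through the pinned trampoline: the native callee
    // returns into the stub via lr, and the stub then resumes at the
    // address GenerateCall saved at MemOperand(sp, 0).
    stub.GenerateCall(masm, function);
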
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 9a60183..1284223 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1110,7 +1110,7 @@
 
   Register int32 = r2;
   // Not a 32bits signed int, fall back to the GenericBinaryOpStub.
-  __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+  __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
 
   // tos_register_ (r0 or r1): Original heap number.
   // int32: signed 32bits int.
@@ -4177,7 +4177,10 @@
       __ ldr(r1, frame_->Receiver());
       frame_->EmitPush(r1);
 
-      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+      // Push the strict mode flag.
+      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
+      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
 
       done.Jump();
       slow.Bind();
@@ -4197,8 +4200,11 @@
     __ ldr(r1, frame_->Receiver());
     frame_->EmitPush(r1);
 
+    // Push the strict mode flag.
+    frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
     // Resolve the call.
-    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
     // If we generated fast-case code bind the jump-target where fast
     // and slow case merge.
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 589e704..8f46256 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -287,6 +287,7 @@
   // Accessors
   inline bool is_eval();
   inline Scope* scope();
+  inline StrictModeFlag strict_mode_flag();
 
   // Generating deferred code.
   void ProcessDeferred();
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 7502ef0..5671fee 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -380,10 +380,13 @@
 
 
 // VFP FPSCR constants.
+enum VFPConversionMode {
+  kFPSCRRounding = 0,
+  kDefaultRoundToZero = 1
+};
+
 static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
 static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
 static const uint32_t kVFPInvalidExceptionBit = 1;
 
 static const uint32_t kVFPNConditionFlagBit = 1 << 31;
@@ -393,13 +396,20 @@
 
 
 // VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum FPSCRRoundingModes {
-  RN,   // Round to Nearest.
-  RP,   // Round towards Plus Infinity.
-  RM,   // Round towards Minus Infinity.
-  RZ    // Round towards zero.
+enum VFPRoundingMode {
+  RN = 0 << 22,   // Round to Nearest.
+  RP = 1 << 22,   // Round towards Plus Infinity.
+  RM = 2 << 22,   // Round towards Minus Infinity.
+  RZ = 3 << 22,   // Round towards zero.
+
+  // Aliases.
+  kRoundToNearest = RN,
+  kRoundToPlusInf = RP,
+  kRoundToMinusInf = RM,
+  kRoundToZero = RZ
 };
 
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
 
 // -----------------------------------------------------------------------------
 // Hints.
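
With the rounding modes above now encoded at their FPSCR bit positions (bits
23:22), installing a mode is a plain mask-and-or on a status register image.
A minimal sketch under that assumption (illustrative only, not code from this
patch):

    #include <cstdint>

    // Install a VFP rounding mode into an FPSCR image.  RN/RP/RM/RZ carry
    // their bits pre-shifted, so no extra shifting is needed here.
    uint32_t WithRoundingMode(uint32_t fpscr, VFPRoundingMode mode) {
      return (fpscr & ~kVFPRoundingModeMask) | mode;
    }
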
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 75399c6..e05001f 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -97,7 +97,7 @@
 #ifdef DEBUG
   // Destroy the code which is not supposed to be run again.
   int instructions =
-      (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+      (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
   CodePatcher destroyer(code->instruction_start() + last_pc_offset,
                         instructions);
   for (int x = 0; x < instructions; x++) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 23e5f69..ff446c5 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1554,7 +1554,10 @@
       op == Token::SUB ||
       op == Token::MUL ||
       op == Token::DIV ||
-      op == Token::MOD) {
+      op == Token::MOD ||
+      op == Token::BIT_OR ||
+      op == Token::BIT_AND ||
+      op == Token::BIT_XOR) {
     TypeRecordingBinaryOpStub stub(op, mode);
     __ CallStub(&stub);
   } else {
@@ -1923,7 +1926,10 @@
       __ ldr(r1,
              MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
       __ push(r1);
-      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+      // Push the strict mode flag.
+      __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+      __ push(r1);
+      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index d74468c..1aa031d 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1189,19 +1189,18 @@
   // r0: key
   // r1: receiver
   __ bind(&check_pixel_array);
-  __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
-  __ cmp(r3, ip);
-  __ b(ne, &check_number_dictionary);
-  __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
-  __ mov(r2, Operand(key, ASR, kSmiTagSize));
-  __ cmp(r2, ip);
-  __ b(hs, &slow);
-  __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
-  __ ldrb(r2, MemOperand(ip, r2));
-  __ mov(r0, Operand(r2, LSL, kSmiTagSize));  // Tag result as smi.
-  __ Ret();
+
+  GenerateFastPixelArrayLoad(masm,
+                             r1,
+                             r0,
+                             r3,
+                             r4,
+                             r2,
+                             r5,
+                             r0,
+                             &check_number_dictionary,
+                             NULL,
+                             &slow);
 
   __ bind(&check_number_dictionary);
   // Check whether the elements is a number dictionary.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index b7c1b78..f672d49 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -25,6 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "lithium-allocator-inl.h"
 #include "arm/lithium-arm.h"
 #include "arm/lithium-codegen-arm.h"
 
@@ -56,6 +57,31 @@
 }
 
 
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as
+  // temporaries and outputs because all registers
+  // are blocked by the calling convention.
+  // Inputs can either use a fixed register or have a short lifetime (be
+  // used at the start of the instruction).
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           LUnallocated::cast(operand)->IsUsedAtStart() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+  for (TempIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+}
+#endif
+
+
 void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
                                           LOperand* spill_operand) {
   ASSERT(spill_operand->IsDoubleStackSlot());
@@ -66,9 +92,8 @@
 
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
-  if (HasResult()) {
-    PrintOutputOperandTo(stream);
-  }
+
+  PrintOutputOperandTo(stream);
 
   PrintDataTo(stream);
 
@@ -158,6 +183,9 @@
     case Token::MUL: return "mul-t";
     case Token::MOD: return "mod-t";
     case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
     default:
       UNREACHABLE();
       return NULL;
@@ -258,7 +286,15 @@
 
 
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
 }
 
 
@@ -390,7 +426,7 @@
 }
 
 
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LGap* gap = new LGap(block);
   int index = -1;
   if (instr->IsControl()) {
@@ -406,7 +442,6 @@
     pointer_maps_.Add(instr->pointer_map());
     instr->pointer_map()->set_lithium_position(index);
   }
-  return index;
 }
 
 
@@ -672,7 +707,10 @@
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  allocator_->MarkAsCall();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
   if (hinstr->HasSideEffects()) {
@@ -697,7 +735,7 @@
 
 
 LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  allocator_->MarkAsSaveDoubles();
+  instr->MarkAsSaveDoubles();
   return instr;
 }
 
@@ -742,13 +780,23 @@
 
 LInstruction* LChunkBuilder::DoBit(Token::Value op,
                                    HBitwiseBinaryOperation* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().IsInteger32());
-  ASSERT(instr->right()->representation().IsInteger32());
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
 
-  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-  return DefineSameAsFirst(new LBitI(op, left, right));
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
 
 
@@ -887,7 +935,6 @@
 void LChunkBuilder::VisitInstruction(HInstruction* current) {
   HInstruction* old_current = current_instruction_;
   current_instruction_ = current;
-  allocator_->BeginInstruction();
   if (current->has_position()) position_ = current->position();
   LInstruction* instr = current->CompileToLithium(this);
 
@@ -910,11 +957,7 @@
       instr->set_hydrogen_value(current);
     }
 
-    int index = chunk_->AddInstruction(instr, current_block_);
-    allocator_->SummarizeInstruction(index);
-  } else {
-    // This instruction should be omitted.
-    allocator_->OmitInstruction();
+    chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
 }
@@ -1105,13 +1148,26 @@
 }
 
 
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
+}
+
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new LGlobalObject);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
-  return DefineAsRegister(new LGlobalReceiver);
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
 
 
@@ -1514,7 +1570,7 @@
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      LDoubleToI* res = new LDoubleToI(value);
+      LDoubleToI* res = new LDoubleToI(value, TempRegister());
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
@@ -1621,7 +1677,20 @@
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
-  return DefineAsRegister(new LLoadContextSlot);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context = UseTempRegister(instr->context());
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    value = UseTempRegister(instr->value());
+  } else {
+    value = UseRegister(instr->value());
+  }
+  return new LStoreContextSlot(context, value);
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 7f89ee2..a076c80 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -39,118 +39,6 @@
 // Forward declarations.
 class LCodeGen;
 
-
-// Type hierarchy:
-//
-// LInstruction
-//   LTemplateInstruction
-//     LControlInstruction
-//       LBranch
-//       LClassOfTestAndBranch
-//       LCmpJSObjectEqAndBranch
-//       LCmpIDAndBranch
-//       LHasCachedArrayIndexAndBranch
-//       LHasInstanceTypeAndBranch
-//       LInstanceOfAndBranch
-//       LIsNullAndBranch
-//       LIsObjectAndBranch
-//       LIsSmiAndBranch
-//       LTypeofIsAndBranch
-//     LAccessArgumentsAt
-//     LArgumentsElements
-//     LArgumentsLength
-//     LAddI
-//     LApplyArguments
-//     LArithmeticD
-//     LArithmeticT
-//     LBitI
-//     LBoundsCheck
-//     LCmpID
-//     LCmpJSObjectEq
-//     LCmpT
-//     LDivI
-//     LInstanceOf
-//     LInstanceOfKnownGlobal
-//     LLoadKeyedFastElement
-//     LLoadKeyedGeneric
-//     LModI
-//     LMulI
-//     LPower
-//     LShiftI
-//     LSubI
-//     LCallConstantFunction
-//     LCallFunction
-//     LCallGlobal
-//     LCallKeyed
-//     LCallKnownGlobal
-//     LCallNamed
-//     LCallRuntime
-//     LCallStub
-//     LConstant
-//       LConstantD
-//       LConstantI
-//       LConstantT
-//     LDeoptimize
-//     LFunctionLiteral
-//     LGap
-//       LLabel
-//     LGlobalObject
-//     LGlobalReceiver
-//     LGoto
-//     LLazyBailout
-//     LLoadGlobal
-//     LCheckPrototypeMaps
-//     LLoadContextSlot
-//     LArrayLiteral
-//     LObjectLiteral
-//     LRegExpLiteral
-//     LOsrEntry
-//     LParameter
-//     LRegExpConstructResult
-//     LStackCheck
-//     LStoreKeyed
-//       LStoreKeyedFastElement
-//       LStoreKeyedGeneric
-//     LStoreNamed
-//       LStoreNamedField
-//       LStoreNamedGeneric
-//     LStringCharCodeAt
-//     LBitNotI
-//     LCallNew
-//     LCheckFunction
-//     LCheckPrototypeMaps
-//     LCheckInstanceType
-//     LCheckMap
-//     LCheckSmi
-//     LClassOfTest
-//     LDeleteProperty
-//     LDoubleToI
-//     LFixedArrayLength
-//     LHasCachedArrayIndex
-//     LHasInstanceType
-//     LInteger32ToDouble
-//     LIsNull
-//     LIsObject
-//     LIsSmi
-//     LJSArrayLength
-//     LLoadNamedField
-//     LLoadNamedGeneric
-//     LLoadFunctionPrototype
-//     LNumberTagD
-//     LNumberTagI
-//     LPushArgument
-//     LReturn
-//     LSmiTag
-//     LStoreGlobal
-//     LStringLength
-//     LTaggedToI
-//     LThrow
-//     LTypeof
-//     LTypeofIs
-//     LUnaryMathOperation
-//     LValueOf
-//     LUnknownOSRValue
-
 #define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
   V(ControlInstruction)                         \
   V(Constant)                                   \
@@ -187,6 +75,8 @@
   V(CheckMap)                                   \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
   V(CmpID)                                      \
   V(CmpIDAndBranch)                             \
   V(CmpJSObjectEq)                              \
@@ -197,6 +87,7 @@
   V(ConstantD)                                  \
   V(ConstantI)                                  \
   V(ConstantT)                                  \
+  V(Context)                                    \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
@@ -207,6 +98,10 @@
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
@@ -218,22 +113,16 @@
   V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
   V(JSArrayLength)                              \
-  V(HasInstanceType)                            \
-  V(HasInstanceTypeAndBranch)                   \
-  V(HasCachedArrayIndex)                        \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(ClassOfTest)                                \
-  V(ClassOfTestAndBranch)                       \
   V(Label)                                      \
   V(LazyBailout)                                \
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
+  V(LoadFunctionPrototype)                      \
   V(LoadGlobal)                                 \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadNamedField)                             \
   V(LoadNamedGeneric)                           \
-  V(LoadFunctionPrototype)                      \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -241,6 +130,7 @@
   V(NumberUntagD)                               \
   V(ObjectLiteral)                              \
   V(OsrEntry)                                   \
+  V(OuterContext)                               \
   V(Parameter)                                  \
   V(PushArgument)                               \
   V(RegExpLiteral)                              \
@@ -249,14 +139,15 @@
   V(SmiTag)                                     \
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
+  V(StoreContextSlot)                           \
   V(StoreGlobal)                                \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
-  V(SubI)                                       \
   V(StringCharCodeAt)                           \
   V(StringLength)                               \
+  V(SubI)                                       \
   V(TaggedToI)                                  \
   V(Throw)                                      \
   V(Typeof)                                     \
@@ -290,7 +181,10 @@
 class LInstruction: public ZoneObject {
  public:
   LInstruction()
-      : hydrogen_value_(NULL) { }
+      :  environment_(NULL),
+         hydrogen_value_(NULL),
+         is_call_(false),
+         is_save_doubles_(false) { }
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -307,16 +201,14 @@
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
 
-  void set_environment(LEnvironment* env) { environment_.set(env); }
-  LEnvironment* environment() const { return environment_.get(); }
-  bool HasEnvironment() const { return environment_.is_set(); }
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
 
   void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }
 
-  virtual bool HasResult() const = 0;
-
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
@@ -330,11 +222,35 @@
     return deoptimization_environment_.is_set();
   }
 
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
  private:
-  SetOncePointer<LEnvironment> environment_;
+  LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
   SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
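
The two new flags replace ad-hoc subclass checks: the chunk builder marks an instruction when it lowers a call, and the register allocator later consults the mark to treat every allocatable register as clobbered at that point. A minimal sketch of a consumer (SpillLiveRegistersAt and SaveDoubleRegistersAt are hypothetical stand-ins, not V8 functions):

    // Sketch: how an allocator pass might consume the marks added above.
    void ProcessInstruction(LInstruction* instr) {
      if (instr->IsMarkedAsCall()) {
        // A call clobbers all allocatable registers, so any value live
        // across this instruction must be kept in a stack slot.
        SpillLiveRegistersAt(instr);
      }
      if (instr->IsMarkedAsSaveDoubles()) {
        SaveDoubleRegistersAt(instr);  // preserved via save/restore instead
      }
    }
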
 
 
@@ -361,6 +277,11 @@
  public:
   int length() { return 0; }
   void PrintOperandsTo(StringStream* stream) { }
+  ElementType& operator[](int i) {
+    UNREACHABLE();
+    static ElementType t = 0;
+    return t;
+  }
 };
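
The operator[] added to the zero-length specialization exists only so generic code that indexes the operand arrays still compiles when a shape parameter is 0; it can never execute, hence UNREACHABLE() followed by a dummy static to satisfy the reference return type. The pattern in isolation (a sketch, not the real template):

    // A fixed-size container whose zero-length specialization still offers
    // operator[] so templated callers compile.
    template <typename T, int N>
    struct EmbeddedArray {
      T& operator[](int i) { return elems_[i]; }
      T elems_[N];
    };

    template <typename T>
    struct EmbeddedArray<T, 0> {
      int length() { return 0; }
      T& operator[](int i) {
        // Never reached: callers always guard on length() first.
        static T dummy = 0;
        return dummy;
      }
    };
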
 
 
@@ -1266,18 +1187,41 @@
 };
 
 
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
   DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
 
-  int context_chain_length() { return hydrogen()->context_chain_length(); }
+  LOperand* context() { return InputAt(0); }
   int slot_index() { return hydrogen()->slot_index(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* value() { return InputAt(1); }
+  int slot_index() { return hydrogen()->slot_index(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
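
The three template arguments of LTemplateInstruction encode the instruction shape as <results, inputs, temps>: LStoreContextSlot above is <0, 2, 0> because a store produces no value and consumes a context and a value, while LLoadContextSlot is <1, 1, 0>. A reduced model of the encoding (std::array stands in for the embedded operand arrays; this is not the real class):

    #include <array>

    class LOperand;  // from the patch above

    template <int R, int I, int T>
    class ShapedInstruction {
     public:
      bool HasResult() const { return R != 0; }
      int InputCount() const { return I; }
      int TempCount() const { return T; }
     protected:
      std::array<LOperand*, R> results_;  // std::array<T, 0> is legal
      std::array<LOperand*, I> inputs_;
      std::array<LOperand*, T> temps_;
    };
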
+
+
 class LPushArgument: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
@@ -1288,15 +1232,45 @@
 };
 
 
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
 };
 
 
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LOuterContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalObject(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalReceiver(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+  LOperand* global() { return InputAt(0); }
 };
 
 
@@ -1431,10 +1405,11 @@
 
 
 // Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
  public:
-  explicit LDoubleToI(LOperand* value) {
+  explicit LDoubleToI(LOperand* value, LOperand* temp1) {
     inputs_[0] = value;
+    temps_[0] = temp1;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1789,7 +1764,7 @@
  public:
   explicit LChunk(HGraph* graph);
 
-  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
   LConstantOperand* DefineConstantOperand(HConstant* constant);
   Handle<Object> LookupLiteral(LConstantOperand* operand) const;
   Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index a1adae3..855ed46 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -223,7 +223,7 @@
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
   code->set_stack_slots(StackSlotCount());
-  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
 
@@ -1174,7 +1174,7 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
     // scratch:left = left * right.
-    __ smull(scratch, left, left, right);
+    __ smull(left, scratch, left, right);
     __ mov(ip, Operand(left, ASR, 31));
     __ cmp(ip, Operand(scratch));
     DeoptimizeIf(ne, instr->environment());
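
The smull fix swaps the destination operands: the instruction is smull(RdLo, RdHi, Rn, Rm), so the low 32 bits of the product must land in left and the high 32 bits in scratch. The two instructions that follow then deoptimize unless the high word equals the sign extension (ASR #31) of the low word, i.e. unless the 64-bit product fits in 32 bits. The same test as a standalone C++ rendering:

    #include <stdint.h>

    // A 32x32->64 signed multiply fits in 32 bits iff the high word equals
    // the arithmetic right shift by 31 of the low word.
    bool MulOverflows(int32_t left, int32_t right, int32_t* low) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      *low = lo;
      return hi != (lo >> 31);  // ne condition => DeoptimizeIf fires
    }
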
@@ -1398,7 +1398,18 @@
       __ vdiv(left, left, right);
       break;
     case Token::MOD: {
-      Abort("DoArithmeticD unimplemented for MOD.");
+      // Save r0-r3 on the stack.
+      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
+
+      __ PrepareCallCFunction(4, scratch0());
+      __ vmov(r0, r1, left);
+      __ vmov(r2, r3, right);
+      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+      // Move the result into the double result register.
+      __ vmov(ToDoubleRegister(instr->result()), r0, r1);
+
+      // Restore r0-r3.
+      __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
       break;
     }
     default:
@@ -1595,17 +1606,58 @@
 
 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
   __ cmp(ToRegister(left), ToOperand(right));
-  Abort("EmitCmpI untested.");
 }
 
 
 void LCodeGen::DoCmpID(LCmpID* instr) {
-  Abort("DoCmpID unimplemented.");
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  Register scratch = scratch0();
+
+  Label unordered, done;
+  if (instr->is_double()) {
+    // Compare left and right as doubles and load the
+    // resulting flags into the normal status register.
+    __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ vmrs(pc);
+    // If a NaN is involved, i.e. the result is unordered (V set),
+    // jump to unordered to return false.
+    __ b(vs, &unordered);
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+  __ b(cc, &done);
+
+  __ bind(&unordered);
+  __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
-  Abort("DoCmpIDAndBranch unimplemented.");
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  if (instr->is_double()) {
+    // Compare left and right as doubles and load the
+    // resulting flags into the normal status register.
+    __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ vmrs(pc);
+    // If a NaN is involved, i.e. the result is unordered (V set),
+    // jump to false block label.
+    __ b(vs, chunk_->GetAssemblyLabel(false_block));
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
 }
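
Both routines use vcmp followed by vmrs to copy the VFP flags into the CPSR; a comparison involving NaN is "unordered" and sets the V flag, so the vs branch routes it to false (or to the false block) before the token condition is tested. The contract in scalar form (a model, using the source's own val != val NaN test):

    // Scalar model of the unordered handling above: any compare that
    // involves a NaN must evaluate to false, whatever the operator.
    bool CompareDoubles(double left, double right, char op) {
      if (left != left || right != right) return false;  // the vs branch
      switch (op) {
        case '<': return left < right;
        case '>': return left > right;
        default:  return left == right;
      }
    }
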
 
 
@@ -2201,13 +2253,27 @@
 
 
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
-  // TODO(antonm): load a context with a separate instruction.
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ LoadContext(result, instr->context_chain_length());
+  __ ldr(result,
+         MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   __ ldr(result, ContextOperand(result, instr->slot_index()));
 }
 
 
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  __ ldr(context,
+         MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  __ str(value, ContextOperand(context, instr->slot_index()));
+  if (instr->needs_write_barrier()) {
+    int offset = Context::SlotOffset(instr->slot_index());
+    __ RecordWrite(context, Operand(offset), value, scratch0());
+  }
+}
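
DoStoreContextSlot emits RecordWrite only when hydrogen proved the barrier necessary: the generational GC must learn when an old-space context starts referencing a new-space value, or the next scavenge would miss the slot. Schematically (InNewSpace and RememberSlot are illustrative stand-ins for the heap's remembered-set machinery, not V8 declarations):

    class Object;
    bool InNewSpace(Object* value);
    void RememberSlot(Object** slot);

    void StoreContextSlot(Object** slot, Object* value, bool needs_barrier) {
      *slot = value;
      if (needs_barrier && InNewSpace(value)) {
        RememberSlot(slot);  // scanned by the next scavenge
      }
    }
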
+
+
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   Register object = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
@@ -2458,16 +2524,32 @@
 }
 
 
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ ldr(result,
+         MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+  __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+}
+
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(result, ContextOperand(context, Context::GLOBAL_INDEX));
 }
 
 
 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register global = ToRegister(instr->global());
   Register result = ToRegister(instr->result());
-  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
 }
 
 
@@ -2625,34 +2707,53 @@
 }
 
 
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register prev_fpscr = ToRegister(instr->TempAt(0));
-  SwVfpRegister single_scratch = double_scratch0().low();
-  Register scratch = scratch0();
+// Truncates a double using a specific rounding mode.
+// Clears the z flag (ne condition) if an overflow occurs.
+void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+                               SwVfpRegister result,
+                               DwVfpRegister double_input,
+                               Register scratch1,
+                               Register scratch2) {
+  Register prev_fpscr = scratch1;
+  Register scratch = scratch2;
 
   // Set custom FPSCR:
-  //  - Set rounding mode to "Round towards Minus Infinity".
+  //  - Set rounding mode.
   //  - Clear vfp cumulative exception flags.
   //  - Make sure Flush-to-zero mode control bit is unset.
   __ vmrs(prev_fpscr);
-  __ bic(scratch, prev_fpscr,
-      Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
-  __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
+  __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
+                                      kVFPRoundingModeMask |
+                                      kVFPFlushToZeroMask));
+  __ orr(scratch, scratch, Operand(rounding_mode));
   __ vmsr(scratch);
 
   // Convert the argument to an integer.
-  __ vcvt_s32_f64(single_scratch,
-                  input,
-                  Assembler::FPSCRRounding,
-                  al);
+  __ vcvt_s32_f64(result,
+                  double_input,
+                  kFPSCRRounding);
 
-  // Retrieve FPSCR and check for vfp exceptions.
+  // Retrieve FPSCR.
   __ vmrs(scratch);
-  // Restore FPSCR
+  // Restore FPSCR.
   __ vmsr(prev_fpscr);
+  // Check for vfp exceptions.
   __ tst(scratch, Operand(kVFPExceptionMask));
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  SwVfpRegister single_scratch = double_scratch0().low();
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+
+  EmitVFPTruncate(kRoundToMinusInf,
+                  single_scratch,
+                  input,
+                  scratch1,
+                  scratch2);
   DeoptimizeIf(ne, instr->environment());
 
   // Move the result back to general purpose register r0.
@@ -2662,8 +2763,8 @@
   Label done;
   __ cmp(result, Operand(0));
   __ b(ne, &done);
-  __ vmov(scratch, input.high());
-  __ tst(scratch, Operand(HeapNumber::kSignMask));
+  __ vmov(scratch1, input.high());
+  __ tst(scratch1, Operand(HeapNumber::kSignMask));
   DeoptimizeIf(ne, instr->environment());
   __ bind(&done);
 }
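
EmitVFPTruncate brackets the conversion: save FPSCR, install the requested rounding mode with the cumulative exception flags cleared, convert, restore FPSCR, then test the saved exception bits so the caller can deoptimize on an invalid conversion. A portable approximation using C99/C++ fenv (lrint raises FE_INVALID for NaN or out-of-range input, loosely mirroring kVFPExceptionMask; this assumes the platform honors fenv rounding modes):

    #include <cfenv>
    #include <cmath>
    #include <cstdint>

    bool TruncateDouble(double input, int rounding_mode, int32_t* result) {
      int saved_mode = std::fegetround();
      std::feclearexcept(FE_ALL_EXCEPT);
      std::fesetround(rounding_mode);  // e.g. FE_DOWNWARD ~ kRoundToMinusInf
      long converted = std::lrint(input);
      std::fesetround(saved_mode);     // restore, like vmsr(prev_fpscr)
      *result = static_cast<int32_t>(converted);
      return std::fetestexcept(FE_INVALID) == 0;  // false => deoptimize
    }
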
@@ -3297,7 +3398,42 @@
 
 
 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
-  Abort("DoDoubleToI unimplemented.");
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsRegister());
+
+  DoubleRegister double_input = ToDoubleRegister(input);
+  Register result_reg = ToRegister(result);
+  SwVfpRegister single_scratch = double_scratch0().low();
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+
+  VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
+                                                      : kRoundToNearest;
+
+  EmitVFPTruncate(rounding_mode,
+                  single_scratch,
+                  double_input,
+                  scratch1,
+                  scratch2);
+  // Deoptimize if we had a vfp invalid exception.
+  DeoptimizeIf(ne, instr->environment());
+  // Retrieve the result.
+  __ vmov(result_reg, single_scratch);
+
+  if (instr->truncating() &&
+      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label done;
+    __ cmp(result_reg, Operand(0));
+    __ b(ne, &done);
+    // Check for -0.
+    __ vmov(scratch1, double_input.high());
+    __ tst(scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment());
+
+    __ bind(&done);
+  }
 }
 
 
@@ -3497,7 +3633,7 @@
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
-  bool pretenure = !instr->hydrogen()->pretenure();
+  bool pretenure = instr->hydrogen()->pretenure();
   if (shared_info->num_literals() == 0 && !pretenure) {
     FastNewClosureStub stub;
     __ mov(r1, Operand(shared_info));
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 27a72f2..3f7fe45 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -219,6 +219,11 @@
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
   void DoMathAbs(LUnaryMathOperation* instr);
+  void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+                       SwVfpRegister result,
+                       DwVfpRegister double_input,
+                       Register scratch1,
+                       Register scratch2);
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathSqrt(LUnaryMathOperation* instr);
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index ef35102..c11d664 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -632,11 +632,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
-  // Compute the argv pointer in a callee-saved register.
-  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
-  sub(r6, r6, Operand(kPointerSize));
-
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Setup the frame structure on the stack.
   ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
   ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -658,10 +654,6 @@
   mov(ip, Operand(ExternalReference(Top::k_context_address)));
   str(cp, MemOperand(ip));
 
-  // Setup argc and the builtin function in callee-saved registers.
-  mov(r4, Operand(r0));
-  mov(r5, Operand(r1));
-
   // Optionally save all double registers.
   if (save_doubles) {
     sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
@@ -675,10 +667,10 @@
     // since the sp slot and code slot were pushed after the fp.
   }
 
-  // Reserve place for the return address and align the frame preparing for
-  // calling the runtime function.
+  // Reserve space for the return address and the requested stack space, and
+  // align the frame in preparation for calling the runtime function.
   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
-  sub(sp, sp, Operand(kPointerSize));
+  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
   if (frame_alignment > 0) {
     ASSERT(IsPowerOf2(frame_alignment));
     and_(sp, sp, Operand(-frame_alignment));
@@ -1475,17 +1467,115 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+  return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ApiFunction* function, int stack_space) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address();
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(),
+      next_address);
+
+  // Allocate HandleScope in callee-save registers.
+  mov(r7, Operand(next_address));
+  ldr(r4, MemOperand(r7, kNextOffset));
+  ldr(r5, MemOperand(r7, kLimitOffset));
+  ldr(r6, MemOperand(r7, kLevelOffset));
+  add(r6, r6, Operand(1));
+  str(r6, MemOperand(r7, kLevelOffset));
+
+  // Native call returns to the DirectCEntry stub which redirects to the
+  // return address pushed on stack (could have moved after GC).
+  // DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub;
+  stub.GenerateCall(this, function);
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+
+  // If the result is non-zero, dereference it to get the result value;
+  // otherwise set it to undefined.
+  cmp(r0, Operand(0));
+  LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+  ldr(r0, MemOperand(r0), ne);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  str(r4, MemOperand(r7, kNextOffset));
+  if (FLAG_debug_code) {
+    ldr(r1, MemOperand(r7, kLevelOffset));
+    cmp(r1, r6);
+    Check(eq, "Unexpected level after return from api call");
+  }
+  sub(r6, r6, Operand(1));
+  str(r6, MemOperand(r7, kLevelOffset));
+  ldr(ip, MemOperand(r7, kLimitOffset));
+  cmp(r5, ip);
+  b(ne, &delete_allocated_handles);
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+  mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+  ldr(r5, MemOperand(ip));
+  cmp(r4, r5);
+  b(ne, &promote_scheduled_exception);
+
+  // LeaveExitFrame expects unwind space to be in r4.
+  mov(r4, Operand(stack_space));
+  LeaveExitFrame(false);
+
+  bind(&promote_scheduled_exception);
+  MaybeObject* result = TryTailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  str(r5, MemOperand(r7, kLimitOffset));
+  mov(r4, r0);
+  PrepareCallCFunction(0, r5);
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+  mov(r0, r4);
+  jmp(&leave_exit_frame);
+
+  return result;
+}
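
TryCallApiFunctionAndReturn snapshots the handle scope's next/limit/level words (addressed via r7 at the offsets computed above) around the native call: next is rewound to drop handles the callback created, the level is re-checked under --debug-code, and if the callback grew limit by allocating extension blocks the slow path deletes them before leaving the exit frame. The bookkeeping in pseudo-C++ (HandleScopeData and DeleteExtensions are descriptive stand-ins for the real structures):

    class Object;
    void DeleteExtensions();  // stand-in for the runtime helper called above

    struct HandleScopeData {  // mirrors the three words addressed via r7
      Object** next;
      Object** limit;
      int level;
    };

    Object* CallApiAndRestore(HandleScopeData* data, Object* (*callback)()) {
      Object** saved_next = data->next;
      Object** saved_limit = data->limit;
      data->level++;
      Object* result = callback();
      data->next = saved_next;  // drop handles created by the callback
      data->level--;
      if (data->limit != saved_limit) {
        data->limit = saved_limit;
        DeleteExtensions();     // callback allocated extra handle blocks
      }
      return result;
    }
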
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     add(sp, sp, Operand(num_arguments * kPointerSize));
@@ -1577,13 +1667,14 @@
                                     Register dest,
                                     Register scratch,
                                     Register scratch2,
+                                    DwVfpRegister double_scratch,
                                     Label *not_int32) {
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     sub(scratch, source, Operand(kHeapObjectTag));
-    vldr(d0, scratch, HeapNumber::kValueOffset);
-    vcvt_s32_f64(s0, d0);
-    vmov(dest, s0);
+    vldr(double_scratch, scratch, HeapNumber::kValueOffset);
+    vcvt_s32_f64(double_scratch.low(), double_scratch);
+    vmov(dest, double_scratch.low());
     // Signed vcvt instruction will saturate to the minimum (0x80000000) or
     // maximum (0x7fffffff) signed 32-bit integer when the double is out of
     // range. When subtracting one, the minimum signed integer becomes the
@@ -1739,6 +1830,17 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r0, Operand(num_arguments));
+  return TryJumpToExternalReference(ext);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -1757,6 +1859,18 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+#if defined(__thumb__)
+  // Thumb mode builtin.
+  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
+  mov(r1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeJSFlags flags,
                                    PostCallGenerator* post_call_generator) {
@@ -1999,6 +2113,16 @@
 }
 
 
+void MacroAssembler::AbortIfNotRootValue(Register src,
+                                         Heap::RootListIndex root_value_index,
+                                         const char* message) {
+  ASSERT(!src.is(ip));
+  LoadRoot(ip, root_value_index);
+  cmp(src, ip);
+  Assert(eq, message);
+}
+
+
 void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                          Register heap_number_map,
                                          Register scratch,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index a1a13e3..c9ffde8 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -287,10 +287,8 @@
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
   // Enter exit frame.
-  // Expects the number of arguments in register r0 and
-  // the builtin function to call in register r1. Exits with argc in
-  // r4, argv in r6, and and the builtin function to call in r5.
-  void EnterExitFrame(bool save_doubles);
+  // stack_space - extra stack space, used for alignment before the call to C.
+  void EnterExitFrame(bool save_doubles, int stack_space = 0);
 
   // Leave the current exit frame. Expects the return value in r0.
   void LeaveExitFrame(bool save_doubles);
@@ -589,11 +587,13 @@
 
   // Convert the HeapNumber pointed to by source to a 32-bit signed integer
   // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
-  // to not_int32 label.
+  // to the not_int32 label. If VFP3 is available, double_scratch is used but
+  // not scratch2.
   void ConvertToInt32(Register source,
                       Register dest,
                       Register scratch,
                       Register scratch2,
+                      DwVfpRegister double_scratch,
                       Label *not_int32);
 
   // Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
@@ -614,6 +614,12 @@
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+                                               Condition cond = al);
+
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -632,6 +638,12 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC
+  // failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -655,9 +667,18 @@
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, int num_arguments);
 
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  // stack_space - space to be unwound on exit (includes the call js
+  // arguments space and the additional space allocated for the fast call).
+  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+                                           int stack_space);
+
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
@@ -763,6 +784,11 @@
   void AbortIfSmi(Register object);
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is not the root value with the given index.
+  void AbortIfNotRootValue(Register src,
+                           Heap::RootListIndex root_value_index,
+                           const char* message);
+
   // ---------------------------------------------------------------------------
   // HeapNumber utilities
 
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index de44030..8104747 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -744,10 +744,10 @@
 // offset from the svc instruction so the simulator knows what to call.
 class Redirection {
  public:
-  Redirection(void* external_function, bool fp_return)
+  Redirection(void* external_function, ExternalReference::Type type)
       : external_function_(external_function),
         swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
-        fp_return_(fp_return),
+        type_(type),
         next_(list_) {
     Simulator::current()->
         FlushICache(reinterpret_cast<void*>(&swi_instruction_),
@@ -760,14 +760,15 @@
   }
 
   void* external_function() { return external_function_; }
-  bool fp_return() { return fp_return_; }
+  ExternalReference::Type type() { return type_; }
 
-  static Redirection* Get(void* external_function, bool fp_return) {
+  static Redirection* Get(void* external_function,
+                          ExternalReference::Type type) {
     Redirection* current;
     for (current = list_; current != NULL; current = current->next_) {
       if (current->external_function_ == external_function) return current;
     }
-    return new Redirection(external_function, fp_return);
+    return new Redirection(external_function, type);
   }
 
   static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -780,7 +781,7 @@
  private:
   void* external_function_;
   uint32_t swi_instruction_;
-  bool fp_return_;
+  ExternalReference::Type type_;
   Redirection* next_;
   static Redirection* list_;
 };
@@ -790,8 +791,8 @@
 
 
 void* Simulator::RedirectExternalReference(void* external_function,
-                                           bool fp_return) {
-  Redirection* redirection = Redirection::Get(external_function, fp_return);
+                                           ExternalReference::Type type) {
+  Redirection* redirection = Redirection::Get(external_function, type);
   return redirection->address_of_swi_instruction();
 }
 
@@ -1528,6 +1529,9 @@
                                          int32_t arg2,
                                          int32_t arg3);
 
+// This signature supports direct calls into an API function's native
+// callback (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
 
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime.
@@ -1550,9 +1554,9 @@
       // This is dodgy but it works because the C entry stubs are never moved.
       // See comment in codegen-arm.cc and bug 1242173.
       int32_t saved_lr = get_register(lr);
-      if (redirection->fp_return()) {
-        intptr_t external =
-            reinterpret_cast<intptr_t>(redirection->external_function());
+      intptr_t external =
+          reinterpret_cast<intptr_t>(redirection->external_function());
+      if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
         SimulatorRuntimeFPCall target =
             reinterpret_cast<SimulatorRuntimeFPCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1568,9 +1572,28 @@
         CHECK(stack_aligned);
         double result = target(arg0, arg1, arg2, arg3);
         SetFpResult(result);
+      } else if (redirection->type() == ExternalReference::DIRECT_CALL) {
+        SimulatorRuntimeApiCall target =
+            reinterpret_cast<SimulatorRuntimeApiCall>(external);
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF(
+              "Call to host function at %p args %08x",
+              FUNCTION_ADDR(target),
+              arg0);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08x\n", get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        v8::Handle<v8::Value> result = target(arg0);
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+        }
+        set_register(r0, (int32_t) *result);
       } else {
-        intptr_t external =
-            reinterpret_cast<int32_t>(redirection->external_function());
+        // Builtin call.
+        ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
         SimulatorRuntimeCall target =
             reinterpret_cast<SimulatorRuntimeCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -2539,7 +2562,7 @@
                          (overflow_vfp_flag_ << 2) |
                          (div_zero_vfp_flag_ << 1) |
                          (inv_op_vfp_flag_ << 0) |
-                         (FPSCR_rounding_mode_ << 22);
+                         (FPSCR_rounding_mode_);
         set_register(rt, fpscr);
       }
     } else if ((instr->VLValue() == 0x0) &&
@@ -2562,7 +2585,7 @@
         div_zero_vfp_flag_ = (rt_value >> 1) & 1;
         inv_op_vfp_flag_ = (rt_value >> 0) & 1;
         FPSCR_rounding_mode_ =
-          static_cast<FPSCRRoundingModes>((rt_value >> 22) & 3);
+            static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
       }
     } else {
       UNIMPLEMENTED();  // Not used by V8.
@@ -2651,87 +2674,135 @@
   }
 }
 
+bool get_inv_op_vfp_flag(VFPRoundingMode mode,
+                         double val,
+                         bool unsigned_) {
+  ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
+  double max_uint = static_cast<double>(0xffffffffu);
+  double max_int = static_cast<double>(kMaxInt);
+  double min_int = static_cast<double>(kMinInt);
+
+  // Check for NaN.
+  if (val != val) {
+    return true;
+  }
+
+  // Check for overflow. This code works because 32-bit integers can be
+  // exactly represented by IEEE-754 64-bit floating-point values.
+  switch (mode) {
+    case RN:
+      return  unsigned_ ? (val >= (max_uint + 0.5)) ||
+                          (val < -0.5)
+                        : (val >= (max_int + 0.5)) ||
+                          (val < (min_int - 0.5));
+
+    case RM:
+      return  unsigned_ ? (val >= (max_uint + 1.0)) ||
+                          (val < 0)
+                        : (val >= (max_int + 1.0)) ||
+                          (val < min_int);
+
+    case RZ:
+      return  unsigned_ ? (val >= (max_uint + 1.0)) ||
+                          (val <= -1)
+                        : (val >= (max_int + 1.0)) ||
+                          (val <= (min_int - 1.0));
+    default:
+      UNREACHABLE();
+      return true;
+  }
+}
+
+
+// We call this function only if we had a vfp invalid exception.
+// It returns the correct saturated value.
+int VFPConversionSaturate(double val, bool unsigned_res) {
+  if (val != val) {
+    return 0;
+  } else {
+    if (unsigned_res) {
+      return (val < 0) ? 0 : 0xffffffffu;
+    } else {
+      return (val < 0) ? kMinInt : kMaxInt;
+    }
+  }
+}
+
 
 void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
+         (instr->Bits(27, 23) == 0x1D));
   ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
          (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   // Conversion between floating-point and integer.
   bool to_integer = (instr->Bit(18) == 1);
 
-  VFPRegPrecision src_precision = kSinglePrecision;
-  if (instr->SzValue() == 1) {
-    src_precision = kDoublePrecision;
-  }
+  VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
+                                                          : kSinglePrecision;
 
   if (to_integer) {
-    bool unsigned_integer = (instr->Bit(16) == 0);
-    FPSCRRoundingModes mode;
-    if (instr->Bit(7) != 1) {
-      // Use FPSCR defined rounding mode.
-      mode = FPSCR_rounding_mode_;
-      // Only RZ and RM modes are supported.
-      ASSERT((mode == RM) || (mode == RZ));
-    } else {
-      // VFP uses round towards zero by default.
-      mode = RZ;
-    }
+    // We are playing with code close to the C++ standard's limits below,
+    // hence the very simple code and heavy checks.
+    //
+    // Note:
+    // C++ defines default type casting from floating point to integer as
+    // (close to) rounding toward zero ("fractional part discarded").
 
     int dst = instr->VFPDRegValue(kSinglePrecision);
     int src = instr->VFPMRegValue(src_precision);
-    int32_t kMaxInt = v8::internal::kMaxInt;
-    int32_t kMinInt = v8::internal::kMinInt;
-    switch (mode) {
-      case RM:
-        if (src_precision == kDoublePrecision) {
-          double val = get_double_from_d_register(src);
 
-          inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+    // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
+    // mode or the default Round to Zero mode.
+    VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
+                                                : RZ;
+    ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
 
-          int sint = unsigned_integer ? static_cast<uint32_t>(val) :
-                                        static_cast<int32_t>(val);
-          sint = sint > val ? sint - 1 : sint;
+    bool unsigned_integer = (instr->Bit(16) == 0);
+    bool double_precision = (src_precision == kDoublePrecision);
 
-          set_s_register_from_sinteger(dst, sint);
-        } else {
-          float val = get_float_from_s_register(src);
+    double val = double_precision ? get_double_from_d_register(src)
+                                  : get_float_from_s_register(src);
 
-          inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+    int temp = unsigned_integer ? static_cast<uint32_t>(val)
+                                : static_cast<int32_t>(val);
 
-          int sint = unsigned_integer ? static_cast<uint32_t>(val) :
-                                        static_cast<int32_t>(val);
-          sint = sint > val ? sint - 1 : sint;
+    inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
 
-          set_s_register_from_sinteger(dst, sint);
+    if (inv_op_vfp_flag_) {
+      temp = VFPConversionSaturate(val, unsigned_integer);
+    } else {
+      switch (mode) {
+        case RN: {
+          double abs_diff =
+            unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
+                             : fabs(val - temp);
+          int val_sign = (val > 0) ? 1 : -1;
+          if (abs_diff > 0.5) {
+            temp += val_sign;
+          } else if (abs_diff == 0.5) {
+            // Round to even if exactly halfway.
+            temp = ((temp % 2) == 0) ? temp : temp + val_sign;
+          }
+          break;
         }
-        break;
-      case RZ:
-        if (src_precision == kDoublePrecision) {
-          double val = get_double_from_d_register(src);
 
-          inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+        case RM:
+          temp = temp > val ? temp - 1 : temp;
+          break;
 
-          int sint = unsigned_integer ? static_cast<uint32_t>(val) :
-                                        static_cast<int32_t>(val);
+        case RZ:
+          // Nothing to do.
+          break;
 
-          set_s_register_from_sinteger(dst, sint);
-        } else {
-          float val = get_float_from_s_register(src);
-
-          inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
-          int sint = unsigned_integer ? static_cast<uint32_t>(val) :
-                                        static_cast<int32_t>(val);
-
-          set_s_register_from_sinteger(dst, sint);
-        }
-        break;
-
-      default:
-        UNREACHABLE();
+        default:
+          UNREACHABLE();
+      }
     }
 
+    // Update the destination register.
+    set_s_register_from_sinteger(dst, temp);
+
   } else {
     bool unsigned_integer = (instr->Bit(7) == 0);
 
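
The rewritten simulator path converts once with C++'s truncating cast and then patches the result for the active mode: RN applies round-half-to-even, RM subtracts one when truncation rounded up from below, and RZ keeps the cast. For example, -2.5 under RN casts to -2 with abs_diff 0.5; -2 is even, so it stays -2, while RM yields -3. The signed path as a standalone function (saturation and the unsigned path omitted for brevity):

    #include <cmath>
    #include <cstdint>

    int32_t ConvertWithMode(double val, char mode) {  // 'N', 'M' or 'Z'
      int32_t temp = static_cast<int32_t>(val);  // truncates toward zero
      if (mode == 'N') {                         // round to nearest
        double abs_diff = std::fabs(val - temp);
        int val_sign = (val > 0) ? 1 : -1;
        if (abs_diff > 0.5) {
          temp += val_sign;
        } else if (abs_diff == 0.5) {
          temp = ((temp % 2) == 0) ? temp : temp + val_sign;  // ties to even
        }
      } else if (mode == 'M') {                  // round toward minus infinity
        if (temp > val) temp--;
      }
      return temp;  // 'Z': truncation already is round-to-zero
    }
    // E.g. ConvertWithMode(-2.5, 'N') == -2, ConvertWithMode(-2.5, 'M') == -3.
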
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index be44766..5256ae3 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -79,6 +79,7 @@
 
 #include "constants-arm.h"
 #include "hashmap.h"
+#include "assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -285,8 +286,9 @@
   static CachePage* GetCachePage(void* page);
 
   // Runtime call support.
-  static void* RedirectExternalReference(void* external_function,
-                                         bool fp_return);
+  static void* RedirectExternalReference(
+      void* external_function,
+      v8::internal::ExternalReference::Type type);
 
   // For use in calls that take two double values, constructed from r0, r1, r2
   // and r3.
@@ -312,7 +314,7 @@
   bool v_flag_FPSCR_;
 
   // VFP rounding mode. See ARM DDI 0406B Page A2-29.
-  FPSCRRoundingModes FPSCR_rounding_mode_;
+  VFPRoundingMode FPSCR_rounding_mode_;
 
   // VFP FP exception flags architecture state.
   bool inv_op_vfp_flag_;
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 1e99e60..9ef6115 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -575,72 +575,94 @@
   __ CallStub(&stub);
 }
 
+static const int kFastApiCallArguments = 3;
 
 // Reserves space for the extra arguments to FastHandleApiCall in the
 // caller's frame.
 //
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
 static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
                                        Register scratch) {
   __ mov(scratch, Operand(Smi::FromInt(0)));
-  __ push(scratch);
-  __ push(scratch);
-  __ push(scratch);
-  __ push(scratch);
+  for (int i = 0; i < kFastApiCallArguments; i++) {
+    __ push(scratch);
+  }
 }
 
 
 // Undoes the effects of ReserveSpaceForFastApiCall.
 static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
-  __ Drop(4);
+  __ Drop(kFastApiCallArguments);
 }
 
 
-// Generates call to FastHandleApiCall builtin.
-static void GenerateFastApiCall(MacroAssembler* masm,
-                                const CallOptimization& optimization,
-                                int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]              : holder (set by CheckPrototypes)
+  //  -- sp[4]              : callee js function
+  //  -- sp[8]              : call data
+  //  -- sp[12]             : last js argument
+  //  -- ...
+  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 4) * 4] : receiver
+  // -----------------------------------
   // Get the function and setup the context.
   JSFunction* function = optimization.constant_function();
   __ mov(r5, Operand(Handle<JSFunction>(function)));
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  bool info_loaded = false;
-  Object* callback = optimization.api_call_info()->callback();
-  if (Heap::InNewSpace(callback)) {
-    info_loaded = true;
-    __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
-    __ ldr(r7, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
-  } else {
-    __ Move(r7, Handle<Object>(callback));
-  }
   Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
   if (Heap::InNewSpace(call_data)) {
-    if (!info_loaded) {
-      __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
-    }
+    __ Move(r0, api_call_info_handle);
     __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
   } else {
     __ Move(r6, Handle<Object>(call_data));
   }
+  // Store js function and call data.
+  __ stm(ib, sp, r5.bit() | r6.bit());
 
-  __ add(sp, sp, Operand(1 * kPointerSize));
-  __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
-  __ sub(sp, sp, Operand(1 * kPointerSize));
+  // r2 points to call data as expected by Arguments
+  // (refer to layout above).
+  __ add(r2, sp, Operand(2 * kPointerSize));
 
-  // Set the number of arguments.
-  __ mov(r0, Operand(argc + 4));
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
 
-  // Jump to the fast api call builtin (tail call).
-  Handle<Code> code = Handle<Code>(
-      Builtins::builtin(Builtins::FastHandleApiCall));
-  ParameterCount expected(0);
-  __ InvokeCode(code, expected, expected,
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  const int kApiStackSpace = 4;
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  // r0 = v8::Arguments&
+  // Arguments is after the return address.
+  __ add(r0, sp, Operand(1 * kPointerSize));
+  // v8::Arguments::implicit_args = data
+  __ str(r2, MemOperand(r0, 0 * kPointerSize));
+  // v8::Arguments::values = last argument
+  __ add(ip, r2, Operand(argc * kPointerSize));
+  __ str(ip, MemOperand(r0, 1 * kPointerSize));
+  // v8::Arguments::length_ = argc
+  __ mov(ip, Operand(argc));
+  __ str(ip, MemOperand(r0, 2 * kPointerSize));
+  // v8::Arguments::is_construct_call = 0
+  __ mov(ip, Operand(0));
+  __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  MaybeObject* result = masm->TryCallApiFunctionAndReturn(
+      &fun, argc + kFastApiCallArguments + 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+  return Heap::undefined_value();
 }
 
-
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -650,16 +672,16 @@
         arguments_(arguments),
         name_(name) {}
 
-  void Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -669,17 +691,17 @@
     CallOptimization optimization(lookup);
 
     if (optimization.is_constant_call()) {
-      CompileCacheable(masm,
-                       object,
-                       receiver,
-                       scratch1,
-                       scratch2,
-                       scratch3,
-                       holder,
-                       lookup,
-                       name,
-                       optimization,
-                       miss);
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
     } else {
       CompileRegular(masm,
                      object,
@@ -690,21 +712,22 @@
                      name,
                      holder,
                      miss);
+      return Heap::undefined_value();
     }
   }
 
  private:
-  void CompileCacheable(MacroAssembler* masm,
-                       JSObject* object,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       JSObject* interceptor_holder,
-                       LookupResult* lookup,
-                       String* name,
-                       const CallOptimization& optimization,
-                       Label* miss_label) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -768,7 +791,10 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       __ InvokeFunction(optimization.constant_function(), arguments_,
                         JUMP_FUNCTION);
@@ -786,6 +812,8 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
+
+    return Heap::undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
@@ -2055,11 +2083,11 @@
   //  - Make sure Flush-to-zero mode control bit is unset (bit 22).
   __ bic(r9, r3,
       Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
-  __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
+  __ orr(r9, r9, Operand(kRoundToMinusInf));
   __ vmsr(r9);
 
   // Convert the argument to an integer.
-  __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
+  __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
 
   // Use vcvt latency to start checking for special cases.
   // Get the argument exponent and clear the sign bit.
@@ -2368,7 +2396,8 @@
   }
 
   if (depth != kInvalidProtoDepth) {
-    GenerateFastApiCall(masm(), optimization, argc);
+    MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+    if (result->IsFailure()) return result;
   } else {
     __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
   }
@@ -2412,16 +2441,19 @@
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), r2);
-  compiler.Compile(masm(),
-                   object,
-                   holder,
-                   name,
-                   &lookup,
-                   r1,
-                   r3,
-                   r4,
-                   r0,
-                   &miss);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         r1,
+                                         r3,
+                                         r4,
+                                         r0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
@@ -3087,6 +3119,38 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map matches.
+  __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
+
+  GenerateFastPixelArrayLoad(masm(),
+                             r1,
+                             r0,
+                             r2,
+                             r3,
+                             r4,
+                             r5,
+                             r0,
+                             &miss,
+                             &miss,
+                             &miss);
+
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
                                                        Map* transition,
@@ -3764,9 +3828,9 @@
 
       // Not infinity or NaN simply convert to int.
       if (IsElementTypeSigned(array_type)) {
-        __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
+        __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
       } else {
-        __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
+        __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
       }
       __ vmov(r5, s0, ne);
 
diff --git a/src/array.js b/src/array.js
index a0e3e0b..1298434 100644
--- a/src/array.js
+++ b/src/array.js
@@ -1018,9 +1018,11 @@
   } else {
     index = TO_INTEGER(index);
     // If index is negative, index from the end of the array.
-    if (index < 0) index = length + index;
-    // If index is still negative, search the entire array.
-    if (index < 0) index = 0;
+    if (index < 0) {
+      index = length + index;
+      // If index is still negative, search the entire array.
+      if (index < 0) index = 0;
+    }
   }
   var min = index;
   var max = length;
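
The array.js change nests the clamp so both adjustments read as one normalization step: a negative fromIndex counts from the end of the array and, if still negative after adding length, clamps to zero so the whole array is searched (ES5 Array.prototype.indexOf semantics). The same normalization as a C++ illustration, not V8 code:

    #include <cstddef>

    size_t NormalizeFromIndex(long index, size_t length) {
      if (index < 0) {
        index += static_cast<long>(length);
        if (index < 0) index = 0;  // still negative: search the whole array
      }
      return static_cast<size_t>(index);
    }
    // NormalizeFromIndex(-2, 5) == 3; NormalizeFromIndex(-9, 5) == 0.
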
diff --git a/src/assembler.cc b/src/assembler.cc
index ca72d63..ef2094f 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -553,8 +553,9 @@
   : address_(Redirect(Builtins::c_function_address(id))) {}
 
 
-ExternalReference::ExternalReference(ApiFunction* fun)
-  : address_(Redirect(fun->address())) {}
+ExternalReference::ExternalReference(ApiFunction* fun, Type type)
+  : address_(Redirect(fun->address(), type)) {}
 
 
 ExternalReference::ExternalReference(Builtins::Name name)
@@ -888,17 +889,18 @@
       UNREACHABLE();
   }
-  // Passing true as 2nd parameter indicates that they return an fp value.
-  return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
+  // Redirecting as FP_RETURN_CALL indicates that the function returns an
+  // fp value.
+  return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
 }
 
 
 ExternalReference ExternalReference::compare_doubles() {
   return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
-                                    false));
+                                    BUILTIN_CALL));
 }
 
 
-ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+ExternalReference::ExternalReferenceRedirector*
+    ExternalReference::redirector_ = NULL;
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/assembler.h b/src/assembler.h
index a29aa06..e8bc5d6 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -459,9 +459,6 @@
 #endif
 
 
-typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
-
-
 // An ExternalReference represents a C++ address used in the generated
 // code. All references to C++ functions and variables must be encapsulated in
 // an ExternalReference instance. This is done in order to track the origin of
@@ -469,9 +466,29 @@
 // addresses when deserializing a heap.
 class ExternalReference BASE_EMBEDDED {
  public:
+  // Used in the simulator to support different native API calls.
+  //
+  // BUILTIN_CALL - builtin call.
+  // MaybeObject* f(v8::internal::Arguments).
+  //
+  // FP_RETURN_CALL - builtin call that returns floating point.
+  // double f(double, double).
+  //
+  // DIRECT_CALL - direct call to API function native callback
+  // from generated code.
+  // Handle<Value> f(v8::Arguments&)
+  //
+  enum Type {
+    BUILTIN_CALL,  // default
+    FP_RETURN_CALL,
+    DIRECT_CALL
+  };
+
+  typedef void* ExternalReferenceRedirector(void* original, Type type);
+
   explicit ExternalReference(Builtins::CFunctionId id);
 
-  explicit ExternalReference(ApiFunction* ptr);
+  explicit ExternalReference(ApiFunction* ptr, Type type);
 
   explicit ExternalReference(Builtins::Name name);
 
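
Replacing the fp_return boolean with ExternalReference::Type lets the redirector distinguish three calling conventions instead of two, which the simulator needs in order to marshal arguments and results correctly for direct API calls. A sketch of a redirector honoring the new signature (the Wrap* helpers are hypothetical stand-ins for what the simulator's Redirection objects do):

    void* WrapAsBuiltinCall(void* f);  // hypothetical trampoline builders
    void* WrapAsFpCall(void* f);
    void* WrapAsDirectCall(void* f);

    void* MyRedirector(void* original, ExternalReference::Type type) {
      switch (type) {
        case ExternalReference::BUILTIN_CALL:
          return WrapAsBuiltinCall(original);  // MaybeObject* f(Arguments)
        case ExternalReference::FP_RETURN_CALL:
          return WrapAsFpCall(original);       // double f(double, double)
        case ExternalReference::DIRECT_CALL:
          return WrapAsDirectCall(original);   // Handle<Value> f(Arguments&)
      }
      return original;
    }
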
@@ -599,17 +616,19 @@
 
   static ExternalReferenceRedirector* redirector_;
 
-  static void* Redirect(void* address, bool fp_return = false) {
+  static void* Redirect(void* address,
+                        Type type = ExternalReference::BUILTIN_CALL) {
     if (redirector_ == NULL) return address;
-    void* answer = (*redirector_)(address, fp_return);
+    void* answer = (*redirector_)(address, type);
     return answer;
   }
 
-  static void* Redirect(Address address_arg, bool fp_return = false) {
+  static void* Redirect(Address address_arg,
+                        Type type = ExternalReference::BUILTIN_CALL) {
     void* address = reinterpret_cast<void*>(address_arg);
     void* answer = (redirector_ == NULL) ?
                    address :
-                   (*redirector_)(address, fp_return);
+                   (*redirector_)(address, type);
     return answer;
   }
 
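For embedders that install a redirector (typically the simulator), the callback now receives this three-valued Type instead of the old fp_return bool. A minimal compilable sketch of the pattern, using simplified stand-ins for the V8 types (the real enum lives on ExternalReference, as shown above):

    #include <cstddef>

    // Simplified stand-ins; illustrative only, not V8's declarations.
    enum Type { BUILTIN_CALL, FP_RETURN_CALL, DIRECT_CALL };
    typedef void* ExternalReferenceRedirector(void* original, Type type);

    static ExternalReferenceRedirector* redirector_ = NULL;

    // Mirrors ExternalReference::Redirect: hand the call type to the
    // redirector so it can choose a trampoline per calling convention.
    static void* Redirect(void* address, Type type = BUILTIN_CALL) {
      if (redirector_ == NULL) return address;
      return (*redirector_)(address, type);
    }
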
diff --git a/src/code-stubs.h b/src/code-stubs.h
index f80c89b..0d0e37f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -75,7 +75,8 @@
   V(GetProperty)               \
   V(SetProperty)               \
   V(InvokeBuiltin)             \
-  V(RegExpCEntry)
+  V(RegExpCEntry)              \
+  V(DirectCEntry)
 #else
 #define CODE_STUB_LIST_ARM(V)
 #endif
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index 6534e7f..5467789 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -55,6 +55,10 @@
 
 Scope* CodeGenerator::scope() { return info_->function()->scope(); }
 
+StrictModeFlag CodeGenerator::strict_mode_flag() {
+  return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode;
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_INL_H_
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 38438cb..cccb7a4 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -136,7 +136,8 @@
       : CompilationSubCache(generations) { }
 
   Handle<SharedFunctionInfo> Lookup(Handle<String> source,
-                                    Handle<Context> context);
+                                    Handle<Context> context,
+                                    StrictModeFlag strict_mode);
 
   void Put(Handle<String> source,
            Handle<Context> context,
@@ -371,7 +372,9 @@
 
 
 Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
-    Handle<String> source, Handle<Context> context) {
+    Handle<String> source,
+    Handle<Context> context,
+    StrictModeFlag strict_mode) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -380,7 +383,7 @@
   { HandleScope scope;
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
-      result = table->LookupEval(*source, *context);
+      result = table->LookupEval(*source, *context, strict_mode);
       if (result->IsSharedFunctionInfo()) {
         break;
       }
@@ -503,18 +506,20 @@
 }
 
 
-Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
-                                                        Handle<Context> context,
-                                                        bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(
+    Handle<String> source,
+    Handle<Context> context,
+    bool is_global,
+    StrictModeFlag strict_mode) {
   if (!IsEnabled()) {
     return Handle<SharedFunctionInfo>::null();
   }
 
   Handle<SharedFunctionInfo> result;
   if (is_global) {
-    result = eval_global.Lookup(source, context);
+    result = eval_global.Lookup(source, context, strict_mode);
   } else {
-    result = eval_contextual.Lookup(source, context);
+    result = eval_contextual.Lookup(source, context, strict_mode);
   }
   return result;
 }
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 37e21be..f779a23 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -51,7 +51,8 @@
   // contain a script for the given source string.
   static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                                Handle<Context> context,
-                                               bool is_global);
+                                               bool is_global,
+                                               StrictModeFlag strict_mode);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
diff --git a/src/compiler.cc b/src/compiler.cc
index 5c18c3e..77111a8 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -548,7 +548,8 @@
 
 Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                  Handle<Context> context,
-                                                 bool is_global) {
+                                                 bool is_global,
+                                                 StrictModeFlag strict_mode) {
   int source_length = source->length();
   Counters::total_eval_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -559,7 +560,10 @@
   // Do a lookup in the compilation cache; if the entry is not there, invoke
   // the compiler and add the result to the cache.
   Handle<SharedFunctionInfo> result;
-  result = CompilationCache::LookupEval(source, context, is_global);
+  result = CompilationCache::LookupEval(source,
+                                        context,
+                                        is_global,
+                                        strict_mode);
 
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
@@ -567,9 +571,14 @@
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
+    if (strict_mode == kStrictMode) info.MarkAsStrict();
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
+      // If caller is strict mode, the result must be strict as well,
+      // but not the other way around. Consider:
+      // eval("'use strict'; ...");
+      ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
       CompilationCache::PutEval(source, context, is_global, result);
     }
   }
@@ -762,6 +771,7 @@
       *lit->this_property_assignments());
   function_info->set_try_full_codegen(lit->try_full_codegen());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+  function_info->set_strict_mode(lit->strict_mode());
 }
 
 
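The assertion added in CompileEval encodes a one-way implication: a strict-mode caller can only receive strict-mode code, while a sloppy caller may receive either, since the eval'd source can opt in via 'use strict'. Restated as a hedged one-liner (plain bools standing in for StrictModeFlag):

    // caller strict => result strict; the converse need not hold.
    static bool EvalStrictnessInvariant(bool caller_strict, bool result_strict) {
      return !caller_strict || result_strict;
    }

This asymmetry is presumably also why the cache lookup above now takes strict_mode: a cache hit compiled under one mode must not be returned for a lookup made in the other.
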
diff --git a/src/compiler.h b/src/compiler.h
index 44ac9c8..9843dd6 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -49,6 +49,7 @@
   bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
   bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
   bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
+  bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
   bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
@@ -69,6 +70,13 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
+  void MarkAsStrict() {
+    ASSERT(!is_lazy());
+    flags_ |= IsStrict::encode(true);
+  }
+  StrictModeFlag StrictMode() {
+    return is_strict() ? kStrictMode : kNonStrictMode;
+  }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
     flags_ |= IsInLoop::encode(true);
@@ -162,6 +170,8 @@
   class IsGlobal: public BitField<bool, 2, 1> {};
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
+  // Strict mode - used in eager compilation.
+  class IsStrict: public BitField<bool, 4, 1> {};
 
   unsigned flags_;
 
@@ -230,7 +240,8 @@
   // Compile a String source within a context for Eval.
   static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
                                                 Handle<Context> context,
-                                                bool is_global);
+                                                bool is_global,
+                                                StrictModeFlag strict_mode);
 
   // Compile from function info (used for lazy compilation). Returns true on
   // success and false if the compilation resulted in a stack overflow.
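
IsStrict above packs into bit 4 of the shared flags_ word through the BitField template. A self-contained sketch of that encode/mask pattern, assuming a simplified BitField (the real template lives in V8's utilities):

    #include <stdint.h>

    // Simplified stand-in for v8::internal::BitField; illustrative only.
    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1) << shift; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
    };

    class IsStrict : public BitField<bool, 4, 1> {};

    // flags |= IsStrict::encode(true);               // MarkAsStrict()
    // bool strict = (flags & IsStrict::mask()) != 0; // is_strict()
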
diff --git a/src/conversions.cc b/src/conversions.cc
index a954d6c..a348235 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -125,8 +125,8 @@
 }
 
 
-static double SignedZero(bool sign) {
-  return sign ? -0.0 : 0.0;
+static double SignedZero(bool negative) {
+  return negative ? -0.0 : 0.0;
 }
 
 
@@ -134,14 +134,14 @@
 template <int radix_log_2, class Iterator, class EndMark>
 static double InternalStringToIntDouble(Iterator current,
                                         EndMark end,
-                                        bool sign,
+                                        bool negative,
                                         bool allow_trailing_junk) {
   ASSERT(current != end);
 
   // Skip leading 0s.
   while (*current == '0') {
     ++current;
-    if (current == end) return SignedZero(sign);
+    if (current == end) return SignedZero(negative);
   }
 
   int64_t number = 0;
@@ -217,7 +217,7 @@
   ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
 
   if (exponent == 0) {
-    if (sign) {
+    if (negative) {
       if (number == 0) return -0.0;
       number = -number;
     }
@@ -227,7 +227,7 @@
   ASSERT(number != 0);
   // The double could be constructed faster from number (mantissa), exponent
   // and sign. Assuming it's a rare case more simple code is used.
-  return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
+  return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
 }
 
 
@@ -238,7 +238,7 @@
 
   if (!AdvanceToNonspace(&current, end)) return empty_string_val;
 
-  bool sign = false;
+  bool negative = false;
   bool leading_zero = false;
 
   if (*current == '+') {
@@ -248,14 +248,14 @@
   } else if (*current == '-') {
     ++current;
     if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
-    sign = true;
+    negative = true;
   }
 
   if (radix == 0) {
     // Radix detection.
     if (*current == '0') {
       ++current;
-      if (current == end) return SignedZero(sign);
+      if (current == end) return SignedZero(negative);
       if (*current == 'x' || *current == 'X') {
         radix = 16;
         ++current;
@@ -271,7 +271,7 @@
     if (*current == '0') {
       // Allow "0x" prefix.
       ++current;
-      if (current == end) return SignedZero(sign);
+      if (current == end) return SignedZero(negative);
       if (*current == 'x' || *current == 'X') {
         ++current;
         if (current == end) return JUNK_STRING_VALUE;
@@ -287,7 +287,7 @@
   while (*current == '0') {
     leading_zero = true;
     ++current;
-    if (current == end) return SignedZero(sign);
+    if (current == end) return SignedZero(negative);
   }
 
   if (!leading_zero && !isDigit(*current, radix)) {
@@ -298,21 +298,21 @@
     switch (radix) {
       case 2:
         return InternalStringToIntDouble<1>(
-                   current, end, sign, allow_trailing_junk);
+                   current, end, negative, allow_trailing_junk);
       case 4:
         return InternalStringToIntDouble<2>(
-                   current, end, sign, allow_trailing_junk);
+                   current, end, negative, allow_trailing_junk);
       case 8:
         return InternalStringToIntDouble<3>(
-                   current, end, sign, allow_trailing_junk);
+                   current, end, negative, allow_trailing_junk);
 
       case 16:
         return InternalStringToIntDouble<4>(
-                   current, end, sign, allow_trailing_junk);
+                   current, end, negative, allow_trailing_junk);
 
       case 32:
         return InternalStringToIntDouble<5>(
-                   current, end, sign, allow_trailing_junk);
+                   current, end, negative, allow_trailing_junk);
       default:
         UNREACHABLE();
     }
@@ -344,7 +344,7 @@
     ASSERT(buffer_pos < kBufferSize);
     buffer[buffer_pos] = '\0';
     Vector<const char> buffer_vector(buffer, buffer_pos);
-    return sign ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
+    return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
   }
 
   // The following code causes accumulating rounding error for numbers greater
@@ -406,7 +406,7 @@
     return JUNK_STRING_VALUE;
   }
 
-  return sign ? -v : v;
+  return negative ? -v : v;
 }
 
 
@@ -445,7 +445,7 @@
   bool nonzero_digit_dropped = false;
   bool fractional_part = false;
 
-  bool sign = false;
+  bool negative = false;
 
   if (*current == '+') {
     // Ignore leading sign.
@@ -454,7 +454,7 @@
   } else if (*current == '-') {
     ++current;
     if (current == end) return JUNK_STRING_VALUE;
-    sign = true;
+    negative = true;
   }
 
   static const char kInfinitySymbol[] = "Infinity";
@@ -468,13 +468,13 @@
     }
 
     ASSERT(buffer_pos == 0);
-    return sign ? -V8_INFINITY : V8_INFINITY;
+    return negative ? -V8_INFINITY : V8_INFINITY;
   }
 
   bool leading_zero = false;
   if (*current == '0') {
     ++current;
-    if (current == end) return SignedZero(sign);
+    if (current == end) return SignedZero(negative);
 
     leading_zero = true;
 
@@ -487,14 +487,14 @@
 
       return InternalStringToIntDouble<4>(current,
                                           end,
-                                          sign,
+                                          negative,
                                           allow_trailing_junk);
     }
 
     // Ignore leading zeros in the integer part.
     while (*current == '0') {
       ++current;
-      if (current == end) return SignedZero(sign);
+      if (current == end) return SignedZero(negative);
     }
   }
 
@@ -539,7 +539,7 @@
       // leading zeros (if any).
       while (*current == '0') {
         ++current;
-        if (current == end) return SignedZero(sign);
+        if (current == end) return SignedZero(negative);
         exponent--;  // Move this 0 into the exponent.
       }
     }
@@ -631,7 +631,7 @@
   if (octal) {
     return InternalStringToIntDouble<3>(buffer,
                                         buffer + buffer_pos,
-                                        sign,
+                                        negative,
                                         allow_trailing_junk);
   }
 
@@ -644,7 +644,7 @@
   buffer[buffer_pos] = '\0';
 
   double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
-  return sign ? -converted : converted;
+  return negative ? -converted : converted;
 }
 
 
@@ -702,26 +702,12 @@
 
 
 const char* DoubleToCString(double v, Vector<char> buffer) {
-  StringBuilder builder(buffer.start(), buffer.length());
-
   switch (fpclassify(v)) {
-    case FP_NAN:
-      builder.AddString("NaN");
-      break;
-
-    case FP_INFINITE:
-      if (v < 0.0) {
-        builder.AddString("-Infinity");
-      } else {
-        builder.AddString("Infinity");
-      }
-      break;
-
-    case FP_ZERO:
-      builder.AddCharacter('0');
-      break;
-
+    case FP_NAN: return "NaN";
+    case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
+    case FP_ZERO: return "0";
     default: {
+      StringBuilder builder(buffer.start(), buffer.length());
       int decimal_point;
       int sign;
       const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -764,9 +750,9 @@
         if (exponent < 0) exponent = -exponent;
         builder.AddFormatted("%d", exponent);
       }
+    return builder.Finalize();
     }
   }
-  return builder.Finalize();
 }
 
 
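One subtlety in the DoubleToCString change: the NaN, Infinity and zero cases now return pointers to string literals and never touch the caller's buffer, so callers must consume the returned pointer rather than assume the result was written into buffer. A hedged standalone model of the new shape (snprintf stands in for the dtoa path):

    #include <cmath>
    #include <cstdio>

    // Illustrative only: constants are returned directly; only the
    // general case writes through the caller-provided buffer.
    static const char* DoubleToCStringSketch(double v, char* buffer, int size) {
      switch (std::fpclassify(v)) {
        case FP_NAN:      return "NaN";
        case FP_INFINITE: return v < 0.0 ? "-Infinity" : "Infinity";
        case FP_ZERO:     return "0";
        default:
          std::snprintf(buffer, size, "%g", v);  // stand-in for dtoa
          return buffer;
      }
    }
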
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index f081576..00e7d0e 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -817,7 +817,7 @@
   // call to an unconditional call to the replacement code.
   ASSERT(unoptimized_code->kind() == Code::FUNCTION);
   Address stack_check_cursor = unoptimized_code->instruction_start() +
-      unoptimized_code->stack_check_table_start();
+      unoptimized_code->stack_check_table_offset();
   uint32_t table_length = Memory::uint32_at(stack_check_cursor);
   stack_check_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
@@ -836,7 +836,7 @@
   // stack check calls.
   ASSERT(unoptimized_code->kind() == Code::FUNCTION);
   Address stack_check_cursor = unoptimized_code->instruction_start() +
-      unoptimized_code->stack_check_table_start();
+      unoptimized_code->stack_check_table_offset();
   uint32_t table_length = Memory::uint32_at(stack_check_cursor);
   stack_check_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
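
Both patched routines walk the same in-code table, now located through the renamed stack_check_table_offset(): a 32-bit entry count followed by fixed-size entries. A hedged sketch of just the cursor arithmetic (the entry size and contents are assumptions here, not taken from the source):

    #include <stdint.h>
    #include <string.h>

    // Illustrative walk; entry_size is hypothetical.
    static void WalkStackCheckTable(const uint8_t* code_start,
                                    uint32_t table_offset,
                                    size_t entry_size) {
      const uint8_t* cursor = code_start + table_offset;
      uint32_t length;
      memcpy(&length, cursor, sizeof(length));  // Memory::uint32_at
      cursor += sizeof(uint32_t);               // kIntSize
      for (uint32_t i = 0; i < length; ++i) {
        // ... patch the stack check recorded at `cursor` ...
        cursor += entry_size;
      }
    }
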
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 194a299..243abf0 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -313,12 +313,12 @@
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
   int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
-      ? static_cast<int>(code->safepoint_table_start())
+      ? static_cast<int>(code->safepoint_table_offset())
       : code->instruction_size();
   // If there might be a stack check table, stop before reaching it.
   if (code->kind() == Code::FUNCTION) {
     decode_size =
-        Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+        Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
   }
 
   byte* begin = code->instruction_start();
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index b8f081c..63daa05 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -40,8 +40,12 @@
 
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+  bool compact = false;
   // All allocation spaces other than NEW_SPACE have the same effect.
-  Heap::CollectAllGarbage(false);
+  if (args.Length() >= 1 && args[0]->IsBoolean()) {
+    compact = args[0]->BooleanValue();
+  }
+  Heap::CollectAllGarbage(compact);
   return v8::Undefined();
 }
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index af3ac00..4ed3fec 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -304,7 +304,7 @@
   cgen.PopulateDeoptimizationData(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
   code->set_allow_osr_at_loop_nesting_level(0);
-  code->set_stack_check_table_start(table_offset);
+  code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // may be an empty handle.
 #ifdef ENABLE_GDB_JIT_INTERFACE
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 0482ee8..2d0998d 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -531,6 +531,9 @@
 
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
+  StrictModeFlag strict_mode_flag() {
+    return function()->strict_mode() ? kStrictMode : kNonStrictMode;
+  }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return info_->scope(); }
 
diff --git a/src/handles.cc b/src/handles.cc
index 461c3f5..274c34d 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -873,7 +873,7 @@
                                            int expected_additional_properties,
                                            bool condition) {
   object_ = object;
-  if (condition && object_->HasFastProperties()) {
+  if (condition && object_->HasFastProperties() && !object->IsJSGlobalProxy()) {
     // Normalize the properties of object to avoid n^2 behavior
     // when extending the object multiple properties. Indicate the number of
     // properties to be added.
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index dfda7c6..732d2f4 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -373,6 +373,7 @@
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
+      Heap::CollectAllGarbage(true);
       HeapSnapshotGenerator generator(result, control);
       generation_completed = generator.GenerateSnapshot();
       break;
@@ -808,7 +809,7 @@
 
 
 void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     CollectStats(obj);
     agg_snapshot_->js_cons_profile()->CollectStats(obj);
diff --git a/src/heap.cc b/src/heap.cc
index 0093829..0e3a2b8 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1943,6 +1943,14 @@
 }
 
 
+#if V8_TARGET_ARCH_ARM
+void Heap::CreateDirectCEntryStub() {
+  DirectCEntryStub stub;
+  set_direct_c_entry_code(*stub.GetCode());
+}
+#endif
+
+
 void Heap::CreateFixedStubs() {
   // Here we create roots for fixed stubs. They are needed at GC
   // for cooking and uncooking (check out frames.cc).
@@ -1963,6 +1971,9 @@
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
   Heap::CreateRegExpCEntryStub();
 #endif
+#if V8_TARGET_ARCH_ARM
+  Heap::CreateDirectCEntryStub();
+#endif
 }
 
 
diff --git a/src/heap.h b/src/heap.h
index e051b50..dcd813b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -122,7 +122,12 @@
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #define STRONG_ROOT_LIST(V)                                                    \
   UNCONDITIONAL_STRONG_ROOT_LIST(V)                                            \
-  V(Code, re_c_entry_code, RegExpCEntryCode)
+  V(Code, re_c_entry_code, RegExpCEntryCode)                                   \
+  V(Code, direct_c_entry_code, DirectCEntryCode)
+#elif V8_TARGET_ARCH_ARM
+#define STRONG_ROOT_LIST(V)                                                    \
+  UNCONDITIONAL_STRONG_ROOT_LIST(V)                                            \
+  V(Code, direct_c_entry_code, DirectCEntryCode)
 #else
 #define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
 #endif
@@ -178,6 +183,7 @@
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
   V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized")                 \
   V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized")               \
+  V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray")                   \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -1319,12 +1325,13 @@
   static bool CreateInitialMaps();
   static bool CreateInitialObjects();
 
-  // These four Create*EntryStub functions are here and forced to not be inlined
+  // These five Create*EntryStub functions are here and forced to not be inlined
   // because of a gcc-4.4 bug that assigns wrong vtable entries.
   NO_INLINE(static void CreateCEntryStub());
   NO_INLINE(static void CreateJSEntryStub());
   NO_INLINE(static void CreateJSConstructEntryStub());
   NO_INLINE(static void CreateRegExpCEntryStub());
+  NO_INLINE(static void CreateDirectCEntryStub());
 
   static void CreateFixedStubs();
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index c24ddfc..0ff41ba 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1193,7 +1193,15 @@
 
 
 void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
-  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+  value()->PrintNameTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
+  context()->PrintNameTo(stream);
+  stream->Add("[%d] = ", slot_index());
+  value()->PrintNameTo(stream);
 }
 
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index eaab8ad..f1093a0 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -98,6 +98,7 @@
   V(CompareJSObjectEq)                         \
   V(CompareMap)                                \
   V(Constant)                                  \
+  V(Context)                                   \
   V(DeleteProperty)                            \
   V(Deoptimize)                                \
   V(Div)                                       \
@@ -129,6 +130,7 @@
   V(Mul)                                       \
   V(ObjectLiteral)                             \
   V(OsrEntry)                                  \
+  V(OuterContext)                              \
   V(Parameter)                                 \
   V(Power)                                     \
   V(PushArgument)                              \
@@ -139,6 +141,7 @@
   V(Shr)                                       \
   V(Simulate)                                  \
   V(StackCheck)                                \
+  V(StoreContextSlot)                          \
   V(StoreGlobal)                               \
   V(StoreKeyedFastElement)                     \
   V(StoreKeyedGeneric)                         \
@@ -163,6 +166,7 @@
   V(GlobalVars)                                \
   V(Maps)                                      \
   V(ArrayLengths)                              \
+  V(ContextSlots)                              \
   V(OsrEntries)
 
 #define DECLARE_INSTRUCTION(type)                   \
@@ -1060,12 +1064,39 @@
 };
 
 
-class HGlobalObject: public HInstruction {
+class HContext: public HInstruction {
  public:
-  HGlobalObject() {
+  HContext() {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnCalls);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HOuterContext: public HUnaryOperation {
+ public:
+  explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HGlobalObject: public HUnaryOperation {
+ public:
+  explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
@@ -1075,12 +1106,12 @@
 };
 
 
-class HGlobalReceiver: public HInstruction {
+class HGlobalReceiver: public HUnaryOperation {
  public:
-  HGlobalReceiver() {
+  explicit HGlobalReceiver(HValue* global_object)
+      : HUnaryOperation(global_object) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnCalls);
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
@@ -2613,35 +2644,66 @@
 };
 
 
-class HLoadContextSlot: public HInstruction {
+class HLoadContextSlot: public HUnaryOperation {
  public:
-  HLoadContextSlot(int context_chain_length , int slot_index)
-      : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+  HLoadContextSlot(HValue* context, int slot_index)
+      : HUnaryOperation(context), slot_index_(slot_index) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnCalls);
+    SetFlag(kDependsOnContextSlots);
   }
 
-  int context_chain_length() const { return context_chain_length_; }
   int slot_index() const { return slot_index_; }
 
-  virtual void PrintDataTo(StringStream* stream) const;
-
-  virtual intptr_t Hashcode() const {
-    return context_chain_length() * 29 + slot_index();
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
   }
 
+  virtual void PrintDataTo(StringStream* stream) const;
+
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
 
  protected:
   virtual bool DataEquals(HValue* other) const {
     HLoadContextSlot* b = HLoadContextSlot::cast(other);
-    return (context_chain_length() == b->context_chain_length())
-        && (slot_index() == b->slot_index());
+    return (slot_index() == b->slot_index());
   }
 
  private:
-  int context_chain_length_;
+  int slot_index_;
+};
+
+
+static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+  return !value->type().IsSmi() &&
+      !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+}
+
+
+class HStoreContextSlot: public HBinaryOperation {
+ public:
+  HStoreContextSlot(HValue* context, int slot_index, HValue* value)
+      : HBinaryOperation(context, value), slot_index_(slot_index) {
+    SetFlag(kChangesContextSlots);
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+  int slot_index() const { return slot_index_; }
+
+  bool NeedsWriteBarrier() const {
+    return StoringValueNeedsWriteBarrier(value());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+
+ private:
   int slot_index_;
 };
 
@@ -2777,12 +2839,6 @@
 };
 
 
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
-  return !value->type().IsSmi() &&
-      !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
 class HStoreNamed: public HBinaryOperation {
  public:
   HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
@@ -2800,10 +2856,6 @@
   HValue* value() const { return OperandAt(1); }
   void set_value(HValue* value) { SetOperandAt(1, value); }
 
-  bool NeedsWriteBarrier() const {
-    return StoringValueNeedsWriteBarrier(value());
-  }
-
   DECLARE_INSTRUCTION(StoreNamed)
 
  private:
@@ -2831,7 +2883,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
 
   virtual Representation RequiredInputRepresentation(int index) const {
-    return  Representation::Tagged();
+    return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream) const;
 
@@ -2840,6 +2892,10 @@
   Handle<Map> transition() const { return transition_; }
   void set_transition(Handle<Map> map) { transition_ = map; }
 
+  bool NeedsWriteBarrier() const {
+    return StoringValueNeedsWriteBarrier(value());
+  }
+
  private:
   bool is_in_object_;
   int offset_;
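
StoringValueNeedsWriteBarrier, hoisted above so that both HStoreContextSlot and HStoreNamedField can share it, encodes the usual generational-GC rule: a store needs a barrier only if it might create a pointer the collector has to track, and neither a smi (an immediate, not a heap pointer) nor a constant already sitting in old space can do that. A hedged restatement with simplified types:

    // Simplified model; illustrative only.
    struct ValueInfo {
      bool is_smi;              // immediate integer, not a heap pointer
      bool is_old_space_const;  // constant already allocated in old space
    };

    static bool NeedsWriteBarrierModel(const ValueInfo& v) {
      // Only a store that may introduce an old-to-new pointer needs one.
      return !v.is_smi && !v.is_old_space_const;
    }
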
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 9d5aa2b..4044f7f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -2955,6 +2955,19 @@
 }
 
 
+HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+  ASSERT(var->IsContextSlot());
+  HInstruction* context = new HContext;
+  AddInstruction(context);
+  int length = graph()->info()->scope()->ContextChainLength(var->scope());
+  while (length-- > 0) {
+    context = new HOuterContext(context);
+    AddInstruction(context);
+  }
+  return context;
+}
+
+
 void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
   Variable* variable = expr->AsVariable();
   if (variable == NULL) {
@@ -2968,16 +2981,9 @@
     if (variable->mode() == Variable::CONST) {
       BAILOUT("reference to const context slot");
     }
-    Slot* slot = variable->AsSlot();
-    CompilationInfo* info = graph()->info();
-    int context_chain_length = info->function()->scope()->
-        ContextChainLength(slot->var()->scope());
-    ASSERT(context_chain_length >= 0);
-    // TODO(antonm): if slot's value is not modified by closures, instead
-    // of reading it out of context, we could just embed the value as
-    // a constant.
-    HLoadContextSlot* instr =
-        new HLoadContextSlot(context_chain_length, slot->index());
+    HValue* context = BuildContextChainWalk(variable);
+    int index = variable->AsSlot()->index();
+    HLoadContextSlot* instr = new HLoadContextSlot(context, index);
     ast_context()->ReturnInstruction(instr, expr->id());
   } else if (variable->is_global()) {
     LookupResult lookup;
@@ -3515,35 +3521,48 @@
     if (proxy->IsArguments()) BAILOUT("assignment to arguments");
 
     // Handle the assignment.
-    if (var->is_global()) {
+    if (var->IsStackAllocated()) {
+      HValue* value = NULL;
+      // Handle stack-allocated variables on the right-hand side directly.
+      // We do not allow the arguments object to occur in a context where it
+      // may escape, but assignments to stack-allocated locals are
+      // permitted.  Handling such assignments here bypasses the check for
+      // the arguments object in VisitVariableProxy.
+      Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
+      if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
+        value = environment()->Lookup(rhs_var);
+      } else {
+        VISIT_FOR_VALUE(expr->value());
+        value = Pop();
+      }
+      Bind(var, value);
+      ast_context()->ReturnValue(value);
+
+    } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
+      VISIT_FOR_VALUE(expr->value());
+      HValue* context = BuildContextChainWalk(var);
+      int index = var->AsSlot()->index();
+      HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+      AddInstruction(instr);
+      if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      ast_context()->ReturnValue(Pop());
+
+    } else if (var->is_global()) {
       VISIT_FOR_VALUE(expr->value());
       HandleGlobalVariableAssignment(var,
                                      Top(),
                                      expr->position(),
                                      expr->AssignmentId());
-    } else if (var->IsStackAllocated()) {
-      // We allow reference to the arguments object only in assignemtns
-      // to local variables to make sure that the arguments object does
-      // not escape and is not modified.
-      VariableProxy* rhs = expr->value()->AsVariableProxy();
-      if (rhs != NULL &&
-          rhs->var()->IsStackAllocated() &&
-          environment()->Lookup(rhs->var())->CheckFlag(HValue::kIsArguments)) {
-        Push(environment()->Lookup(rhs->var()));
-      } else {
-        VISIT_FOR_VALUE(expr->value());
-      }
-      Bind(proxy->var(), Top());
+      ast_context()->ReturnValue(Pop());
+
     } else {
-      BAILOUT("Assigning to no non-stack-allocated/non-global variable");
+      BAILOUT("assignment to LOOKUP or const CONTEXT variable");
     }
-    // Return the value.
-    ast_context()->ReturnValue(Pop());
 
   } else if (prop != NULL) {
     HandlePropertyAssignment(expr);
   } else {
-    BAILOUT("unsupported invalid lhs");
+    BAILOUT("invalid left-hand side in assignment");
   }
 }
 
@@ -4422,7 +4441,10 @@
       if (known_global_function) {
         // Push the global object instead of the global receiver because
         // code generated by the full code generator expects it.
-        PushAndAdd(new HGlobalObject);
+        HContext* context = new HContext;
+        HGlobalObject* global_object = new HGlobalObject(context);
+        AddInstruction(context);
+        PushAndAdd(global_object);
         VisitArgumentList(expr->arguments());
         CHECK_BAILOUT;
 
@@ -4431,7 +4453,7 @@
         AddInstruction(new HCheckFunction(function, expr->target()));
 
         // Replace the global object with the global receiver.
-        HGlobalReceiver* global_receiver = new HGlobalReceiver;
+        HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
         // Index of the receiver from the top of the expression stack.
         const int receiver_index = argument_count - 1;
         AddInstruction(global_receiver);
@@ -4458,7 +4480,9 @@
 
         call = new HCallKnownGlobal(expr->target(), argument_count);
       } else {
-        PushAndAdd(new HGlobalObject);
+        HContext* context = new HContext;
+        AddInstruction(context);
+        PushAndAdd(new HGlobalObject(context));
         VisitArgumentList(expr->arguments());
         CHECK_BAILOUT;
 
@@ -4466,7 +4490,11 @@
       }
 
     } else {
-      PushAndAdd(new HGlobalReceiver);
+      HContext* context = new HContext;
+      HGlobalObject* global_object = new HGlobalObject(context);
+      AddInstruction(context);
+      AddInstruction(global_object);
+      PushAndAdd(new HGlobalReceiver(global_object));
       VisitArgumentList(expr->arguments());
       CHECK_BAILOUT;
 
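BuildContextChainWalk, added above, replaces the old context_chain_length encoding on HLoadContextSlot: it materializes the current context with HContext and then emits one HOuterContext per lexical hop until it reaches the context that owns the slot. A hedged pointer-level model of what that instruction chain computes at run time:

    // Illustrative model: each HOuterContext follows one link of the
    // run-time context chain (in V8, via the closure's context slot).
    struct Ctx { const Ctx* outer; };

    static const Ctx* WalkContextChain(const Ctx* current, int length) {
      // length = ContextChainLength(var->scope()), hops to the owning scope.
      while (length-- > 0) current = current->outer;
      return current;
    }
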
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 952c04f..0178f3f 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -823,6 +823,8 @@
                                HValue* switch_value,
                                CaseClause* clause);
 
+  HValue* BuildContextChainWalk(Variable* var);
+
   void AddCheckConstantFunction(Call* expr,
                                 HValue* receiver,
                                 Handle<Map> receiver_map,
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index a9c3085..cfee970 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -6511,6 +6511,54 @@
 }
 
 
+// Loads an indexed element from a pixel array.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements,
+                                Register untagged_key,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  //   key - holds the key and is unchanged (must be a smi).
+  //   elements - is set to the receiver's elements if
+  //       the receiver doesn't have a pixel array or the
+  //       key is not a smi, otherwise it's the elements'
+  //       external pointer.
+  //   untagged_key - is set to the untagged key.
+
+  // Some callers have already verified that the key is a smi.  key_not_smi is
+  // set to NULL as a sentinel for that case.  Otherwise, an explicit check
+  // that the key is a smi is generated.
+  if (key_not_smi != NULL) {
+    __ JumpIfNotSmi(key, key_not_smi);
+  } else {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key);
+    }
+  }
+  __ mov(untagged_key, key);
+  __ SmiUntag(untagged_key);
+
+  // Verify that the receiver has pixel array elements.
+  __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+  // Key must be in range.
+  __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+  __ j(above_equal, out_of_range);  // unsigned check handles negative keys.
+
+  // Perform the indexed load and tag the result as a smi.
+  __ mov(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+  __ movzx_b(result, Operand(elements, untagged_key, times_1, 0));
+  __ SmiTag(result);
+  __ ret(0);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
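
A C-level model of the fast path just emitted: pixel-array elements are raw bytes, the key arrives as a smi, and a single unsigned comparison serves as both the bounds check and the negative-key check. The sketch below is illustrative, not the stub itself:

    #include <stdint.h>

    // Returns false where the stub would jump to a miss or out_of_range label.
    static bool FastPixelArrayLoadModel(const uint8_t* elements,
                                        uint32_t length,
                                        int32_t untagged_key,
                                        int32_t* result) {
      // The unsigned compare rejects negative keys too (above_equal branch).
      if (static_cast<uint32_t>(untagged_key) >= length) return false;
      *result = elements[untagged_key];  // movzx_b: zero-extended byte load
      return true;
    }
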
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index cd18749..2064574 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -490,6 +490,25 @@
 };
 
 
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// then the generated code branches to key_not_smi. Callers can specify NULL
+// for key_not_smi to signal that a smi check has already been performed on
+// key so that the smi check is not generated. If key is not a valid index
+// within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements,
+                                Register untagged_key,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 73c952a..52de32b 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -6102,9 +6102,12 @@
       }
       frame_->PushParameterAt(-1);
 
+      // Push the strict mode flag.
+      frame_->Push(Smi::FromInt(strict_mode_flag()));
+
       // Resolve the call.
       result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
 
       done.Jump(&result);
       slow.Bind();
@@ -6121,8 +6124,11 @@
     }
     frame_->PushParameterAt(-1);
 
+    // Push the strict mode flag.
+    frame_->Push(Smi::FromInt(strict_mode_flag()));
+
     // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
     // If we generated fast-case code bind the jump-target where fast
     // and slow case merge.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 46b12cb..27e3396 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -365,6 +365,7 @@
   // Accessors
   inline bool is_eval();
   inline Scope* scope();
+  inline StrictModeFlag strict_mode_flag();
 
   // Generating deferred code.
   void ProcessDeferred();
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 35c6713..a646052 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -45,6 +45,16 @@
 }
 
 
+static void ZapCodeRange(Address start, Address end) {
+#ifdef DEBUG
+  ASSERT(start <= end);
+  int size = end - start;
+  CodePatcher destroyer(start, size);
+  while (size-- > 0) destroyer.masm()->int3();
+#endif
+}
+
+
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   AssertNoAllocation no_allocation;
 
@@ -52,90 +62,61 @@
 
   // Get the optimized code.
   Code* code = function->code();
-
-  // For each return after a safepoint insert a absolute call to the
-  // corresponding deoptimization entry.
-  unsigned last_pc_offset = 0;
-  SafepointTable table(function->code());
+  Address code_start_address = code->instruction_start();
 
   // We will overwrite the code's relocation info in-place. Relocation info
-  // is written backward. The relocation info is the payload of a byte array.
-  // Later on we will align this at the start of the byte array and create
-  // a trash byte array of the remaining space.
+  // is written backward. The relocation info is the payload of a byte
+  // array.  Later on we will slide this to the start of the byte array and
+  // create a filler object in the remaining space.
   ByteArray* reloc_info = code->relocation_info();
-  Address end_address = reloc_info->address() + reloc_info->Size();
-  RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
+  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
 
-  for (unsigned i = 0; i < table.length(); i++) {
-    unsigned pc_offset = table.GetPcOffset(i);
+  // For each return after a safepoint insert a call to the corresponding
+  // deoptimization entry.  Since the call is a relative encoding, write new
+  // reloc info.  We do not need any of the existing reloc info because the
+  // existing code will not be used again (we zap it in debug builds).
+  SafepointTable table(code);
+  Address prev_address = code_start_address;
+  for (unsigned i = 0; i < table.length(); ++i) {
+    Address curr_address = code_start_address + table.GetPcOffset(i);
+    ZapCodeRange(prev_address, curr_address);
+
     SafepointEntry safepoint_entry = table.GetEntry(i);
     int deoptimization_index = safepoint_entry.deoptimization_index();
-    int gap_code_size = safepoint_entry.gap_code_size();
-#ifdef DEBUG
-    // Destroy the code which is not supposed to run again.
-    unsigned instructions = pc_offset - last_pc_offset;
-    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                          instructions);
-    for (unsigned i = 0; i < instructions; i++) {
-      destroyer.masm()->int3();
-    }
-#endif
-    last_pc_offset = pc_offset;
     if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      last_pc_offset += gap_code_size;
-      Address call_pc = code->instruction_start() + last_pc_offset;
-      CodePatcher patcher(call_pc, patch_size());
-      Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
-      patcher.masm()->call(entry, RelocInfo::NONE);
-      last_pc_offset += patch_size();
-      RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
-                      reinterpret_cast<intptr_t>(entry));
+      // The gap code is needed to get to the state expected at the bailout.
+      curr_address += safepoint_entry.gap_code_size();
+
+      CodePatcher patcher(curr_address, patch_size());
+      Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+      patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+
+      // We use RUNTIME_ENTRY for deoptimization bailouts.
+      RelocInfo rinfo(curr_address + 1,  // 1 after the call opcode.
+                      RelocInfo::RUNTIME_ENTRY,
+                      reinterpret_cast<intptr_t>(deopt_entry));
       reloc_info_writer.Write(&rinfo);
+
+      curr_address += patch_size();
     }
+    prev_address = curr_address;
   }
-#ifdef DEBUG
-  // Destroy the code which is not supposed to run again.
-  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
-  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                        instructions);
-  for (unsigned i = 0; i < instructions; i++) {
-    destroyer.masm()->int3();
-  }
-#endif
+  ZapCodeRange(prev_address,
+               code_start_address + code->safepoint_table_offset());
 
   // Move the relocation info to the beginning of the byte array.
-  int reloc_size = end_address - reloc_info_writer.pos();
-  memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
+  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+  memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
 
   // The relocation info is in place, update the size.
-  reloc_info->set_length(reloc_size);
+  reloc_info->set_length(new_reloc_size);
 
   // Handle the junk part after the new relocation info. We will create
   // a non-live object in the extra space at the end of the former reloc info.
-  Address junk = reloc_info->address() + reloc_info->Size();
-  ASSERT(junk <= end_address);
-
-  if (end_address - junk <= ByteArray::kHeaderSize) {
-    // We get in here if there is not enough space for a ByteArray.
-
-    // Both addresses are kPointerSize alligned.
-    CHECK_EQ((end_address - junk) % 4, 0);
-    Map* filler_map = Heap::one_pointer_filler_map();
-    while (junk < end_address) {
-      HeapObject::FromAddress(junk)->set_map(filler_map);
-      junk += kPointerSize;
-    }
-  } else {
-    int size = end_address - junk;
-    // Since the reloc_end address and junk are both alligned, we shouild,
-    // never have junk which is not a multipla of kPointerSize.
-    CHECK_EQ(size % kPointerSize, 0);
-    CHECK_GT(size, 0);
-    HeapObject* junk_object = HeapObject::FromAddress(junk);
-    junk_object->set_map(Heap::byte_array_map());
-    int length = ByteArray::LengthFor(end_address - junk);
-    ByteArray::cast(junk_object)->set_length(length);
-  }
+  Address junk_address = reloc_info->address() + reloc_info->Size();
+  ASSERT(junk_address <= reloc_end_address);
+  Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 3ef8159..3c094a4 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -206,45 +206,48 @@
     Move(dot_arguments_slot, ecx, ebx, edx);
   }
 
-  { Comment cmnt(masm_, "[ Declarations");
-    // For named function expressions, declare the function name as a
-    // constant.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
-    }
-    // Visit all the explicit declarations unless there is an illegal
-    // redeclaration.
-    if (scope()->HasIllegalRedeclaration()) {
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      VisitDeclarations(scope()->declarations());
-    }
-  }
-
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
 
-  { Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailout(info->function(), NO_REGISTERS);
-    NearLabel ok;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit();
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(above_equal, &ok, taken);
-    StackCheckStub stub;
-    __ CallStub(&stub);
-    __ bind(&ok);
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    { Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+
+    { Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailout(info->function(), NO_REGISTERS);
+      NearLabel ok;
+      ExternalReference stack_limit =
+          ExternalReference::address_of_stack_limit();
+      __ cmp(esp, Operand::StaticVariable(stack_limit));
+      __ j(above_equal, &ok, taken);
+      StackCheckStub stub;
+      __ CallStub(&stub);
+      __ bind(&ok);
+    }
+
+    { Comment cmnt(masm_, "[ Body");
+      ASSERT(loop_depth() == 0);
+      VisitStatements(function()->body());
+      ASSERT(loop_depth() == 0);
+    }
   }
 
-  { Comment cmnt(masm_, "[ Body");
-    ASSERT(loop_depth() == 0);
-    VisitStatements(function()->body());
-    ASSERT(loop_depth() == 0);
-  }
-
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
   { Comment cmnt(masm_, "[ return <undefined>;");
-    // Emit a 'return undefined' in case control fell off the end of the body.
     __ mov(eax, Factory::undefined_value());
     EmitReturnSequence();
   }
@@ -610,7 +613,7 @@
   __ mov(location, src);
   // Emit the write barrier code if the location is in the heap.
   if (dst->type() == Slot::CONTEXT) {
-    int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+    int offset = Context::SlotOffset(dst->index());
     __ RecordWrite(scratch1, offset, src, scratch2);
   }
 }
@@ -666,10 +669,11 @@
         // We bypass the general EmitSlotSearch because we know more about
         // this specific context.
 
-        // The variable in the decl always resides in the current context.
+        // The variable in the decl always resides in the current function
+        // context.
         ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
         if (FLAG_debug_code) {
-          // Check if we have the correct context pointer.
+          // Check that we're not inside a 'with'.
           __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
           __ cmp(ebx, Operand(esi));
           __ Check(equal, "Unexpected declaration in current context.");
@@ -1124,8 +1128,11 @@
   // Check that last extension is NULL.
   __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
   __ j(not_equal, slow);
-  __ mov(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(temp, slot->index());
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return an esi-based operand (the write barrier cannot be allowed to
+  // destroy the esi register).
+  return ContextOperand(context, slot->index());
 }
 
 
@@ -2000,57 +2007,75 @@
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
-  } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
-    // Perform the assignment for non-const variables and for initialization
-    // of const variables.  Const assignments are simply skipped.
-    Label done;
+  } else if (op == Token::INIT_CONST) {
+    // Like var declarations, const declarations are hoisted to function
+    // scope.  However, unlike var initializers, const initializers are able
+    // to drill a hole to that function context, even from inside a 'with'
+    // context.  We thus bypass the normal static scope lookup.
+    Slot* slot = var->AsSlot();
+    Label skip;
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+        // No const parameters.
+        UNREACHABLE();
+        break;
+      case Slot::LOCAL:
+        __ mov(edx, Operand(ebp, SlotOffset(slot)));
+        __ cmp(edx, Factory::the_hole_value());
+        __ j(not_equal, &skip);
+        __ mov(Operand(ebp, SlotOffset(slot)), eax);
+        break;
+      case Slot::CONTEXT: {
+        __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
+        __ mov(edx, ContextOperand(ecx, slot->index()));
+        __ cmp(edx, Factory::the_hole_value());
+        __ j(not_equal, &skip);
+        __ mov(ContextOperand(ecx, slot->index()), eax);
+        int offset = Context::SlotOffset(slot->index());
+        __ mov(edx, eax);  // Preserve the stored value in eax.
+        __ RecordWrite(ecx, offset, edx, ebx);
+        break;
+      }
+      case Slot::LOOKUP:
+        __ push(eax);
+        __ push(esi);
+        __ push(Immediate(var->name()));
+        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        break;
+    }
+    __ bind(&skip);
+
+  } else if (var->mode() != Variable::CONST) {
+    // Perform the assignment for non-const variables.  Const assignments
+    // are simply skipped.
     Slot* slot = var->AsSlot();
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (op == Token::INIT_CONST) {
-          // Detect const reinitialization by checking for the hole value.
-          __ mov(edx, Operand(ebp, SlotOffset(slot)));
-          __ cmp(edx, Factory::the_hole_value());
-          __ j(not_equal, &done);
-        }
         // Perform the assignment.
         __ mov(Operand(ebp, SlotOffset(slot)), eax);
         break;
 
       case Slot::CONTEXT: {
         MemOperand target = EmitSlotSearch(slot, ecx);
-        if (op == Token::INIT_CONST) {
-          // Detect const reinitialization by checking for the hole value.
-          __ mov(edx, target);
-          __ cmp(edx, Factory::the_hole_value());
-          __ j(not_equal, &done);
-        }
         // Perform the assignment and issue the write barrier.
         __ mov(target, eax);
         // The value of the assignment is in eax.  RecordWrite clobbers its
         // register arguments.
         __ mov(edx, eax);
-        int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+        int offset = Context::SlotOffset(slot->index());
         __ RecordWrite(ecx, offset, edx, ebx);
         break;
       }
 
       case Slot::LOOKUP:
-        // Call the runtime for the assignment.  The runtime will ignore
-        // const reinitialization.
+        // Call the runtime for the assignment.
         __ push(eax);  // Value.
         __ push(esi);  // Context.
         __ push(Immediate(var->name()));
-        if (op == Token::INIT_CONST) {
-          // The runtime will ignore const redeclaration.
-          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-        } else {
-          __ CallRuntime(Runtime::kStoreContextSlot, 3);
-        }
+        __ CallRuntime(Runtime::kStoreContextSlot, 3);
         break;
     }
-    __ bind(&done);
   }
 }
 
@@ -2270,7 +2295,9 @@
 
       // Push the receiver of the enclosing function and do runtime call.
       __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
-      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+      // Push the strict mode flag.
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.
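
Note on the INIT_CONST path above: the generated code pre-fills a const slot with the hole value and performs the one-time initialization only while the hole is still present. A minimal C++ sketch of the pattern, using stand-in names rather than the real V8 API:

    // Hedged sketch: one-time const initialization via a hole sentinel.
    struct Object;
    extern Object* kTheHole;  // stand-in for Factory::the_hole_value()

    void InitializeConstSlot(Object** slot, Object* value) {
      if (*slot == kTheHole) {  // slot was never initialized
        *slot = value;          // perform the one-time assignment
      }
      // Otherwise skip silently: re-initializing a const is a no-op,
      // matching the j(not_equal, &skip) branches in the assembly above.
    }
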
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index c136977..3fe8fdb 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -556,19 +556,15 @@
   __ ret(0);
 
   __ bind(&check_pixel_array);
-  // Check whether the elements is a pixel array.
-  // edx: receiver
-  // eax: key
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ mov(ebx, eax);
-  __ SmiUntag(ebx);
-  __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
-  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
-  __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
-  __ SmiTag(eax);
-  __ ret(0);
+  GenerateFastPixelArrayLoad(masm,
+                             edx,
+                             eax,
+                             ecx,
+                             ebx,
+                             eax,
+                             &check_number_dictionary,
+                             NULL,
+                             &slow);
 
   __ bind(&check_number_dictionary);
   // Check whether the elements is a number dictionary.
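
Note: the GenerateFastPixelArrayLoad call replaces the inline sequence deleted above. Judging from those deleted lines, the fast path untags the smi key, bounds-checks it against the array length, loads one byte from the external buffer, and re-tags the result as a smi. A hedged C++ sketch of that logic, with illustrative types:

    #include <stdint.h>

    // Field names are illustrative, not V8's PixelArray layout.
    struct PixelArraySketch {
      int length;
      uint8_t* external_pointer;  // backing store outside the JS heap
    };

    // Returns -1 where the generated stub would jump to the slow path.
    int LoadPixelElement(const PixelArraySketch& a, int untagged_key) {
      // The unsigned compare mirrors the j(above_equal, &slow) bounds check.
      if (static_cast<unsigned>(untagged_key) >=
          static_cast<unsigned>(a.length)) {
        return -1;
      }
      return a.external_pointer[untagged_key];  // byte load; re-tag as smi
    }
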
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index ae8fe8d..9e4bada 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -77,7 +77,7 @@
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
   code->set_stack_slots(StackSlotCount());
-  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
 
@@ -1914,10 +1914,21 @@
 
 
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
-  // TODO(antonm): load a context with a separate instruction.
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ LoadContext(result, instr->context_chain_length());
-  __ mov(result, ContextOperand(result, instr->slot_index()));
+  __ mov(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  __ mov(ContextOperand(context, instr->slot_index()), value);
+  if (instr->needs_write_barrier()) {
+    Register temp = ToRegister(instr->TempAt(0));
+    int offset = Context::SlotOffset(instr->slot_index());
+    __ RecordWrite(context, offset, value, temp);
+  }
 }
 
 
@@ -2142,6 +2153,9 @@
   ASSERT(receiver.is(eax));
   v8::internal::ParameterCount actual(eax);
   __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+
+  // Restore context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -2155,16 +2169,31 @@
 }
 
 
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, esi);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+  __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
 }
 
 
 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register global = ToRegister(instr->global());
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
 }
 
 
@@ -3406,7 +3435,7 @@
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
-  bool pretenure = !instr->hydrogen()->pretenure();
+  bool pretenure = instr->hydrogen()->pretenure();
   if (shared_info->num_literals() == 0 && !pretenure) {
     FastNewClosureStub stub;
     __ push(Immediate(shared_info));
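
Note on DoStoreContextSlot above: the value is copied into a spare register because RecordWrite clobbers its register arguments. Conceptually the barrier records the written slot so the generational GC can rescan it later; a hedged sketch of that idea (a model, not V8's RecordWrite):

    #include <cstddef>
    #include <set>

    std::set<void**> remembered_set;  // slots that may point into new space

    // Assumed predicate; a real collector checks the page of the address.
    static bool InNewSpace(void* p) { return p != NULL; }

    // Store plus write barrier: remember the slot for the next scavenge.
    void StoreWithBarrier(void** slot, void* value) {
      *slot = value;
      if (InNewSpace(value)) remembered_set.insert(slot);
    }
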
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 2070367..b31c4eb 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -29,6 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
+#include "lithium-allocator-inl.h"
 #include "ia32/lithium-ia32.h"
 #include "ia32/lithium-codegen-ia32.h"
 
@@ -68,11 +69,35 @@
 }
 
 
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as
+  // temporaries and outputs because all registers
+  // are blocked by the calling convention.
+  // Inputs can use either fixed register or have a short lifetime (be
+  // used at start of the instruction).
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           LUnallocated::cast(operand)->IsUsedAtStart() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+  for (TempIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+}
+#endif
+
+
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
-  if (HasResult()) {
-    PrintOutputOperandTo(stream);
-  }
+
+  PrintOutputOperandTo(stream);
 
   PrintDataTo(stream);
 
@@ -268,7 +293,15 @@
 
 
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
 }
 
 
@@ -391,7 +424,7 @@
 }
 
 
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LGap* gap = new LGap(block);
   int index = -1;
   if (instr->IsControl()) {
@@ -407,7 +440,6 @@
     pointer_maps_.Add(instr->pointer_map());
     instr->pointer_map()->set_lithium_position(index);
   }
-  return index;
 }
 
 
@@ -675,7 +707,10 @@
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  allocator_->MarkAsCall();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
   if (hinstr->HasSideEffects()) {
@@ -700,7 +735,7 @@
 
 
 LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  allocator_->MarkAsSaveDoubles();
+  instr->MarkAsSaveDoubles();
   return instr;
 }
 
@@ -909,7 +944,6 @@
 void LChunkBuilder::VisitInstruction(HInstruction* current) {
   HInstruction* old_current = current_instruction_;
   current_instruction_ = current;
-  allocator_->BeginInstruction();
   if (current->has_position()) position_ = current->position();
   LInstruction* instr = current->CompileToLithium(this);
 
@@ -932,11 +966,7 @@
       instr->set_hydrogen_value(current);
     }
 
-    int index = chunk_->AddInstruction(instr, current_block_);
-    allocator_->SummarizeInstruction(index);
-  } else {
-    // This instruction should be omitted.
-    allocator_->OmitInstruction();
+    chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
 }
@@ -1140,13 +1170,26 @@
 }
 
 
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
+}
+
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new LGlobalObject);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
-  return DefineAsRegister(new LGlobalReceiver);
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
 
 
@@ -1658,7 +1701,25 @@
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
-  return DefineAsRegister(new LLoadContextSlot);
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  LOperand* temp;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+    temp = TempRegister();
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+    temp = NULL;
+  }
+  return new LStoreContextSlot(context, value, temp);
 }
 
 
@@ -1756,7 +1817,8 @@
   // We only need a scratch register if we have a write barrier or we
   // have a store into the properties array (not in-object-property).
   LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
-      ? TempRegister() : NULL;
+      ? TempRegister()
+      : NULL;
 
   return new LStoreNamedField(obj, val, temp);
 }
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 1adc13e..f643dac 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -39,118 +39,6 @@
 // Forward declarations.
 class LCodeGen;
 
-
-// Type hierarchy:
-//
-// LInstruction
-//   LTemplateInstruction
-//     LControlInstruction
-//       LBranch
-//       LClassOfTestAndBranch
-//       LCmpJSObjectEqAndBranch
-//       LCmpIDAndBranch
-//       LHasCachedArrayIndexAndBranch
-//       LHasInstanceTypeAndBranch
-//       LInstanceOfAndBranch
-//       LIsNullAndBranch
-//       LIsObjectAndBranch
-//       LIsSmiAndBranch
-//       LTypeofIsAndBranch
-//     LAccessArgumentsAt
-//     LArgumentsElements
-//     LArgumentsLength
-//     LAddI
-//     LApplyArguments
-//     LArithmeticD
-//     LArithmeticT
-//     LBitI
-//     LBoundsCheck
-//     LCmpID
-//     LCmpJSObjectEq
-//     LCmpT
-//     LDivI
-//     LInstanceOf
-//     LInstanceOfKnownGlobal
-//     LLoadKeyedFastElement
-//     LLoadKeyedGeneric
-//     LModI
-//     LMulI
-//     LPower
-//     LShiftI
-//     LSubI
-//     LCallConstantFunction
-//     LCallFunction
-//     LCallGlobal
-//     LCallKeyed
-//     LCallKnownGlobal
-//     LCallNamed
-//     LCallRuntime
-//     LCallStub
-//     LConstant
-//       LConstantD
-//       LConstantI
-//       LConstantT
-//     LDeoptimize
-//     LFunctionLiteral
-//     LGap
-//       LLabel
-//     LGlobalObject
-//     LGlobalReceiver
-//     LGoto
-//     LLazyBailout
-//     LLoadGlobal
-//     LCheckPrototypeMaps
-//     LLoadContextSlot
-//     LArrayLiteral
-//     LObjectLiteral
-//     LRegExpLiteral
-//     LOsrEntry
-//     LParameter
-//     LRegExpConstructResult
-//     LStackCheck
-//     LStoreKeyed
-//       LStoreKeyedFastElement
-//       LStoreKeyedGeneric
-//     LStoreNamed
-//       LStoreNamedField
-//       LStoreNamedGeneric
-//     LStringCharCodeAt
-//     LBitNotI
-//     LCallNew
-//     LCheckFunction
-//     LCheckPrototypeMaps
-//     LCheckInstanceType
-//     LCheckMap
-//     LCheckSmi
-//     LClassOfTest
-//     LDeleteProperty
-//     LDoubleToI
-//     LFixedArrayLength
-//     LHasCachedArrayIndex
-//     LHasInstanceType
-//     LInteger32ToDouble
-//     LIsNull
-//     LIsObject
-//     LIsSmi
-//     LJSArrayLength
-//     LLoadNamedField
-//     LLoadNamedGeneric
-//     LLoadFunctionPrototype
-//     LNumberTagD
-//     LNumberTagI
-//     LPushArgument
-//     LReturn
-//     LSmiTag
-//     LStoreGlobal
-//     LStringLength
-//     LTaggedToI
-//     LThrow
-//     LTypeof
-//     LTypeofIs
-//     LUnaryMathOperation
-//     LValueOf
-//     LUnknownOSRValue
-
 #define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
   V(ControlInstruction)                         \
   V(Constant)                                   \
@@ -187,6 +75,8 @@
   V(CheckMap)                                   \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
   V(CmpID)                                      \
   V(CmpIDAndBranch)                             \
   V(CmpJSObjectEq)                              \
@@ -197,16 +87,21 @@
   V(ConstantD)                                  \
   V(ConstantI)                                  \
   V(ConstantT)                                  \
+  V(Context)                                    \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
+  V(FixedArrayLength)                           \
   V(FunctionLiteral)                            \
   V(Gap)                                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(FixedArrayLength)                           \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
@@ -218,22 +113,16 @@
   V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
   V(JSArrayLength)                              \
-  V(HasInstanceType)                            \
-  V(HasInstanceTypeAndBranch)                   \
-  V(HasCachedArrayIndex)                        \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(ClassOfTest)                                \
-  V(ClassOfTestAndBranch)                       \
   V(Label)                                      \
   V(LazyBailout)                                \
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
+  V(LoadFunctionPrototype)                      \
   V(LoadGlobal)                                 \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadNamedField)                             \
   V(LoadNamedGeneric)                           \
-  V(LoadFunctionPrototype)                      \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -241,6 +130,7 @@
   V(NumberUntagD)                               \
   V(ObjectLiteral)                              \
   V(OsrEntry)                                   \
+  V(OuterContext)                               \
   V(Parameter)                                  \
   V(Power)                                      \
   V(PushArgument)                               \
@@ -250,6 +140,7 @@
   V(SmiTag)                                     \
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
+  V(StoreContextSlot)                           \
   V(StoreGlobal)                                \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
@@ -291,7 +182,10 @@
 class LInstruction: public ZoneObject {
  public:
   LInstruction()
-      : hydrogen_value_(NULL) { }
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        is_call_(false),
+        is_save_doubles_(false) { }
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -308,15 +202,14 @@
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
 
-  void set_environment(LEnvironment* env) { environment_.set(env); }
-  LEnvironment* environment() const { return environment_.get(); }
-  bool HasEnvironment() const { return environment_.is_set(); }
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
 
   void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }
 
-  virtual bool HasResult() const = 0;
 
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -331,11 +224,35 @@
     return deoptimization_environment_.is_set();
   }
 
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
  private:
-  SetOncePointer<LEnvironment> environment_;
+  LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
   SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
 
 
@@ -362,6 +279,11 @@
  public:
   int length() { return 0; }
   void PrintOperandsTo(StringStream* stream) { }
+  ElementType& operator[](int i) {
+    UNREACHABLE();
+    static ElementType t = 0;
+    return t;
+  }
 };
 
 
@@ -1286,18 +1208,42 @@
 };
 
 
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
   DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
 
-  int context_chain_length() { return hydrogen()->context_chain_length(); }
+  LOperand* context() { return InputAt(0); }
   int slot_index() { return hydrogen()->slot_index(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
 
 
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* value() { return InputAt(1); }
+  int slot_index() { return hydrogen()->slot_index(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LPushArgument: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
@@ -1308,15 +1254,45 @@
 };
 
 
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
 };
 
 
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LOuterContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalObject(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalReceiver(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+  LOperand* global() { return InputAt(0); }
 };
 
 
@@ -1815,7 +1791,7 @@
       pointer_maps_(8),
       inlined_closures_(1) { }
 
-  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
   LConstantOperand* DefineConstantOperand(HConstant* constant);
   Handle<Object> LookupLiteral(LConstantOperand* operand) const;
   Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
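
Note: the three LTemplateInstruction parameters count results, inputs, and temps, so the new LStoreContextSlot<0, 2, 1> has no result, two inputs (context and value), and one write-barrier temp. The operator[] added above serves the zero-length case; a hedged sketch of that trick:

    // Simplified; the real class and names differ.
    template <typename T, int N>
    struct OperandArray {
      T& operator[](int i) { return elems_[i]; }
      T elems_[N];
    };

    template <typename T>
    struct OperandArray<T, 0> {  // instructions with no operands of a kind
      T& operator[](int) {
        static T dummy = 0;      // never meaningfully reached; the diff
        return dummy;            // asserts UNREACHABLE() here instead
      }
    };
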
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a4c9b11..ee4e3d9 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1523,11 +1523,21 @@
       mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
       mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
     }
-    // The context may be an intermediate context, not a function context.
-    mov(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    mov(dst, Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in esi).
+    mov(dst, esi);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
+  if (FLAG_debug_code) {
+    cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+    Check(equal, "Yo dawg, I heard you liked function contexts "
+                 "so I put function contexts in all your contexts");
   }
 }
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 6f180c6..12a8923 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -258,6 +258,17 @@
     j(not_carry, is_smi);
   }
 
+  // Jump if the register contains a smi.
+  inline void JumpIfSmi(Register value, Label* smi_label) {
+    test(value, Immediate(kSmiTagMask));
+    j(zero, smi_label, not_taken);
+  }
+  // Jump if the register contains a non-smi.
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+    test(value, Immediate(kSmiTagMask));
+    j(not_zero, not_smi_label, not_taken);
+  }
+
   // Assumes input is a heap object.
   void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 6de9a41..f96ef5c 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -3155,6 +3155,37 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map matches.
+  __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
+
+  GenerateFastPixelArrayLoad(masm(),
+                             edx,
+                             eax,
+                             ecx,
+                             ebx,
+                             eax,
+                             &miss,
+                             &miss,
+                             &miss);
+
+  // Handle load cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
diff --git a/src/ic.cc b/src/ic.cc
index 9a277d6..8e282ad 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1204,23 +1204,31 @@
 
   if (use_ic) {
     Code* stub = generic_stub();
-    if (object->IsString() && key->IsNumber()) {
-      stub = string_stub();
-    } else if (object->IsJSObject()) {
-      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->HasExternalArrayElements()) {
-        MaybeObject* probe =
-            StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false);
-        stub =
-            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
-      } else if (receiver->HasIndexedInterceptor()) {
-        stub = indexed_interceptor_stub();
-      } else if (state == UNINITIALIZED &&
-                 key->IsSmi() &&
-                 receiver->map()->has_fast_elements()) {
-        MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
-        stub =
-            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
+    if (state == UNINITIALIZED) {
+      if (object->IsString() && key->IsNumber()) {
+        stub = string_stub();
+      } else if (object->IsJSObject()) {
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+        if (receiver->HasExternalArrayElements()) {
+          MaybeObject* probe =
+              StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
+                                                              false);
+          stub = probe->IsFailure() ?
+              NULL : Code::cast(probe->ToObjectUnchecked());
+        } else if (receiver->HasIndexedInterceptor()) {
+          stub = indexed_interceptor_stub();
+        } else if (receiver->HasPixelElements()) {
+          MaybeObject* probe =
+              StubCache::ComputeKeyedLoadPixelArray(*receiver);
+          stub = probe->IsFailure() ?
+              NULL : Code::cast(probe->ToObjectUnchecked());
+        } else if (key->IsSmi() &&
+                   receiver->map()->has_fast_elements()) {
+          MaybeObject* probe =
+              StubCache::ComputeKeyedLoadSpecialized(*receiver);
+          stub = probe->IsFailure() ?
+              NULL : Code::cast(probe->ToObjectUnchecked());
+        }
       }
     }
     if (stub != NULL) set_target(stub);
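
Note: gating the receiver-specific stubs on state == UNINITIALIZED means a keyed load IC now specializes at most once before degrading to the generic stub. A hedged sketch of that progression (a model, not the actual IC machinery):

    enum IcStateSketch { UNINITIALIZED_STATE, SPECIALIZED, GENERIC };

    IcStateSketch NextState(IcStateSketch s, bool has_fast_path) {
      if (s == UNINITIALIZED_STATE && has_fast_path) return SPECIALIZED;
      return GENERIC;  // every later miss falls through to the generic stub
    }
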
@@ -2053,6 +2061,8 @@
   }
 
   if (left_type.IsInteger32() && right_type.IsInteger32()) {
+    // Platforms with 32-bit Smis have no distinct INT32 type.
+    if (kSmiValueSize == 32) return SMI;
     return INT32;
   }
 
@@ -2096,9 +2106,11 @@
   }
   if (type == TRBinaryOpIC::SMI &&
       previous_type == TRBinaryOpIC::SMI) {
-    if (op == Token::DIV || op == Token::MUL) {
+    if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
       // Arithmetic on two Smi inputs has yielded a heap number.
       // That is the only way to get here from the Smi stub.
+      // With 32-bit Smis, all overflows give heap numbers, but with
+      // 31-bit Smis, most operations overflow to int32 results.
       result_type = TRBinaryOpIC::HEAP_NUMBER;
     } else {
       // Other operations on SMIs that overflow yield int32s.
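
Note on the two kSmiValueSize == 32 checks above: with 31-bit smis (ia32) an arithmetic result can overflow the smi range yet still fit in an int32, so INT32 is a useful intermediate type; with 32-bit smis the smi range equals the int32 range, and any overflow must become a heap number. A sketch of the underlying range check:

    #include <stdint.h>

    // Does a 64-bit arithmetic result fit an N-bit smi payload?
    bool FitsInSmi(int64_t v, int smi_value_size /* 31 on ia32, 32 on x64 */) {
      const int64_t min = -(static_cast<int64_t>(1) << (smi_value_size - 1));
      const int64_t max = (static_cast<int64_t>(1) << (smi_value_size - 1)) - 1;
      return v >= min && v <= max;
    }
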
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
new file mode 100644
index 0000000..84c5bbd
--- /dev/null
+++ b/src/lithium-allocator-inl.h
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
+#define V8_LITHIUM_ALLOCATOR_INL_H_
+
+#include "lithium-allocator.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
+
+
+LInstruction* LAllocator::InstructionAt(int index) {
+  return chunk_->instructions()->at(index);
+}
+
+
+LGap* LAllocator::GapAt(int index) {
+  return chunk_->GetGapAt(index);
+}
+
+
+TempIterator::TempIterator(LInstruction* instr)
+    : instr_(instr),
+      limit_(instr->TempCount()),
+      current_(0) {
+  current_ = AdvanceToNext(0);
+}
+
+
+bool TempIterator::HasNext() { return current_ < limit_; }
+
+
+LOperand* TempIterator::Next() {
+  ASSERT(HasNext());
+  return instr_->TempAt(current_);
+}
+
+
+int TempIterator::AdvanceToNext(int start) {
+  while (start < limit_ && instr_->TempAt(start) == NULL) start++;
+  return start;
+}
+
+
+void TempIterator::Advance() {
+  current_ = AdvanceToNext(current_ + 1);
+}
+
+
+InputIterator::InputIterator(LInstruction* instr)
+    : instr_(instr),
+      limit_(instr->InputCount()),
+      current_(0) {
+  current_ = AdvanceToNext(0);
+}
+
+
+bool InputIterator::HasNext() { return current_ < limit_; }
+
+
+LOperand* InputIterator::Next() {
+  ASSERT(HasNext());
+  return instr_->InputAt(current_);
+}
+
+
+void InputIterator::Advance() {
+  current_ = AdvanceToNext(current_ + 1);
+}
+
+
+int InputIterator::AdvanceToNext(int start) {
+  while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
+  return start;
+}
+
+
+UseIterator::UseIterator(LInstruction* instr)
+    : input_iterator_(instr), env_iterator_(instr->environment()) { }
+
+
+bool UseIterator::HasNext() {
+  return input_iterator_.HasNext() || env_iterator_.HasNext();
+}
+
+
+LOperand* UseIterator::Next() {
+  ASSERT(HasNext());
+  return input_iterator_.HasNext()
+      ? input_iterator_.Next()
+      : env_iterator_.Next();
+}
+
+
+void UseIterator::Advance() {
+  input_iterator_.HasNext()
+      ? input_iterator_.Advance()
+      : env_iterator_.Advance();
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_LITHIUM_ALLOCATOR_INL_H_
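
Note: these iterators let the allocator walk operands directly off each LInstruction, replacing the InstructionSummary side table deleted later in this patch. The traversal pattern, as used by the lithium-allocator.cc hunks below:

    // Typical traversal (taken from the allocator changes in this patch):
    //   for (TempIterator it(instr); it.HasNext(); it.Advance()) {
    //     LOperand* temp = it.Next();  // NULL temps are skipped
    //   }
    //   for (UseIterator it(instr); it.HasNext(); it.Advance()) {
    //     LOperand* use = it.Next();   // constant inputs are skipped, then
    //   }                              // the environment chain is walked
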
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 81877f3..9f5f1b9 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "lithium-allocator.h"
+#include "lithium-allocator-inl.h"
 
 #include "hydrogen.h"
 #include "string-stream.h"
@@ -532,7 +532,7 @@
 
 void LAllocator::InitializeLivenessAnalysis() {
   // Initialize the live_in sets for each block to NULL.
-  int block_count = graph()->blocks()->length();
+  int block_count = graph_->blocks()->length();
   live_in_sets_.Initialize(block_count);
   live_in_sets_.AddBlock(NULL, block_count);
 }
@@ -613,7 +613,7 @@
   }
   if (is_tagged) {
     TraceAlloc("Fixed reg is tagged at %d\n", pos);
-    LInstruction* instr = chunk_->instructions()->at(pos);
+    LInstruction* instr = InstructionAt(pos);
     if (instr->HasPointerMap()) {
       instr->pointer_map()->RecordPointer(operand);
     }
@@ -668,17 +668,17 @@
 }
 
 
-LGap* LAllocator::GetLastGap(HBasicBlock* block) const {
+LGap* LAllocator::GetLastGap(HBasicBlock* block) {
   int last_instruction = block->last_instruction_index();
   int index = chunk_->NearestGapPos(last_instruction);
-  return chunk_->GetGapAt(index);
+  return GapAt(index);
 }
 
 
 HPhi* LAllocator::LookupPhi(LOperand* operand) const {
   if (!operand->IsUnallocated()) return NULL;
   int index = operand->VirtualRegister();
-  HValue* instr = graph()->LookupValue(index);
+  HValue* instr = graph_->LookupValue(index);
   if (instr != NULL && instr->IsPhi()) {
     return HPhi::cast(instr);
   }
@@ -737,7 +737,7 @@
 void LAllocator::AddConstraintsGapMove(int index,
                                        LOperand* from,
                                        LOperand* to) {
-  LGap* gap = chunk_->GetGapAt(index);
+  LGap* gap = GapAt(index);
   LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
   if (from->IsUnallocated()) {
     const ZoneList<LMoveOperands>* move_operands = move->move_operands();
@@ -760,24 +760,24 @@
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
   for (int i = start; i <= end; ++i) {
-    if (chunk_->IsGapAt(i)) {
-      InstructionSummary* summary = NULL;
-      InstructionSummary* prev_summary = NULL;
-      if (i < end) summary = GetSummary(i + 1);
-      if (i > start) prev_summary = GetSummary(i - 1);
-      MeetConstraintsBetween(prev_summary, summary, i);
+    if (IsGapAt(i)) {
+      LInstruction* instr = NULL;
+      LInstruction* prev_instr = NULL;
+      if (i < end) instr = InstructionAt(i + 1);
+      if (i > start) prev_instr = InstructionAt(i - 1);
+      MeetConstraintsBetween(prev_instr, instr, i);
     }
   }
 }
 
 
-void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
-                                        InstructionSummary* second,
+void LAllocator::MeetConstraintsBetween(LInstruction* first,
+                                        LInstruction* second,
                                         int gap_index) {
   // Handle fixed temporaries.
   if (first != NULL) {
-    for (int i = 0; i < first->TempCount(); ++i) {
-      LUnallocated* temp = LUnallocated::cast(first->TempAt(i));
+    for (TempIterator it(first); it.HasNext(); it.Advance()) {
+      LUnallocated* temp = LUnallocated::cast(it.Next());
       if (temp->HasFixedPolicy()) {
         AllocateFixed(temp, gap_index - 1, false);
       }
@@ -810,7 +810,7 @@
       // and splitting of live ranges do not account for it.
       // Thus it should be inserted to a lifetime position corresponding to
       // the instruction end.
-      LGap* gap = chunk_->GetGapAt(gap_index);
+      LGap* gap = GapAt(gap_index);
       LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
       move->AddMove(first_output, range->GetSpillOperand());
     }
@@ -818,8 +818,8 @@
 
   // Handle fixed input operands of second instruction.
   if (second != NULL) {
-    for (int i = 0; i < second->InputCount(); ++i) {
-      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(i));
+    for (UseIterator it(second); it.HasNext(); it.Advance()) {
+      LUnallocated* cur_input = LUnallocated::cast(it.Next());
       if (cur_input->HasFixedPolicy()) {
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
         bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -848,7 +848,7 @@
   if (second != NULL && second->Output() != NULL) {
     LUnallocated* second_output = LUnallocated::cast(second->Output());
     if (second_output->HasSameAsInputPolicy()) {
-      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(0));
+      LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
       int output_vreg = second_output->VirtualRegister();
       int input_vreg = cur_input->VirtualRegister();
 
@@ -858,7 +858,7 @@
 
       if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
         int index = gap_index + 1;
-        LInstruction* instr = chunk_->instructions()->at(index);
+        LInstruction* instr = InstructionAt(index);
         if (instr->HasPointerMap()) {
           instr->pointer_map()->RecordPointer(input_copy);
         }
@@ -886,9 +886,9 @@
     LifetimePosition curr_position =
         LifetimePosition::FromInstructionIndex(index);
 
-    if (chunk_->IsGapAt(index)) {
+    if (IsGapAt(index)) {
       // We have a gap at this position.
-      LGap* gap = chunk_->GetGapAt(index);
+      LGap* gap = GapAt(index);
       LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
       const ZoneList<LMoveOperands>* move_operands = move->move_operands();
       for (int i = 0; i < move_operands->length(); ++i) {
@@ -922,17 +922,17 @@
         }
       }
     } else {
-      ASSERT(!chunk_->IsGapAt(index));
-      InstructionSummary* summary = GetSummary(index);
+      ASSERT(!IsGapAt(index));
+      LInstruction* instr = InstructionAt(index);
 
-      if (summary != NULL) {
-        LOperand* output = summary->Output();
+      if (instr != NULL) {
+        LOperand* output = instr->Output();
         if (output != NULL) {
           if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
           Define(curr_position, output, NULL);
         }
 
-        if (summary->IsCall()) {
+        if (instr->IsMarkedAsCall()) {
           for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsRegister() ||
                 output->index() != i) {
@@ -943,7 +943,7 @@
           }
         }
 
-        if (summary->IsCall() || summary->IsSaveDoubles()) {
+        if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
           for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsDoubleRegister() ||
                 output->index() != i) {
@@ -954,8 +954,8 @@
           }
         }
 
-        for (int i = 0; i < summary->InputCount(); ++i) {
-          LOperand* input = summary->InputAt(i);
+        for (UseIterator it(instr); it.HasNext(); it.Advance()) {
+          LOperand* input = it.Next();
 
           LifetimePosition use_pos;
           if (input->IsUnallocated() &&
@@ -969,9 +969,9 @@
           if (input->IsUnallocated()) live->Add(input->VirtualRegister());
         }
 
-        for (int i = 0; i < summary->TempCount(); ++i) {
-          LOperand* temp = summary->TempAt(i);
-          if (summary->IsCall()) {
+        for (TempIterator it(instr); it.HasNext(); it.Advance()) {
+          LOperand* temp = it.Next();
+          if (instr->IsMarkedAsCall()) {
             if (temp->IsRegister()) continue;
             if (temp->IsUnallocated()) {
               LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1042,9 +1042,9 @@
 
 
 void LAllocator::MeetRegisterConstraints() {
-  HPhase phase("Register constraints", chunk());
+  HPhase phase("Register constraints", chunk_);
   first_artificial_register_ = next_virtual_register_;
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int i = 0; i < blocks->length(); ++i) {
     HBasicBlock* block = blocks->at(i);
     MeetRegisterConstraints(block);
@@ -1053,10 +1053,10 @@
 
 
 void LAllocator::ResolvePhis() {
-  HPhase phase("Resolve phis", chunk());
+  HPhase phase("Resolve phis", chunk_);
 
   // Process the blocks in reverse order.
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
     HBasicBlock* block = blocks->at(block_id);
     ResolvePhis(block);
@@ -1094,7 +1094,7 @@
     if (!pred_op->Equals(cur_op)) {
       LGap* gap = NULL;
       if (block->predecessors()->length() == 1) {
-        gap = chunk_->GetGapAt(block->first_instruction_index());
+        gap = GapAt(block->first_instruction_index());
       } else {
         ASSERT(pred->end()->SecondSuccessor() == NULL);
         gap = GetLastGap(pred);
@@ -1107,19 +1107,19 @@
 
 LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
   int index = pos.InstructionIndex();
-  if (chunk_->IsGapAt(index)) {
-    LGap* gap = chunk_->GetGapAt(index);
+  if (IsGapAt(index)) {
+    LGap* gap = GapAt(index);
     return gap->GetOrCreateParallelMove(
         pos.IsInstructionStart() ? LGap::START : LGap::END);
   }
   int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
-  return chunk_->GetGapAt(gap_pos)->GetOrCreateParallelMove(
+  return GapAt(gap_pos)->GetOrCreateParallelMove(
       (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
 }
 
 
 HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
-  LGap* gap = chunk_->GetGapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
+  LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
   return gap->block();
 }
 
@@ -1166,7 +1166,7 @@
 
 void LAllocator::ResolveControlFlow() {
   HPhase phase("Resolve control flow", this);
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = 1; block_id < blocks->length(); ++block_id) {
     HBasicBlock* block = blocks->at(block_id);
     if (CanEagerlyResolveControlFlow(block)) continue;
@@ -1189,7 +1189,7 @@
   HPhase phase("Build live ranges", this);
   InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
     HBasicBlock* block = blocks->at(block_id);
     BitVector* live = ComputeLiveOut(block);
@@ -1264,7 +1264,7 @@
         found = true;
         int operand_index = iterator.Current();
         PrintF("Function: %s\n",
-               *graph()->info()->function()->debug_name()->ToCString());
+               *graph_->info()->function()->debug_name()->ToCString());
         PrintF("Value %d used before first definition!\n", operand_index);
         LiveRange* range = LiveRangeFor(operand_index);
         PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1469,7 +1469,7 @@
     if (current->HasAllocatedSpillOperand()) {
       TraceAlloc("Live range %d already has a spill operand\n", current->id());
       LifetimePosition next_pos = position;
-      if (chunk_->IsGapAt(next_pos.InstructionIndex())) {
+      if (IsGapAt(next_pos.InstructionIndex())) {
         next_pos = next_pos.NextInstruction();
       }
       UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
@@ -1556,14 +1556,8 @@
 }
 
 
-void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
-  operand->set_virtual_register(value->id());
-  current_summary()->AddInput(operand);
-}
-
-
 bool LAllocator::HasTaggedValue(int virtual_register) const {
-  HValue* value = graph()->LookupValue(virtual_register);
+  HValue* value = graph_->LookupValue(virtual_register);
   if (value == NULL) return false;
   return value->representation().IsTagged();
 }
@@ -1571,7 +1565,7 @@
 
 RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
   if (virtual_register < first_artificial_register_) {
-    HValue* value = graph()->LookupValue(virtual_register);
+    HValue* value = graph_->LookupValue(virtual_register);
     if (value != NULL && value->representation().IsDouble()) {
       return DOUBLE_REGISTERS;
     }
@@ -1584,39 +1578,8 @@
 }
 
 
-void LAllocator::MarkAsCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs can use either fixed register or have a short lifetime (be
-  // used at start of the instruction).
-  InstructionSummary* summary = current_summary();
-#ifdef DEBUG
-  ASSERT(summary->Output() == NULL ||
-         LUnallocated::cast(summary->Output())->HasFixedPolicy() ||
-         !LUnallocated::cast(summary->Output())->HasRegisterPolicy());
-  for (int i = 0; i < summary->InputCount(); i++) {
-    ASSERT(LUnallocated::cast(summary->InputAt(i))->HasFixedPolicy() ||
-           LUnallocated::cast(summary->InputAt(i))->IsUsedAtStart() ||
-           !LUnallocated::cast(summary->InputAt(i))->HasRegisterPolicy());
-  }
-  for (int i = 0; i < summary->TempCount(); i++) {
-    ASSERT(LUnallocated::cast(summary->TempAt(i))->HasFixedPolicy() ||
-           !LUnallocated::cast(summary->TempAt(i))->HasRegisterPolicy());
-  }
-#endif
-  summary->MarkAsCall();
-}
-
-
-void LAllocator::MarkAsSaveDoubles() {
-  current_summary()->MarkAsSaveDoubles();
-}
-
-
 void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
   operand->set_virtual_register(instr->id());
-  current_summary()->SetOutput(operand);
 }
 
 
@@ -1625,7 +1588,11 @@
   if (!operand->HasFixedPolicy()) {
     operand->set_virtual_register(next_virtual_register_++);
   }
-  current_summary()->AddTemp(operand);
+}
+
+
+void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
+  operand->set_virtual_register(value->id());
 }
 
 
@@ -1634,34 +1601,6 @@
 }
 
 
-void LAllocator::BeginInstruction() {
-  if (next_summary_ == NULL) {
-    next_summary_ = new InstructionSummary();
-  }
-  summary_stack_.Add(next_summary_);
-  next_summary_ = NULL;
-}
-
-
-void LAllocator::SummarizeInstruction(int index) {
-  InstructionSummary* sum = summary_stack_.RemoveLast();
-  if (summaries_.length() <= index) {
-    summaries_.AddBlock(NULL, index + 1 - summaries_.length());
-  }
-  ASSERT(summaries_[index] == NULL);
-  if (sum->Output() != NULL || sum->InputCount() > 0 || sum->TempCount() > 0) {
-    summaries_[index] = sum;
-  } else {
-    next_summary_ = sum;
-  }
-}
-
-
-void LAllocator::OmitInstruction() {
-  summary_stack_.RemoveLast();
-}
-
-
 void LAllocator::AddToActive(LiveRange* range) {
   TraceAlloc("Add live range %d to active\n", range->id());
   active_live_ranges_.Add(range);
@@ -2007,7 +1946,7 @@
 
 bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
   return pos.IsInstructionStart() &&
-      chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
+      InstructionAt(pos.InstructionIndex())->IsLabel();
 }
 
 
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 83f5583..914a5b6 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -31,6 +31,7 @@
 #include "v8.h"
 
 #include "data-flow.h"
+#include "lithium.h"
 #include "zone.h"
 
 namespace v8 {
@@ -153,52 +154,55 @@
 
 // A register-allocator view of a Lithium instruction. It contains the id of
 // the output operand and a list of input operand uses.
-class InstructionSummary: public ZoneObject {
+
+class LInstruction;
+class LEnvironment;
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
  public:
-  InstructionSummary()
-      : output_operand_(NULL),
-        input_count_(0),
-        operands_(4),
-        is_call_(false),
-        is_save_doubles_(false) {}
-
-  // Output operands.
-  LOperand* Output() const { return output_operand_; }
-  void SetOutput(LOperand* output) {
-    ASSERT(output_operand_ == NULL);
-    output_operand_ = output;
-  }
-
-  // Input operands.
-  int InputCount() const { return input_count_; }
-  LOperand* InputAt(int i) const {
-    ASSERT(i < input_count_);
-    return operands_[i];
-  }
-  void AddInput(LOperand* input) {
-    operands_.InsertAt(input_count_, input);
-    input_count_++;
-  }
-
-  // Temporary operands.
-  int TempCount() const { return operands_.length() - input_count_; }
-  LOperand* TempAt(int i) const { return operands_[i + input_count_]; }
-  void AddTemp(LOperand* temp) { operands_.Add(temp); }
-
-  void MarkAsCall() { is_call_ = true; }
-  bool IsCall() const { return is_call_; }
-
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
-  bool IsSaveDoubles() const { return is_save_doubles_; }
+  inline explicit TempIterator(LInstruction* instr);
+  inline bool HasNext();
+  inline LOperand* Next();
+  inline void Advance();
 
  private:
-  LOperand* output_operand_;
-  int input_count_;
-  ZoneList<LOperand*> operands_;
-  bool is_call_;
-  bool is_save_doubles_;
+  inline int AdvanceToNext(int start);
+  LInstruction* instr_;
+  int limit_;
+  int current_;
 };
 
+
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+  inline explicit InputIterator(LInstruction* instr);
+  inline bool HasNext();
+  inline LOperand* Next();
+  inline void Advance();
+
+ private:
+  inline int AdvanceToNext(int start);
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+class UseIterator BASE_EMBEDDED {
+ public:
+  inline explicit UseIterator(LInstruction* instr);
+  inline bool HasNext();
+  inline LOperand* Next();
+  inline void Advance();
+
+ private:
+  InputIterator input_iterator_;
+  DeepIterator env_iterator_;
+};
+
+
 // Representation of the non-empty interval [start,end[.
 class UseInterval: public ZoneObject {
  public:
@@ -428,9 +432,6 @@
  public:
   explicit LAllocator(int first_virtual_register, HGraph* graph)
       : chunk_(NULL),
-        summaries_(0),
-        next_summary_(NULL),
-        summary_stack_(2),
         live_in_sets_(0),
         live_ranges_(16),
         fixed_live_ranges_(8),
@@ -457,27 +458,12 @@
   // Record a temporary operand.
   void RecordTemporary(LUnallocated* operand);
 
-  // Marks the current instruction as a call.
-  void MarkAsCall();
-
-  // Marks the current instruction as requiring saving double registers.
-  void MarkAsSaveDoubles();
-
   // Checks whether the value of a given virtual register is tagged.
   bool HasTaggedValue(int virtual_register) const;
 
   // Returns the register kind required by the given virtual register.
   RegisterKind RequiredRegisterKind(int virtual_register) const;
 
-  // Begin a new instruction.
-  void BeginInstruction();
-
-  // Summarize the current instruction.
-  void SummarizeInstruction(int index);
-
-  // Summarize the current instruction.
-  void OmitInstruction();
-
   // Control max function size.
   static int max_initial_value_ids();
 
@@ -525,8 +511,8 @@
   void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
   void ProcessInstructions(HBasicBlock* block, BitVector* live);
   void MeetRegisterConstraints(HBasicBlock* block);
-  void MeetConstraintsBetween(InstructionSummary* first,
-                              InstructionSummary* second,
+  void MeetConstraintsBetween(LInstruction* first,
+                              LInstruction* second,
                               int gap_index);
   void ResolvePhis(HBasicBlock* block);
 
@@ -604,12 +590,6 @@
   // Return the block which contains the given lifetime position.
   HBasicBlock* GetBlock(LifetimePosition pos);
 
-  // Current active summary.
-  InstructionSummary* current_summary() const { return summary_stack_.last(); }
-
-  // Get summary for given instruction index.
-  InstructionSummary* GetSummary(int index) const { return summaries_[index]; }
-
   // Helper methods for the fixed registers.
   int RegisterCount() const;
   static int FixedLiveRangeID(int index) { return -index - 1; }
@@ -618,15 +598,17 @@
   LiveRange* FixedDoubleLiveRangeFor(int index);
   LiveRange* LiveRangeFor(int index);
   HPhi* LookupPhi(LOperand* operand) const;
-  LGap* GetLastGap(HBasicBlock* block) const;
+  LGap* GetLastGap(HBasicBlock* block);
 
   const char* RegisterName(int allocation_index);
 
-  LChunk* chunk_;
-  ZoneList<InstructionSummary*> summaries_;
-  InstructionSummary* next_summary_;
+  inline bool IsGapAt(int index);
 
-  ZoneList<InstructionSummary*> summary_stack_;
+  inline LInstruction* InstructionAt(int index);
+
+  inline LGap* GapAt(int index);
+
+  LChunk* chunk_;
 
   // During liveness analysis keep a mapping from block id to live_in sets
   // for blocks already analyzed.
diff --git a/src/lithium.h b/src/lithium.h
index e1b6fc0..a2f9df0 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -509,6 +509,82 @@
   friend class LCodegen;
 };
 
+
+// Iterates over the non-null, non-constant operands in an environment.
+class ShallowIterator BASE_EMBEDDED {
+ public:
+  explicit ShallowIterator(LEnvironment* env)
+      : env_(env),
+        limit_(env != NULL ? env->values()->length() : 0),
+        current_(0) {
+    current_ = AdvanceToNext(0);
+  }
+
+  inline bool HasNext() {
+    return env_ != NULL && current_ < limit_;
+  }
+
+  inline LOperand* Next() {
+    ASSERT(HasNext());
+    return env_->values()->at(current_);
+  }
+
+  inline void Advance() {
+    current_ = AdvanceToNext(current_ + 1);
+  }
+
+  inline LEnvironment* env() { return env_; }
+
+ private:
+  inline int AdvanceToNext(int start) {
+    while (start < limit_ &&
+           (env_->values()->at(start) == NULL ||
+            env_->values()->at(start)->IsConstantOperand())) {
+      start++;
+    }
+    return start;
+  }
+
+  LEnvironment* env_;
+  int limit_;
+  int current_;
+};
+
+
+// Iterator for non-null, non-constant operands, including outer environments.
+class DeepIterator BASE_EMBEDDED {
+ public:
+  explicit DeepIterator(LEnvironment* env)
+      : current_iterator_(env) { }
+
+  inline bool HasNext() {
+    if (current_iterator_.HasNext()) return true;
+    if (current_iterator_.env() == NULL) return false;
+    AdvanceToOuter();
+    return current_iterator_.HasNext();
+  }
+
+  inline LOperand* Next() {
+    ASSERT(current_iterator_.HasNext());
+    return current_iterator_.Next();
+  }
+
+  inline void Advance() {
+    if (current_iterator_.HasNext()) {
+      current_iterator_.Advance();
+    } else {
+      AdvanceToOuter();
+    }
+  }
+
+ private:
+  inline void AdvanceToOuter() {
+    current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+  }
+
+  ShallowIterator current_iterator_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
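
Note: ShallowIterator walks a single environment's values; DeepIterator restarts on env->outer() so the operands of every enclosing (inlined) frame appear as one stream. A hedged, self-contained model of the same flattening:

    #include <cstddef>
    #include <vector>

    // Simplified stand-ins for LEnvironment and LOperand.
    struct EnvSketch {
      std::vector<int*> values;  // NULL and constant entries are skipped
      EnvSketch* outer;          // enclosing (inlined) frame, or NULL
    };

    // Collect every usable value, innermost environment first.
    std::vector<int*> DeepValues(EnvSketch* env) {
      std::vector<int*> out;
      for (; env != NULL; env = env->outer) {
        for (size_t i = 0; i < env->values.size(); ++i) {
          if (env->values[i] != NULL) out.push_back(env->values[i]);
        }
      }
      return out;
    }
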
diff --git a/src/messages.js b/src/messages.js
index 4c3d1e7..d22ac65 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -38,10 +38,6 @@
 var COMPILATION_TYPE_EVAL = 1;
 var COMPILATION_TYPE_JSON = 2;
 
-// Lazily initialized.
-var kVowelSounds = 0;
-var kCapitalVowelSounds = 0;
-
 // Matches Messages::kNoLineNumberInfo from v8.h
 var kNoLineNumberInfo = 0;
 
@@ -52,8 +48,7 @@
 
 var kMessages = 0;
 
-var kReplacementMarkers =
-    [ "%0", "%1", "%2", "%3" ]
+var kReplacementMarkers = [ "%0", "%1", "%2", "%3" ];
 
 function FormatString(format, message) {
   var args = %MessageGetArguments(message);
@@ -152,6 +147,7 @@
       unexpected_token_number:      ["Unexpected number"],
       unexpected_token_string:      ["Unexpected string"],
       unexpected_token_identifier:  ["Unexpected identifier"],
+      unexpected_strict_reserved:   ["Unexpected strict mode reserved word"],
       unexpected_eos:               ["Unexpected end of input"],
       malformed_regexp:             ["Invalid regular expression: /", "%0", "/: ", "%1"],
       unterminated_regexp:          ["Invalid regular expression: missing /"],
@@ -226,6 +222,7 @@
       strict_lhs_assignment:        ["Assignment to eval or arguments is not allowed in strict mode"],
       strict_lhs_postfix:           ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
       strict_lhs_prefix:            ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+      strict_reserved_word:         ["Use of future reserved word in strict mode"],
     };
   }
   var message_type = %MessageGetType(message);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 50f4031..3b83dd4 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2510,29 +2510,29 @@
 }
 
 
-unsigned Code::safepoint_table_start() {
+unsigned Code::safepoint_table_offset() {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
-  return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+  return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
 }
 
 
-void Code::set_safepoint_table_start(unsigned offset) {
+void Code::set_safepoint_table_offset(unsigned offset) {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
-  WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+  WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
 }
 
 
-unsigned Code::stack_check_table_start() {
+unsigned Code::stack_check_table_offset() {
   ASSERT(kind() == FUNCTION);
-  return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+  return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
 }
 
 
-void Code::set_stack_check_table_start(unsigned offset) {
+void Code::set_stack_check_table_offset(unsigned offset) {
   ASSERT(kind() == FUNCTION);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
-  WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+  WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
 }
 
 
@@ -2993,6 +2993,18 @@
 }
 
 
+bool SharedFunctionInfo::strict_mode() {
+  return BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+
+void SharedFunctionInfo::set_strict_mode(bool value) {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kStrictModeFunction,
+                                     value));
+}
+
+
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
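
The strict_mode accessors just added rely on BooleanBit, which treats the
compiler-hints Smi as a bit field; kStrictModeFunction is declared as bit 8
in the objects.h hunk below. A sketch of the assumed encoding, restated on
plain integers:

    static bool GetBit(int hints, int bit) {
      return (hints & (1 << bit)) != 0;
    }
    static int SetBit(int hints, int bit, bool value) {
      return value ? (hints | (1 << bit)) : (hints & ~(1 << bit));
    }
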
 
diff --git a/src/objects.cc b/src/objects.cc
index 8bced58..775487a 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1213,6 +1213,8 @@
 MaybeObject* JSObject::AddFastProperty(String* name,
                                        Object* value,
                                        PropertyAttributes attributes) {
+  ASSERT(!IsJSGlobalProxy());
+
   // Normalize the object if the name is an actual string (not the
   // hidden symbols) and is not a real identifier.
   StringInputBuffer buffer(name);
@@ -2288,6 +2290,9 @@
   // The global object is always normalized.
   ASSERT(!IsGlobalObject());
 
+  // JSGlobalProxy must never be normalized.
+  ASSERT(!IsJSGlobalProxy());
+
   // Allocate new content.
   int property_count = map()->NumberOfDescribedProperties();
   if (expected_additional_properties > 0) {
@@ -5920,7 +5925,7 @@
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-      Handle<JSGlobalPropertyCell> cell  = it.rinfo()->target_cell_handle();
+      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
       it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
@@ -6001,7 +6006,7 @@
 void Code::SetNoStackCheckTable() {
   // Indicate the absence of a stack-check table by a table start after the
   // end of the instructions.  Table start must be aligned, so round up.
-  set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+  set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
 }
 
 
@@ -6278,7 +6283,7 @@
     }
     PrintF(out, "\n");
   } else if (kind() == FUNCTION) {
-    unsigned offset = stack_check_table_start();
+    unsigned offset = stack_check_table_offset();
     // If there is no stack check table, the "table start" will be at or
     // after (due to alignment) the end of the instruction stream.
     if (static_cast<int>(offset) < instruction_size()) {
@@ -6679,6 +6684,13 @@
     return UNDEFINED_ELEMENT;
   }
 
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return UNDEFINED_ELEMENT;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->HasLocalElement(index);
+  }
+
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
     return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
@@ -7986,20 +7998,28 @@
 // StringSharedKeys are used as keys in the eval cache.
 class StringSharedKey : public HashTableKey {
  public:
-  StringSharedKey(String* source, SharedFunctionInfo* shared)
-      : source_(source), shared_(shared) { }
+  StringSharedKey(String* source,
+                  SharedFunctionInfo* shared,
+                  StrictModeFlag strict_mode)
+      : source_(source),
+        shared_(shared),
+        strict_mode_(strict_mode) { }
 
   bool IsMatch(Object* other) {
     if (!other->IsFixedArray()) return false;
     FixedArray* pair = FixedArray::cast(other);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     if (shared != shared_) return false;
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+        Smi::cast(pair->get(2))->value());
+    if (strict_mode != strict_mode_) return false;
     String* source = String::cast(pair->get(1));
     return source->Equals(source_);
   }
 
   static uint32_t StringSharedHashHelper(String* source,
-                                         SharedFunctionInfo* shared) {
+                                         SharedFunctionInfo* shared,
+                                         StrictModeFlag strict_mode) {
     uint32_t hash = source->Hash();
     if (shared->HasSourceCode()) {
       // Instead of using the SharedFunctionInfo pointer in the hash
@@ -8009,36 +8029,41 @@
       // collection.
       Script* script = Script::cast(shared->script());
       hash ^= String::cast(script->source())->Hash();
+      if (strict_mode == kStrictMode) hash ^= 0x8000;
       hash += shared->start_position();
     }
     return hash;
   }
 
   uint32_t Hash() {
-    return StringSharedHashHelper(source_, shared_);
+    return StringSharedHashHelper(source_, shared_, strict_mode_);
   }
 
   uint32_t HashForObject(Object* obj) {
     FixedArray* pair = FixedArray::cast(obj);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     String* source = String::cast(pair->get(1));
-    return StringSharedHashHelper(source, shared);
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+        Smi::cast(pair->get(2))->value());
+    return StringSharedHashHelper(source, shared, strict_mode);
   }
 
   MUST_USE_RESULT MaybeObject* AsObject() {
     Object* obj;
-    { MaybeObject* maybe_obj = Heap::AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = Heap::AllocateFixedArray(3);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
     pair->set(0, shared_);
     pair->set(1, source_);
+    pair->set(2, Smi::FromInt(strict_mode_));
     return pair;
   }
 
  private:
   String* source_;
   SharedFunctionInfo* shared_;
+  StrictModeFlag strict_mode_;
 };
 
 
@@ -8997,8 +9022,10 @@
 }
 
 
-Object* CompilationCacheTable::LookupEval(String* src, Context* context) {
-  StringSharedKey key(src, context->closure()->shared());
+Object* CompilationCacheTable::LookupEval(String* src,
+                                          Context* context,
+                                          StrictModeFlag strict_mode) {
+  StringSharedKey key(src, context->closure()->shared(), strict_mode);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return Heap::undefined_value();
   return get(EntryToIndex(entry) + 1);
@@ -9033,8 +9060,10 @@
 
 MaybeObject* CompilationCacheTable::PutEval(String* src,
                                             Context* context,
-                                            Object* value) {
-  StringSharedKey key(src, context->closure()->shared());
+                                            SharedFunctionInfo* value) {
+  StringSharedKey key(src,
+                      context->closure()->shared(),
+                      value->strict_mode() ? kStrictMode : kNonStrictMode);
   Object* obj;
   { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
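
The net effect of the StringSharedKey changes: an eval-cache entry is now a
(shared function, source, strict-mode flag) triple, so identical source
compiled under strict and under non-strict mode occupies two distinct cache
entries. A sketch (helper name assumed) of the hash combination used above:

    static uint32_t EvalCacheHash(uint32_t source_hash,
                                  uint32_t script_source_hash,
                                  StrictModeFlag strict_mode,
                                  int start_position) {
      uint32_t hash = source_hash;
      hash ^= script_source_hash;
      // One extra bit keeps strict and non-strict entries apart.
      if (strict_mode == kStrictMode) hash ^= 0x8000;
      hash += start_position;
      return hash;
    }
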
diff --git a/src/objects.h b/src/objects.h
index c9b3757..bf59830 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3275,13 +3275,13 @@
 
   // [safepoint_table_offset]: For kind OPTIMIZED_FUNCTION, the offset in
   // the instruction stream where the safepoint table starts.
-  inline unsigned safepoint_table_start();
-  inline void set_safepoint_table_start(unsigned offset);
+  inline unsigned safepoint_table_offset();
+  inline void set_safepoint_table_offset(unsigned offset);
 
   // [stack_check_table_offset]: For kind FUNCTION, the offset in the
   // instruction stream where the stack check table starts.
-  inline unsigned stack_check_table_start();
-  inline void set_stack_check_table_start(unsigned offset);
+  inline unsigned stack_check_table_offset();
+  inline void set_stack_check_table_offset(unsigned offset);
 
   // [check type]: For kind CALL_IC, tells how to check if the
   // receiver is valid for the given call.
@@ -3445,8 +3445,8 @@
   static const int kAllowOSRAtLoopNestingLevelOffset =
       kHasDeoptimizationSupportOffset + 1;
 
-  static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
-  static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
+  static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
+  static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
 
   // Flags layout.
   static const int kFlagsICStateShift        = 0;
@@ -4176,6 +4176,10 @@
   inline bool optimization_disabled();
   inline void set_optimization_disabled(bool value);
 
+  // Indicates whether the function is a strict mode function.
+  inline bool strict_mode();
+  inline void set_strict_mode(bool value);
+
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -4357,6 +4361,7 @@
   static const int kCodeAgeShift = 4;
   static const int kCodeAgeMask = 0x7;
   static const int kOptimizationDisabled = 7;
+  static const int kStrictModeFunction = 8;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
@@ -4902,10 +4907,12 @@
  public:
   // Find cached value for a string key, otherwise return null.
   Object* Lookup(String* src);
-  Object* LookupEval(String* src, Context* context);
+  Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
   Object* LookupRegExp(String* source, JSRegExp::Flags flags);
   MaybeObject* Put(String* src, Object* value);
-  MaybeObject* PutEval(String* src, Context* context, Object* value);
+  MaybeObject* PutEval(String* src,
+                       Context* context,
+                       SharedFunctionInfo* value);
   MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
 
   // Remove given value from cache.
diff --git a/src/parser.cc b/src/parser.cc
index ccb3f64..5353a63 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -616,7 +616,8 @@
 
 
 FunctionLiteral* Parser::ParseProgram(Handle<String> source,
-                                      bool in_global_context) {
+                                      bool in_global_context,
+                                      StrictModeFlag strict_mode) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(&Counters::parse);
@@ -632,17 +633,18 @@
     ExternalTwoByteStringUC16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, &zone_scope);
+    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
   } else {
     GenericStringUC16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, &zone_scope);
+    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
   }
 }
 
 
 FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
                                         bool in_global_context,
+                                        StrictModeFlag strict_mode,
                                         ZoneScope* zone_scope) {
   ASSERT(target_stack_ == NULL);
   if (pre_data_ != NULL) pre_data_->Initialize();
@@ -662,6 +664,9 @@
     LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
                                scope);
     TemporaryScope temp_scope(&this->temp_scope_);
+    if (strict_mode == kStrictMode) {
+      temp_scope.EnableStrictMode();
+    }
     ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
@@ -747,10 +752,16 @@
                                scope);
     TemporaryScope temp_scope(&this->temp_scope_);
 
+    if (info->strict_mode()) {
+      temp_scope.EnableStrictMode();
+    }
+
     FunctionLiteralType type =
         info->is_expression() ? EXPRESSION : DECLARATION;
     bool ok = true;
-    result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
+    result = ParseFunctionLiteral(name,
+                                  false,    // Strict mode name already checked.
+                                  RelocInfo::kNoPosition, type, &ok);
     // Make sure the results agree.
     ASSERT(ok == (result != NULL));
     // The only errors should be stack overflows.
@@ -1439,8 +1450,10 @@
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int function_token_position = scanner().location().beg_pos;
-  Handle<String> name = ParseIdentifier(CHECK_OK);
+  bool is_reserved = false;
+  Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
   FunctionLiteral* fun = ParseFunctionLiteral(name,
+                                              is_reserved,
                                               function_token_position,
                                               DECLARATION,
                                               CHECK_OK);
@@ -1699,7 +1712,7 @@
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
-  bool starts_with_idenfifier = (peek() == Token::IDENTIFIER);
+  bool starts_with_idenfifier = peek_any_identifier();
   Expression* expr = ParseExpression(true, CHECK_OK);
   if (peek() == Token::COLON && starts_with_idenfifier && expr &&
       expr->AsVariableProxy() != NULL &&
@@ -2688,9 +2701,12 @@
     Expect(Token::FUNCTION, CHECK_OK);
     int function_token_position = scanner().location().beg_pos;
     Handle<String> name;
-    if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
-    result = ParseFunctionLiteral(name, function_token_position,
-                                  NESTED, CHECK_OK);
+    bool is_reserved_name = false;
+    if (peek_any_identifier()) {
+        name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+    }
+    result = ParseFunctionLiteral(name, is_reserved_name,
+                                  function_token_position, NESTED, CHECK_OK);
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
   }
@@ -2759,6 +2775,11 @@
     case Token::IDENTIFIER:
       return ReportMessage("unexpected_token_identifier",
                            Vector<const char*>::empty());
+    case Token::FUTURE_RESERVED_WORD:
+      return ReportMessage(temp_scope_->StrictMode() ?
+                               "unexpected_strict_reserved" :
+                               "unexpected_token_identifier",
+                           Vector<const char*>::empty());
     default:
       const char* name = Token::String(token);
       ASSERT(name != NULL);
@@ -2814,7 +2835,8 @@
       result = new Literal(Factory::false_value());
       break;
 
-    case Token::IDENTIFIER: {
+    case Token::IDENTIFIER:
+    case Token::FUTURE_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
       result = top_scope_->NewUnresolved(name, inside_with());
@@ -3221,6 +3243,7 @@
   Token::Value next = Next();
   bool is_keyword = Token::IsKeyword(next);
   if (next == Token::IDENTIFIER || next == Token::NUMBER ||
+      next == Token::FUTURE_RESERVED_WORD ||
       next == Token::STRING || is_keyword) {
     Handle<String> name;
     if (is_keyword) {
@@ -3230,6 +3253,7 @@
     }
     FunctionLiteral* value =
         ParseFunctionLiteral(name,
+                             false,   // Reserved words are allowed here.
                              RelocInfo::kNoPosition,
                              DECLARATION,
                              CHECK_OK);
@@ -3272,6 +3296,7 @@
     Scanner::Location loc = scanner().peek_location();
 
     switch (next) {
+      case Token::FUTURE_RESERVED_WORD:
       case Token::IDENTIFIER: {
         bool is_getter = false;
         bool is_setter = false;
@@ -3420,6 +3445,7 @@
 
 
 FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
+                                              bool name_is_reserved,
                                               int function_token_position,
                                               FunctionLiteralType type,
                                               bool* ok) {
@@ -3453,10 +3479,13 @@
     int start_pos = scanner().location().beg_pos;
     Scanner::Location name_loc = Scanner::NoLocation();
     Scanner::Location dupe_loc = Scanner::NoLocation();
+    Scanner::Location reserved_loc = Scanner::NoLocation();
 
     bool done = (peek() == Token::RPAREN);
     while (!done) {
-      Handle<String> param_name = ParseIdentifier(CHECK_OK);
+      bool is_reserved = false;
+      Handle<String> param_name =
+          ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
 
       // Store locations for possible future error reports.
       if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
@@ -3465,6 +3494,9 @@
       if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
         dupe_loc = scanner().location();
       }
+      if (!reserved_loc.IsValid() && is_reserved) {
+        reserved_loc = scanner().location();
+      }
 
       Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
       top_scope_->AddParameter(parameter);
@@ -3545,7 +3577,8 @@
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
-        ReportMessageAt(Scanner::Location(position, start_pos),
+        Scanner::Location location = Scanner::Location(position, start_pos);
+        ReportMessageAt(location,
                         "strict_function_name", Vector<const char*>::empty());
         *ok = false;
         return NULL;
@@ -3562,6 +3595,22 @@
         *ok = false;
         return NULL;
       }
+      if (name_is_reserved) {
+        int position = function_token_position != RelocInfo::kNoPosition
+            ? function_token_position
+            : (start_pos > 0 ? start_pos - 1 : start_pos);
+        Scanner::Location location = Scanner::Location(position, start_pos);
+        ReportMessageAt(location, "strict_reserved_word",
+                        Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      }
+      if (reserved_loc.IsValid()) {
+        ReportMessageAt(reserved_loc, "strict_reserved_word",
+                        Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      }
       CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
     }
 
@@ -3633,6 +3682,13 @@
 }
 
 
+bool Parser::peek_any_identifier() {
+  Token::Value next = peek();
+  return next == Token::IDENTIFIER ||
+         next == Token::FUTURE_RESERVED_WORD;
+}
+
+
 void Parser::Consume(Token::Value token) {
   Token::Value next = Next();
   USE(next);
@@ -3692,7 +3748,22 @@
 
 
 Handle<String> Parser::ParseIdentifier(bool* ok) {
-  Expect(Token::IDENTIFIER, ok);
+  bool is_reserved;
+  return ParseIdentifierOrReservedWord(&is_reserved, ok);
+}
+
+
+Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
+                                                     bool* ok) {
+  *is_reserved = false;
+  if (temp_scope_->StrictMode()) {
+    Expect(Token::IDENTIFIER, ok);
+  } else {
+    if (!Check(Token::IDENTIFIER)) {
+      Expect(Token::FUTURE_RESERVED_WORD, ok);
+      *is_reserved = true;
+    }
+  }
   if (!*ok) return Handle<String>();
   return GetSymbol(ok);
 }
@@ -3700,7 +3771,9 @@
 
 Handle<String> Parser::ParseIdentifierName(bool* ok) {
   Token::Value next = Next();
-  if (next != Token::IDENTIFIER && !Token::IsKeyword(next)) {
+  if (next != Token::IDENTIFIER &&
+          next != Token::FUTURE_RESERVED_WORD &&
+          !Token::IsKeyword(next)) {
     ReportUnexpectedToken(next);
     *ok = false;
     return Handle<String>();
@@ -3740,20 +3813,18 @@
 
 
 // This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.  The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
 Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
                                                  bool* is_set,
                                                  bool* ok) {
-  Expect(Token::IDENTIFIER, ok);
+  Handle<String> result = ParseIdentifier(ok);
   if (!*ok) return Handle<String>();
   if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
     const char* token = scanner().literal_ascii_string().start();
     *is_get = strncmp(token, "get", 3) == 0;
     *is_set = !*is_get && strncmp(token, "set", 3) == 0;
   }
-  return GetSymbol(ok);
+  return result;
 }
 
 
@@ -3895,6 +3966,7 @@
           message = "unexpected_token_string";
           break;
         case Token::IDENTIFIER:
+        case Token::FUTURE_RESERVED_WORD:
           message = "unexpected_token_identifier";
           break;
         default:
@@ -3941,16 +4013,10 @@
 Handle<Object> JsonParser::ParseJsonValue() {
   Token::Value token = scanner_.Next();
   switch (token) {
-    case Token::STRING: {
+    case Token::STRING:
       return GetString();
-    }
-    case Token::NUMBER: {
-      ASSERT(scanner_.is_literal_ascii());
-      double value = StringToDouble(scanner_.literal_ascii_string(),
-                                    NO_FLAGS,  // Hex, octal or trailing junk.
-                                    OS::nan_value());
-      return Factory::NewNumber(value);
-    }
+    case Token::NUMBER:
+      return Factory::NewNumber(scanner_.number());
     case Token::FALSE_LITERAL:
       return Factory::false_value();
     case Token::TRUE_LITERAL:
@@ -5024,7 +5090,9 @@
       ASSERT(Top::has_pending_exception());
     } else {
       Handle<String> source = Handle<String>(String::cast(script->source()));
-      result = parser.ParseProgram(source, info->is_global());
+      result = parser.ParseProgram(source,
+                                   info->is_global(),
+                                   info->StrictMode());
     }
   }
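
A condensed sketch of the identifier policy these parser hunks implement
(the helper name is invented for illustration): strict code rejects future
reserved words outright, while non-strict code accepts them but records the
fact, so that a function whose body later turns out to be strict can still
report strict_reserved_word at the right location:

    bool AcceptAsIdentifier(Token::Value t, bool strict, bool* is_reserved) {
      *is_reserved = false;
      if (t == Token::IDENTIFIER) return true;
      if (t == Token::FUTURE_RESERVED_WORD && !strict) {
        *is_reserved = true;  // Deferred error, reported only if the
        return true;          // enclosing function ends up strict.
      }
      return false;
    }
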
 
diff --git a/src/parser.h b/src/parser.h
index 68983b4..aa8d525 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -423,7 +423,8 @@
 
   // Returns NULL if parsing failed.
   FunctionLiteral* ParseProgram(Handle<String> source,
-                                bool in_global_context);
+                                bool in_global_context,
+                                StrictModeFlag strict_mode);
 
   FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
 
@@ -446,6 +447,7 @@
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(Handle<String> source,
                                   bool in_global_context,
+                                  StrictModeFlag strict_mode,
                                   ZoneScope* zone_scope);
 
   // Report syntax error
@@ -546,6 +548,7 @@
 
   ZoneList<Expression*>* ParseArguments(bool* ok);
   FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
+                                        bool name_is_reserved,
                                         int function_token_position,
                                         FunctionLiteralType type,
                                         bool* ok);
@@ -575,6 +578,8 @@
     return scanner().Next();
   }
 
+  bool peek_any_identifier();
+
   INLINE(void Consume(Token::Value token));
   void Expect(Token::Value token, bool* ok);
   bool Check(Token::Value token);
@@ -608,6 +613,7 @@
   Literal* GetLiteralNumber(double value);
 
   Handle<String> ParseIdentifier(bool* ok);
+  Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
   Handle<String> ParseIdentifierName(bool* ok);
   Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
                                            bool* is_set,
diff --git a/src/preparser.cc b/src/preparser.cc
index c0dcc0b..252e88f 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -83,6 +83,7 @@
     return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
                            "unexpected_token_string", NULL);
   case i::Token::IDENTIFIER:
+  case i::Token::FUTURE_RESERVED_WORD:
     return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
                            "unexpected_token_identifier", NULL);
   default:
@@ -790,7 +791,7 @@
   Expression result = kUnknownExpression;
   if (peek() == i::Token::FUNCTION) {
     Consume(i::Token::FUNCTION);
-    if (peek() == i::Token::IDENTIFIER) {
+    if (peek_any_identifier()) {
       ParseIdentifier(CHECK_OK);
     }
     result = ParseFunctionLiteral(CHECK_OK);
@@ -858,7 +859,8 @@
       break;
     }
 
-    case i::Token::IDENTIFIER: {
+    case i::Token::IDENTIFIER:
+    case i::Token::FUTURE_RESERVED_WORD: {
       ParseIdentifier(CHECK_OK);
       result = kIdentifierExpression;
       break;
@@ -946,7 +948,8 @@
   while (peek() != i::Token::RBRACE) {
     i::Token::Value next = peek();
     switch (next) {
-      case i::Token::IDENTIFIER: {
+      case i::Token::IDENTIFIER:
+      case i::Token::FUTURE_RESERVED_WORD: {
         bool is_getter = false;
         bool is_setter = false;
         ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
@@ -954,6 +957,7 @@
             i::Token::Value name = Next();
             bool is_keyword = i::Token::IsKeyword(name);
             if (name != i::Token::IDENTIFIER &&
+                name != i::Token::FUTURE_RESERVED_WORD &&
                 name != i::Token::NUMBER &&
                 name != i::Token::STRING &&
                 !is_keyword) {
@@ -1151,7 +1155,9 @@
 
 
 PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
-  Expect(i::Token::IDENTIFIER, ok);
+  if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
+    Expect(i::Token::IDENTIFIER, ok);
+  }
   if (!*ok) return kUnknownIdentifier;
   return GetIdentifierSymbol();
 }
@@ -1166,7 +1172,8 @@
                                                     i::StrLength(keyword)));
     return kUnknownExpression;
   }
-  if (next == i::Token::IDENTIFIER) {
+  if (next == i::Token::IDENTIFIER ||
+      next == i::Token::FUTURE_RESERVED_WORD) {
     return GetIdentifierSymbol();
   }
   *ok = false;
@@ -1175,19 +1182,23 @@
 
 
 // This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.  The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
 PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
                                                            bool* is_set,
                                                            bool* ok) {
-  Expect(i::Token::IDENTIFIER, CHECK_OK);
+  PreParser::Identifier result = ParseIdentifier(CHECK_OK);
   if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
     const char* token = scanner_->literal_ascii_string().start();
     *is_get = strncmp(token, "get", 3) == 0;
     *is_set = !*is_get && strncmp(token, "set", 3) == 0;
   }
-  return GetIdentifierSymbol();
+  return result;
+}
+
+bool PreParser::peek_any_identifier() {
+  i::Token::Value next = peek();
+  return next == i::Token::IDENTIFIER ||
+         next == i::Token::FUTURE_RESERVED_WORD;
 }
 
 #undef CHECK_OK
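
The Check/Expect pairing used in ParseIdentifier above divides the work
between the two helpers. A sketch of the assumed semantics, consistent with
the declarations visible in the preparser.h hunk below: Check consumes the
next token only when it matches and says whether it did; Expect consumes
unconditionally and clears *ok on a mismatch.

    bool Check(i::Token::Value token) {
      if (peek() == token) {
        Next();
        return true;
      }
      return false;
    }
    void Expect(i::Token::Value token, bool* ok) {
      if (Next() != token) *ok = false;
    }
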
diff --git a/src/preparser.h b/src/preparser.h
index 66fad3b..b7fa6c7 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -243,6 +243,8 @@
     return scanner_->Next();
   }
 
+  bool peek_any_identifier();
+
   void Consume(i::Token::Value token) { Next(); }
 
   void Expect(i::Token::Value token, bool* ok) {
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 211f3f6..dda7abb 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -297,13 +297,13 @@
       Print("parameter[%d]", node->index());
       break;
     case Slot::LOCAL:
-      Print("frame[%d]", node->index());
+      Print("local[%d]", node->index());
       break;
     case Slot::CONTEXT:
-      Print(".context[%d]", node->index());
+      Print("context[%d]", node->index());
       break;
     case Slot::LOOKUP:
-      Print(".context[");
+      Print("lookup[");
       PrintLiteral(node->var()->name(), false);
       Print("]");
       break;
@@ -999,24 +999,7 @@
 
 void AstPrinter::VisitSlot(Slot* node) {
   PrintIndented("SLOT ");
-  switch (node->type()) {
-    case Slot::PARAMETER:
-      Print("parameter[%d]", node->index());
-      break;
-    case Slot::LOCAL:
-      Print("frame[%d]", node->index());
-      break;
-    case Slot::CONTEXT:
-      Print(".context[%d]", node->index());
-      break;
-    case Slot::LOOKUP:
-      Print(".context[");
-      PrintLiteral(node->var()->name(), false);
-      Print("]");
-      break;
-    default:
-      UNREACHABLE();
-  }
+  PrettyPrinter::VisitSlot(node);
   Print("\n");
 }
 
diff --git a/src/runtime.cc b/src/runtime.cc
index d55a201..4994378 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -644,6 +644,90 @@
 }
 
 
+static bool CheckAccessException(LookupResult* result,
+                                 v8::AccessType access_type) {
+  if (result->type() == CALLBACKS) {
+    Object* callback = result->GetCallbackObject();
+    if (callback->IsAccessorInfo()) {
+      AccessorInfo* info = AccessorInfo::cast(callback);
+      bool can_access =
+          (access_type == v8::ACCESS_HAS &&
+              (info->all_can_read() || info->all_can_write())) ||
+          (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+          (access_type == v8::ACCESS_SET && info->all_can_write());
+      return can_access;
+    }
+  }
+
+  return false;
+}
+
+
+static bool CheckAccess(JSObject* obj,
+                        String* name,
+                        LookupResult* result,
+                        v8::AccessType access_type) {
+  ASSERT(result->IsProperty());
+
+  JSObject* holder = result->holder();
+  JSObject* current = obj;
+  while (true) {
+    if (current->IsAccessCheckNeeded() &&
+        !Top::MayNamedAccess(current, name, access_type)) {
+      // The access check callback denied access, but some properties
+      // can have special permissions that override the callback's
+      // decision (see v8::AccessControl).
+      break;
+    }
+
+    if (current == holder) {
+      return true;
+    }
+
+    current = JSObject::cast(current->GetPrototype());
+  }
+
+  // API callbacks can have per callback access exceptions.
+  switch (result->type()) {
+    case CALLBACKS: {
+      if (CheckAccessException(result, access_type)) {
+        return true;
+      }
+      break;
+    }
+    case INTERCEPTOR: {
+      // If the object has an interceptor, try real named properties.
+      // Overwrite the result to fetch the correct property later.
+      holder->LookupRealNamedProperty(name, result);
+      if (result->IsProperty()) {
+        if (CheckAccessException(result, access_type)) {
+          return true;
+        }
+      }
+      break;
+    }
+    default:
+      break;
+  }
+
+  Top::ReportFailedAccessCheck(current, access_type);
+  return false;
+}
+
+
+// TODO(1095): we should traverse the hidden prototype hierarchy as well.
+static bool CheckElementAccess(JSObject* obj,
+                               uint32_t index,
+                               v8::AccessType access_type) {
+  if (obj->IsAccessCheckNeeded() &&
+      !Top::MayIndexedAccess(obj, index, access_type)) {
+    return false;
+  }
+
+  return true;
+}
+
+
 // Enumerator used as indices into the array returned from GetOwnProperty
 enum PropertyDescriptorIndices {
   IS_ACCESSOR_INDEX,
@@ -686,7 +770,7 @@
         // subsequent cases.
         Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
         Handle<String> str(String::cast(js_value->value()));
-        Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
+        Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
 
         elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
         elms->set(VALUE_INDEX, *substr);
@@ -699,8 +783,7 @@
       case JSObject::INTERCEPTED_ELEMENT:
       case JSObject::FAST_ELEMENT: {
         elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-        Handle<Object> element = GetElement(Handle<Object>(obj), index);
-        elms->set(VALUE_INDEX, *element);
+        elms->set(VALUE_INDEX, *GetElement(obj, index));
         elms->set(WRITABLE_INDEX, Heap::true_value());
         elms->set(ENUMERABLE_INDEX,  Heap::true_value());
         elms->set(CONFIGURABLE_INDEX, Heap::true_value());
@@ -708,7 +791,14 @@
       }
 
       case JSObject::DICTIONARY_ELEMENT: {
-        NumberDictionary* dictionary = obj->element_dictionary();
+        Handle<JSObject> holder = obj;
+        if (obj->IsJSGlobalProxy()) {
+          Object* proto = obj->GetPrototype();
+          if (proto->IsNull()) return Heap::undefined_value();
+          ASSERT(proto->IsJSGlobalObject());
+          holder = Handle<JSObject>(JSObject::cast(proto));
+        }
+        NumberDictionary* dictionary = holder->element_dictionary();
         int entry = dictionary->FindEntry(index);
         ASSERT(entry != NumberDictionary::kNotFound);
         PropertyDetails details = dictionary->DetailsAt(entry);
@@ -718,14 +808,18 @@
             FixedArray* callbacks =
                 FixedArray::cast(dictionary->ValueAt(entry));
             elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
-            elms->set(GETTER_INDEX, callbacks->get(0));
-            elms->set(SETTER_INDEX, callbacks->get(1));
+            if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
+              elms->set(GETTER_INDEX, callbacks->get(0));
+            }
+            if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
+              elms->set(SETTER_INDEX, callbacks->get(1));
+            }
             break;
           }
           case NORMAL:
             // This is a data property.
             elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-            elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+            elms->set(VALUE_INDEX, *GetElement(obj, index));
             elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
             break;
           default:
@@ -746,6 +840,10 @@
     return Heap::undefined_value();
   }
 
+  if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
+    return Heap::false_value();
+  }
+
   elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
   elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
 
@@ -754,16 +852,22 @@
 
   if (is_js_accessor) {
     // __defineGetter__/__defineSetter__ callback.
-    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
     elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
-    elms->set(GETTER_INDEX, structure->get(0));
-    elms->set(SETTER_INDEX, structure->get(1));
+
+    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
+    if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+      elms->set(GETTER_INDEX, structure->get(0));
+    }
+    if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+      elms->set(SETTER_INDEX, structure->get(1));
+    }
   } else {
     elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
     elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
 
     PropertyAttributes attrs;
     Object* value;
+    // GetProperty will check access and report any violations.
     { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
       if (!maybe_value->ToObject(&value)) return maybe_value;
     }
@@ -3487,8 +3591,10 @@
     HandleScope scope;
     Handle<String> str = args.at<String>(0);
     int index = Smi::cast(args[1])->value();
-    Handle<Object> result = GetCharAt(str, index);
-    return *result;
+    if (index >= 0 && index < str->length()) {
+      Handle<Object> result = GetCharAt(str, index);
+      return *result;
+    }
   }
 
   // Fall back to GetObjectProperty.
@@ -3496,7 +3602,12 @@
                                     args.at<Object>(1));
 }
 
-
+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4b - define a new accessor property.
+// Steps 9c & 12 - replace an existing data property with an accessor property.
+// Step 12 - update an existing accessor property with an accessor or generic
+//           descriptor.
 static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
   ASSERT(args.length() == 5);
   HandleScope scope;
@@ -3528,6 +3639,12 @@
   return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
 }
 
+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4a - define a new data property.
+// Steps 9b & 12 - replace an existing accessor property with a data property.
+// Step 12 - update an existing data property with a data or generic
+//           descriptor.
 static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
   ASSERT(args.length() == 4);
   HandleScope scope;
@@ -3551,7 +3668,9 @@
   if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
       is_element) {
     // Normalize the elements to enable attributes on the property.
-    NormalizeElements(js_object);
+    if (!js_object->IsJSGlobalProxy()) {
+      NormalizeElements(js_object);
+    }
     Handle<NumberDictionary> dictionary(js_object->element_dictionary());
     // Make sure that we never go back to fast case.
     dictionary->set_requires_slow_elements();
@@ -3571,7 +3690,9 @@
   if (result.IsProperty() &&
       (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
     // New attributes - normalize to avoid writing to instance descriptor
-    NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+    if (!js_object->IsJSGlobalProxy()) {
+      NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+    }
     // Use IgnoreAttributes version since a readonly property may be
     // overridden and SetProperty does not allow this.
     return js_object->SetLocalPropertyIgnoreAttributes(*name,
@@ -4167,7 +4288,7 @@
 
   ASSERT(args.length() == 1);
   Handle<Object> object = args.at<Object>(0);
-  if (object->IsJSObject()) {
+  if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
     Handle<JSObject> js_object = Handle<JSObject>::cast(object);
     NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
   }
@@ -6889,7 +7010,7 @@
     // the AST id matching the PC.
     Address start = unoptimized->instruction_start();
     unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
-    Address table_cursor = start + unoptimized->stack_check_table_start();
+    Address table_cursor = start + unoptimized->stack_check_table_offset();
     uint32_t table_length = Memory::uint32_at(table_cursor);
     table_cursor += kIntSize;
     for (unsigned i = 0; i < table_length; ++i) {
@@ -7553,7 +7674,8 @@
   Handle<Context> context(Top::context()->global_context());
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
                                                             context,
-                                                            true);
+                                                            true,
+                                                            kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
       Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
@@ -7562,13 +7684,15 @@
 
 
 static ObjectPair CompileGlobalEval(Handle<String> source,
-                                    Handle<Object> receiver) {
+                                    Handle<Object> receiver,
+                                    StrictModeFlag mode) {
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(Top::context()),
-      Top::context()->IsGlobalContext());
+      Top::context()->IsGlobalContext(),
+      mode);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
       shared,
@@ -7579,7 +7703,7 @@
 
 
 static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   if (!args[0]->IsJSFunction()) {
     return MakePair(Top::ThrowIllegalOperation(), NULL);
   }
@@ -7643,12 +7767,16 @@
     return MakePair(*callee, Top::context()->global()->global_receiver());
   }
 
-  return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+  ASSERT(args[3]->IsSmi());
+  return CompileGlobalEval(args.at<String>(1),
+                           args.at<Object>(2),
+                           static_cast<StrictModeFlag>(
+                                Smi::cast(args[3])->value()));
 }
 
 
 static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   if (!args[0]->IsJSFunction()) {
     return MakePair(Top::ThrowIllegalOperation(), NULL);
   }
@@ -7663,7 +7791,11 @@
     return MakePair(*callee, Top::context()->global()->global_receiver());
   }
 
-  return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+  ASSERT(args[3]->IsSmi());
+  return CompileGlobalEval(args.at<String>(1),
+                           args.at<Object>(2),
+                           static_cast<StrictModeFlag>(
+                                Smi::cast(args[3])->value()));
 }
 
 
@@ -9800,10 +9932,14 @@
   Handle<String> function_source =
       Factory::NewStringFromAscii(Vector<const char>(source_str,
                                                      source_str_length));
+
+  // Currently, the eval code is executed in non-strict mode,
+  // even when the calling code is strict.
   Handle<SharedFunctionInfo> shared =
       Compiler::CompileEval(function_source,
                             context,
-                            context->IsGlobalContext());
+                            context->IsGlobalContext(),
+                            kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Factory::NewFunctionFromSharedFunctionInfo(shared, context);
@@ -9885,10 +10021,10 @@
   }
 
   // Compile the source to be evaluated.
+  // Currently, the eval code is executed in non-strict mode,
+  // even when the calling code is strict.
   Handle<SharedFunctionInfo> shared =
-      Compiler::CompileEval(source,
-                            context,
-                            is_global);
+      Compiler::CompileEval(source, context, is_global, kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
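
The per-callback exception rule added at the top of this file reads more
clearly as a table. A restatement of the core of CheckAccessException, same
semantics, switch instead of a compound boolean (the helper name is
invented):

    static bool CallbackOverridesDenial(AccessorInfo* info,
                                        v8::AccessType access_type) {
      switch (access_type) {
        case v8::ACCESS_HAS:
          return info->all_can_read() || info->all_can_write();
        case v8::ACCESS_GET:
          return info->all_can_read();
        case v8::ACCESS_SET:
          return info->all_can_write();
        default:
          return false;
      }
    }
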
diff --git a/src/runtime.h b/src/runtime.h
index b201eb8..fb2ff93 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -237,8 +237,8 @@
   \
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
-  F(ResolvePossiblyDirectEval, 3, 2) \
-  F(ResolvePossiblyDirectEvalNoLookup, 3, 2) \
+  F(ResolvePossiblyDirectEval, 4, 2) \
+  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
   \
   F(SetProperty, -1 /* 3 or 4 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index 34e9cf4..d2ec54c 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -58,7 +58,7 @@
 SafepointTable::SafepointTable(Code* code) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   code_ = code;
-  Address header = code->instruction_start() + code->safepoint_table_start();
+  Address header = code->instruction_start() + code->safepoint_table_offset();
   length_ = Memory::uint32_at(header + kLengthOffset);
   entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
   pc_and_deoptimization_indexes_ = header + kHeaderSize;
@@ -230,4 +230,24 @@
 }
 
 
+int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
+  int result = 0;
+  if (!deoptimization_info_.is_empty()) {
+    unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
+    for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
+      DeoptimizationInfo info = deoptimization_info_[i];
+      if (static_cast<int>(info.deoptimization_index) !=
+          Safepoint::kNoDeoptimizationIndex) {
+        if (previous_gap_end + limit > info.pc) {
+          result++;
+        }
+        previous_gap_end = info.pc_after_gap;
+      }
+    }
+  }
+  return result;
+}
+
+
+
 } }  // namespace v8::internal
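
The interval test inside CountShortDeoptimizationIntervals, restated in
isolation (a sketch; the patch does not show the caller, which presumably
decides whether gap code needs padding):

    // A deoptimization point is "short" when the next real deoptimization
    // point begins within limit bytes of the end of this point's gap code.
    static bool IsShortInterval(unsigned previous_gap_end,
                                unsigned next_pc,
                                unsigned limit) {
      return previous_gap_end + limit > next_pc;
    }
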
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index eeeae37..8803d06 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -220,8 +220,8 @@
                             int arguments,
                             int deoptimization_index);
 
-  // Update the last safepoint with the size of the code generated for the gap
-  // following it.
+  // Update the last safepoint with the size of the code generated until the
+  // end of the gap following it.
   void SetPcAfterGap(int pc) {
     ASSERT(!deoptimization_info_.is_empty());
     int index = deoptimization_info_.length() - 1;
@@ -232,6 +232,11 @@
   // entry must be enough to hold all the pointer indexes.
   void Emit(Assembler* assembler, int bits_per_entry);
 
+  // Count the number of deoptimization points whose following
+  // deoptimization point comes less than limit bytes after the
+  // end of this point's gap.
+  int CountShortDeoptimizationIntervals(unsigned limit);
+
  private:
   struct DeoptimizationInfo {
     unsigned pc;
@@ -247,8 +252,8 @@
   ZoneList<ZoneList<int>*> indexes_;
   ZoneList<ZoneList<int>*> registers_;
 
-  bool emitted_;
   unsigned offset_;
+  bool emitted_;
 
   DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
 };
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index fe33f38..80bca4e 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -796,25 +796,27 @@
   { "break",  KEYWORD_PREFIX, Token::BREAK },
   { NULL,     C,              Token::ILLEGAL },
   { NULL,     D,              Token::ILLEGAL },
-  { "else",   KEYWORD_PREFIX, Token::ELSE },
+  { NULL,     E,              Token::ILLEGAL },
   { NULL,     F,              Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
   { NULL,     I,              Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
+  { "let",    KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
   { NULL,     N,              Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
-  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
+  { NULL,     P,              Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
   { "return", KEYWORD_PREFIX, Token::RETURN },
-  { "switch", KEYWORD_PREFIX, Token::SWITCH },
+  { NULL,     S,              Token::ILLEGAL },
   { NULL,     T,              Token::ILLEGAL },
   { NULL,     UNMATCHABLE,    Token::ILLEGAL },
   { NULL,     V,              Token::ILLEGAL },
-  { NULL,     W,              Token::ILLEGAL }
+  { NULL,     W,              Token::ILLEGAL },
+  { NULL,     UNMATCHABLE,    Token::ILLEGAL },
+  { "yield",  KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
 };
 
 
@@ -822,7 +824,7 @@
   switch (state_) {
     case INITIAL: {
       // matching the first character is the only state with significant fanout.
-      // Match only lower-case letters in range 'b'..'w'.
+      // Match only lower-case letters in range 'b'..'y'.
       unsigned int offset = input - kFirstCharRangeMin;
       if (offset < kFirstCharRangeLength) {
         state_ = first_states_[offset].state;
@@ -850,6 +852,8 @@
       break;
     case C:
       if (MatchState(input, 'a', CA)) return;
+      if (MatchKeywordStart(input, "class", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
       if (MatchState(input, 'o', CO)) return;
       break;
     case CA:
@@ -872,6 +876,18 @@
       if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
       if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
       break;
+    case E:
+      if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
+      if (MatchKeywordStart(input, "enum", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchState(input, 'x', EX)) return;
+      break;
+    case EX:
+      if (MatchKeywordStart(input, "export", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "extends", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
     case F:
       if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
       if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
@@ -880,10 +896,22 @@
       break;
     case I:
       if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
+      if (MatchState(input, 'm', IM)) return;
       if (MatchKeyword(input, 'n', IN, Token::IN)) return;
       break;
+    case IM:
+      if (MatchState(input, 'p', IMP)) return;
+      break;
+    case IMP:
+      if (MatchKeywordStart(input, "implements", 3,
+         Token::FUTURE_RESERVED_WORD )) return;
+      if (MatchKeywordStart(input, "import", 3,
+         Token::FUTURE_RESERVED_WORD)) return;
+      break;
     case IN:
       token_ = Token::IDENTIFIER;
+      if (MatchKeywordStart(input, "interface", 2,
+         Token::FUTURE_RESERVED_WORD)) return;
       if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
       break;
     case N:
@@ -891,6 +919,27 @@
       if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
       if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
       break;
+    case P:
+      if (MatchKeywordStart(input, "package", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchState(input, 'r', PR)) return;
+      if (MatchKeywordStart(input, "public", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
+    case PR:
+      if (MatchKeywordStart(input, "private", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "protected", 2,
+          Token::FUTURE_RESERVED_WORD)) return;
+      break;
+    case S:
+      if (MatchKeywordStart(input, "static", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "super", 1,
+          Token::FUTURE_RESERVED_WORD)) return;
+      if (MatchKeywordStart(input, "switch", 1,
+          Token::SWITCH)) return;
+      break;
     case T:
       if (MatchState(input, 'h', TH)) return;
       if (MatchState(input, 'r', TR)) return;
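
A sketch of how the widened table is exercised, assuming KeywordMatcher's
public AddChar/token interface (the driver itself is hypothetical):

    // Feed an identifier through the matcher; "yield" now classifies as
    // FUTURE_RESERVED_WORD instead of a plain identifier.
    KeywordMatcher matcher;
    for (const char* p = "yield"; *p != '\0'; ++p) matcher.AddChar(*p);
    ASSERT(matcher.token() == Token::FUTURE_RESERVED_WORD);
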
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 7ac1d35..f5fe7f7 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -564,10 +564,17 @@
     CON,
     D,
     DE,
+    E,
+    EX,
     F,
     I,
+    IM,
+    IMP,
     IN,
     N,
+    P,
+    PR,
+    S,
     T,
     TH,
     TR,
@@ -583,7 +590,7 @@
 
   // Range of possible first characters of a keyword.
   static const unsigned int kFirstCharRangeMin = 'b';
-  static const unsigned int kFirstCharRangeMax = 'w';
+  static const unsigned int kFirstCharRangeMax = 'y';
   static const unsigned int kFirstCharRangeLength =
       kFirstCharRangeMax - kFirstCharRangeMin + 1;
   // State map for first keyword character range.
diff --git a/src/scanner.cc b/src/scanner.cc
index b66d10b..cab8c58 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -516,17 +516,30 @@
 
 Token::Value JsonScanner::ScanJsonNumber() {
   LiteralScope literal(this);
-  if (c0_ == '-') AddLiteralCharAdvance();
+  bool negative = false;
+
+  if (c0_ == '-') {
+    AddLiteralCharAdvance();
+    negative = true;
+  }
   if (c0_ == '0') {
     AddLiteralCharAdvance();
     // Prefix zero is only allowed if it's the only digit before
     // a decimal point or exponent.
     if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
   } else {
+    int i = 0;
+    int digits = 0;
     if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
     do {
+      i = i * 10 + c0_ - '0';
+      digits++;
       AddLiteralCharAdvance();
     } while (c0_ >= '0' && c0_ <= '9');
+    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+      number_ = (negative ? -i : i);
+      return Token::NUMBER;
+    }
   }
   if (c0_ == '.') {
     AddLiteralCharAdvance();
@@ -544,6 +557,10 @@
     } while (c0_ >= '0' && c0_ <= '9');
   }
   literal.Complete();
+  ASSERT_NOT_NULL(next_.literal_chars);
+  number_ = StringToDouble(next_.literal_chars->ascii_literal(),
+                           NO_FLAGS,  // Hex, octal or trailing junk.
+                           OS::nan_value());
   return Token::NUMBER;
 }
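
Why the fast path is gated on "digits < 10": the accumulator is a plain int,
and the largest nine-digit decimal, 999999999, still fits in 32 bits, so the
scanner can return the accumulated integer without calling StringToDouble.
Restated as a small guard (helper name invented; uc32 is the scanner's
character type):

    // Use the inline integer only for pure integers short enough not to
    // overflow a 32-bit int.
    static bool UseFastPath(uc32 next_char, int digits) {
      return next_char != '.' && next_char != 'e' && next_char != 'E' &&
             digits < 10;
    }
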
 
diff --git a/src/scanner.h b/src/scanner.h
index d762182..cf2084f 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -148,6 +148,12 @@
   // Returns the next token.
   Token::Value Next();
 
+  // Returns the value of a number token.
+  double number() {
+    return number_;
+  }
+
+
  protected:
   // Skip past JSON whitespace (only space, tab, newline and carriage-return).
   bool SkipJsonWhiteSpace();
@@ -178,6 +184,9 @@
   // are the only valid JSON identifiers (productions JSONBooleanLiteral,
   // JSONNullLiteral).
   Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
+  // Holds the value of a scanned number token.
+  double number_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/scopes.cc b/src/scopes.cc
index d3f54ad..fd573b0 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -383,8 +383,7 @@
 
 
 void Scope::SetIllegalRedeclaration(Expression* expression) {
-  // Only set the illegal redeclaration expression the
-  // first time the function is called.
+  // Record only the first illegal redeclaration.
   if (!HasIllegalRedeclaration()) {
     illegal_redecl_ = expression;
   }
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 15f128d..7317141 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -441,6 +441,12 @@
 
 
 MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
+  // Using NORMAL as the PropertyType for array element loads is a misuse. The
+  // generated stub always accesses fast elements, not slow-mode fields, but
+  // some property type is required for the stub lookup. Note that overloading
+  // the NORMAL PropertyType is only safe as long as no stubs are generated for
+  // other keyed field loads. This is guaranteed to be the case since all field
+  // keyed loads that are not array elements go through a generic builtin stub.
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
   String* name = Heap::KeyedLoadSpecialized_symbol();
@@ -461,6 +467,33 @@
 }
 
 
+MaybeObject* StubCache::ComputeKeyedLoadPixelArray(JSObject* receiver) {
+  // Using NORMAL as the PropertyType for array element loads is a misuse. The
+  // generated stub always accesses fast elements, not slow-mode fields, but
+  // some property type is required for the stub lookup. Note that overloading
+  // the NORMAL PropertyType is only safe as long as no stubs are generated for
+  // other keyed field loads. This is guaranteed to be the case since all field
+  // keyed loads that are not array elements go through a generic builtin stub.
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
+  String* name = Heap::KeyedLoadPixelArray_symbol();
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadPixelArray(receiver);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
+
 MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 07f894a..6d78279 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -133,6 +133,9 @@
   MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
       JSObject* receiver);
 
+  MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadPixelArray(
+      JSObject* receiver);
+
   // ---
 
   MUST_USE_RESULT static MaybeObject* ComputeStoreField(String* name,
@@ -607,6 +610,7 @@
   MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
 
   MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* CompileLoadPixelArray(JSObject* receiver);
 
  private:
   MaybeObject* GetCode(PropertyType type, String* name);
diff --git a/src/third_party/strongtalk/README.chromium b/src/third_party/strongtalk/README.chromium
deleted file mode 100644
index ba2b789..0000000
--- a/src/third_party/strongtalk/README.chromium
+++ /dev/null
@@ -1,18 +0,0 @@
-Name: Strongtalk
-URL: http://www.strongtalk.org/
-
-Code from the Strongtalk assembler is used with modification in the following
-files:
-
-src/assembler.h
-src/assembler.cc
-src/arm/assembler-arm.cc
-src/arm/assembler-arm.h
-src/arm/assembler-arm-inl.h
-src/ia32/assembler-ia32.cc
-src/ia32/assembler-ia32.h
-src/ia32/assembler-ia32-inl.h
-src/mips/assembler-mips.cc
-src/mips/assembler-mips.h
-src/mips/assembler-mips-inl.h
-src/x64/assembler-x64.h
diff --git a/src/token.h b/src/token.h
index fb890d2..776d9f3 100644
--- a/src/token.h
+++ b/src/token.h
@@ -155,38 +155,6 @@
   K(WHILE, "while", 0)                                                  \
   K(WITH, "with", 0)                                                    \
                                                                         \
-  /* Future reserved words (ECMA-262, section 7.5.3, page 14). */       \
-  F(ABSTRACT, "abstract", 0)                                            \
-  F(BOOLEAN, "boolean", 0)                                              \
-  F(BYTE, "byte", 0)                                                    \
-  F(CHAR, "char", 0)                                                    \
-  F(CLASS, "class", 0)                                                  \
-  K(CONST, "const", 0)                                                  \
-  F(DOUBLE, "double", 0)                                                \
-  F(ENUM, "enum", 0)                                                    \
-  F(EXPORT, "export", 0)                                                \
-  F(EXTENDS, "extends", 0)                                              \
-  F(FINAL, "final", 0)                                                  \
-  F(FLOAT, "float", 0)                                                  \
-  F(GOTO, "goto", 0)                                                    \
-  F(IMPLEMENTS, "implements", 0)                                        \
-  F(IMPORT, "import", 0)                                                \
-  F(INT, "int", 0)                                                      \
-  F(INTERFACE, "interface", 0)                                          \
-  F(LONG, "long", 0)                                                    \
-  K(NATIVE, "native", 0)                                                \
-  F(PACKAGE, "package", 0)                                              \
-  F(PRIVATE, "private", 0)                                              \
-  F(PROTECTED, "protected", 0)                                          \
-  F(PUBLIC, "public", 0)                                                \
-  F(SHORT, "short", 0)                                                  \
-  F(STATIC, "static", 0)                                                \
-  F(SUPER, "super", 0)                                                  \
-  F(SYNCHRONIZED, "synchronized", 0)                                    \
-  F(THROWS, "throws", 0)                                                \
-  F(TRANSIENT, "transient", 0)                                          \
-  F(VOLATILE, "volatile", 0)                                            \
-                                                                        \
   /* Literals (ECMA-262, section 7.8, page 16). */                      \
   K(NULL_LITERAL, "null", 0)                                            \
   K(TRUE_LITERAL, "true", 0)                                            \
@@ -197,6 +165,11 @@
   /* Identifiers (not keywords or future reserved words). */            \
   T(IDENTIFIER, NULL, 0)                                                \
                                                                         \
+  /* Future reserved words (ECMA-262, section 7.6.1.2). */              \
+  T(FUTURE_RESERVED_WORD, NULL, 0)                                      \
+  K(CONST, "const", 0)                                                  \
+  K(NATIVE, "native", 0)                                                \
+                                                                        \
   /* Illegal token - not able to scan. */                               \
   T(ILLEGAL, "ILLEGAL", 0)                                              \
                                                                         \
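
Replacing the long ES3 list with a single FUTURE_RESERVED_WORD token matches
ES5, which drops most of the old reservations outright and reserves the rest
only in strict mode. The observable difference, sketched:

    var int = 1;     // SyntaxError under the ES3 list, legal under ES5
    var implements;  // legal in sloppy code; reserved only in strict mode

    function strictCase() {
      "use strict";
      // var implements;  // SyntaxError: strict-mode future reserved word
    }
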
diff --git a/src/top.h b/src/top.h
index 5b0fd61..9d8aa82 100644
--- a/src/top.h
+++ b/src/top.h
@@ -32,11 +32,11 @@
 #include "compilation-cache.h"
 #include "frames-inl.h"
 #include "runtime-profiler.h"
-#include "simulator.h"
 
 namespace v8 {
 namespace internal {
 
+class Simulator;
 
 #define RETURN_IF_SCHEDULED_EXCEPTION() \
   if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
diff --git a/src/type-info.h b/src/type-info.h
index c7029c8..34ff584 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -120,9 +120,9 @@
   }
 
 
-  // Integer32 is an integer that can be represented as either a signed
-  // 32-bit integer or as an unsigned 32-bit integer. It has to be
-  // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+  // Integer32 is an integer that can be represented as a signed 32-bit
+  // integer. It has to be in the range [-2^31, 2^31 - 1]. We also have
+  // to check for negative 0
   // as it is not an Integer32.
   static inline bool IsInt32Double(double value) {
     const DoubleRepresentation minus_zero(-0.0);
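
An equivalent predicate in JavaScript, where | 0 performs ToInt32 and the
1 / v trick detects negative zero:

    function isInt32Double(v) {
      if (v !== (v | 0)) return false;   // rejects 2^31, fractions, NaN, ...
      return !(v === 0 && 1 / v < 0);    // rejects -0 (1 / -0 === -Infinity)
    }
    isInt32Double(-Math.pow(2, 31));     // true: -2^31 is in range
    isInt32Double(Math.pow(2, 32) - 1);  // false: not an Integer32 any more
    isInt32Double(-0);                   // false
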
diff --git a/src/uri.js b/src/uri.js
index 3adab83..179fa92 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -205,7 +205,7 @@
         octets[0] = cc;
         if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
         for (var i = 1; i < n; i++) {
-          k++;
+          if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
           octets[i] = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
         }
         index = URIDecodeOctets(octets, result, index);
@@ -412,4 +412,3 @@
 }
 
 SetupURI();
-
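
The added check rejects multi-byte escapes whose continuation bytes are not
themselves introduced by '%'. Previously the scanner skipped the character
where the '%' belonged and read hex digits from whatever followed. A sketch
of the difference:

    decodeURIComponent("%C2%A3");  // "\u00A3": well-formed two-byte sequence
    try {
      decodeURIComponent("%C2xA3");  // now throws URIError("URI malformed")
    } catch (e) {
      // Before the fix, the 'x' was silently skipped and "%C2xA3" decoded
      // as if it were "%C2%A3".
    }
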
diff --git a/src/v8globals.h b/src/v8globals.h
index 3f27114..85bd17e 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -469,6 +469,12 @@
                   ARMv7 = 2,   // ARM
                   SAHF = 0};   // x86
 
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+enum StrictModeFlag {
+  kNonStrictMode,
+  kStrictMode
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_V8GLOBALS_H_
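
The flag is threaded through to the kResolvePossiblyDirectEval runtime call
(see the codegen-x64.cc and full-codegen-x64.cc hunks below) so that a direct
eval can inherit the strictness of its caller. The behavior this enables,
sketched:

    function sloppyCaller() {
      eval("leaked = 1");  // kNonStrictMode: the assignment creates a global
    }
    function strictCaller() {
      "use strict";
      try {
        // kStrictMode: the eval code is strict too, so assigning to an
        // undeclared variable throws instead of leaking.
        eval("leaked2 = 1");
      } catch (e) {
        // ReferenceError
      }
    }
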
diff --git a/src/v8natives.js b/src/v8natives.js
index 233f8b4..b0fb5bf 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -491,28 +491,29 @@
 }
 
 
-
-// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
-  var desc = new PropertyDescriptor();
-
-  // GetOwnProperty returns an array indexed by the constants
-  // defined in macros.py.
-  // If p is not a property on obj undefined is returned.
-  var props = %GetOwnProperty(ToObject(obj), ToString(p));
-
-  if (IS_UNDEFINED(props)) return void 0;
-
-  // This is an accessor
-  if (props[IS_ACCESSOR_INDEX]) {
-    desc.setGet(props[GETTER_INDEX]);
-    desc.setSet(props[SETTER_INDEX]);
-  } else {
-    desc.setValue(props[VALUE_INDEX]);
-    desc.setWritable(props[WRITABLE_INDEX]);
+// Converts an array returned from Runtime_GetOwnProperty to an actual
+// property descriptor. For a description of the array layout please
+// see the runtime.cc file.
+function ConvertDescriptorArrayToDescriptor(desc_array) {
+  if (desc_array == false) {
+    throw 'Internal error: invalid desc_array';
   }
-  desc.setEnumerable(props[ENUMERABLE_INDEX]);
-  desc.setConfigurable(props[CONFIGURABLE_INDEX]);
+
+  if (IS_UNDEFINED(desc_array)) {
+    return void 0;
+  }
+
+  var desc = new PropertyDescriptor();
+  // This is an accessor.
+  if (desc_array[IS_ACCESSOR_INDEX]) {
+    desc.setGet(desc_array[GETTER_INDEX]);
+    desc.setSet(desc_array[SETTER_INDEX]);
+  } else {
+    desc.setValue(desc_array[VALUE_INDEX]);
+    desc.setWritable(desc_array[WRITABLE_INDEX]);
+  }
+  desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
+  desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
 
   return desc;
 }
@@ -535,9 +536,27 @@
 }
 
 
+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
+  // GetOwnProperty returns an array indexed by the constants
+  // defined in macros.py.
+  // If p is not a property on obj undefined is returned.
+  var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+  // A false value here means that access checks failed.
+  if (props == false) return void 0;
+
+  return ConvertDescriptorArrayToDescriptor(props);
+}
+
+
 // ES5 8.12.9.
 function DefineOwnProperty(obj, p, desc, should_throw) {
-  var current = GetOwnProperty(obj, p);
+  var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
+  // A false value here means that access checks failed.
+  if (current_or_access == false) return void 0;
+
+  var current = ConvertDescriptorArrayToDescriptor(current_or_access);
   var extensible = %IsExtensible(ToObject(obj));
 
   // Error handling according to spec.
@@ -545,10 +564,12 @@
   if (IS_UNDEFINED(current) && !extensible)
     throw MakeTypeError("define_disallowed", ["defineProperty"]);
 
-  if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+  if (!IS_UNDEFINED(current)) {
     // Step 5 and 6
-    if ((!desc.hasEnumerable() ||
-         SameValue(desc.isEnumerable() && current.isEnumerable())) &&
+    if ((IsGenericDescriptor(desc) ||
+         IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
+        (!desc.hasEnumerable() ||
+         SameValue(desc.isEnumerable(), current.isEnumerable())) &&
         (!desc.hasConfigurable() ||
          SameValue(desc.isConfigurable(), current.isConfigurable())) &&
         (!desc.hasWritable() ||
@@ -561,30 +582,36 @@
          SameValue(desc.getSet(), current.getSet()))) {
       return true;
     }
-
-    // Step 7
-    if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
-      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-    // Step 9
-    if (IsDataDescriptor(current) != IsDataDescriptor(desc))
-      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-    // Step 10
-    if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
-      if (!current.isWritable() && desc.isWritable())
+    if (!current.isConfigurable()) {
+      // Step 7
+      if (desc.isConfigurable() ||
+          (desc.hasEnumerable() &&
+           desc.isEnumerable() != current.isEnumerable()))
         throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-      if (!current.isWritable() && desc.hasValue() &&
-          !SameValue(desc.getValue(), current.getValue())) {
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      // Step 8
+      if (!IsGenericDescriptor(desc)) {
+        // Step 9a
+        if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+          throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        // Step 10a
+        if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+          if (!current.isWritable() && desc.isWritable())
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          if (!current.isWritable() && desc.hasValue() &&
+              !SameValue(desc.getValue(), current.getValue())) {
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          }
+        }
+        // Step 11
+        if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+          if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          }
+          if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        }
       }
     }
-    // Step 11
-    if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
-      if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-      }
-      if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
-    }
   }
 
   // Send flags - enumerable and configurable are common - writable is
@@ -607,7 +634,16 @@
   } else
     flag |= DONT_DELETE;
 
-  if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+  if (IsDataDescriptor(desc) ||
+      (IsGenericDescriptor(desc) &&
+       (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
+    // There are 3 cases that lead here:
+    // Step 4a - defining a new data property.
+    // Steps 9b & 12 - replacing an existing accessor property with a data
+    //                 property.
+    // Step 12 - updating an existing data property with a data or generic
+    //           descriptor.
+
     if (desc.hasWritable()) {
       flag |= desc.isWritable() ? 0 : READ_ONLY;
     } else if (!IS_UNDEFINED(current)) {
@@ -615,20 +651,30 @@
     } else {
       flag |= READ_ONLY;
     }
+
     var value = void 0;  // Default value is undefined.
     if (desc.hasValue()) {
       value = desc.getValue();
-    } else if (!IS_UNDEFINED(current)) {
+    } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
       value = current.getValue();
     }
+
     %DefineOrRedefineDataProperty(obj, p, value, flag);
+  } else if (IsGenericDescriptor(desc)) {
+    // Step 12 - updating an existing accessor property with generic
+    //           descriptor. Changing flags only.
+    %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
   } else {
-    if (desc.hasGetter() &&
-        (IS_FUNCTION(desc.getGet()) || IS_UNDEFINED(desc.getGet()))) {
-       %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+    // There are 3 cases that lead here:
+    // Step 4b - defining a new accessor property.
+    // Steps 9c & 12 - replacing an existing data property with an accessor
+    //                 property.
+    // Step 12 - updating an existing accessor property with an accessor
+    //           descriptor.
+    if (desc.hasGetter()) {
+      %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
     }
-    if (desc.hasSetter() &&
-        (IS_FUNCTION(desc.getSet()) || IS_UNDEFINED(desc.getSet()))) {
+    if (desc.hasSetter()) {
       %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
     }
   }
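
Taken together, these hunks align DefineOwnProperty with the ES5 8.12.9 step
numbering and make %GetOwnProperty's access-check failures explicit. A few
cases the reworked code is meant to handle, sketched:

    var o = {};
    function getter() { return 1; }
    Object.defineProperty(o, "p", { get: getter });  // non-configurable

    // Steps 5 and 6: redefining a non-configurable property with an
    // identical descriptor is a no-op, not a TypeError.
    Object.defineProperty(o, "p", { get: getter });

    // Step 12: a generic descriptor touches only attributes; the getter
    // survives.
    Object.defineProperty(o, "p", { enumerable: false });
    o.p;  // still 1

    // Step 11a: actually changing the getter is still rejected.
    try {
      Object.defineProperty(o, "p", { get: function() { return 2; } });
    } catch (e) {
      // TypeError (redefine_disallowed)
    }

    // The false sentinel: if the security check on a cross-context object
    // fails, the descriptor is reported as undefined instead of leaking
    // (crossOriginWindow is a hypothetical access-checked object).
    Object.getOwnPropertyDescriptor(crossOriginWindow, "doc");  // undefined
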
diff --git a/src/version.cc b/src/version.cc
index e17f780..ac1887d 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     1
-#define BUILD_NUMBER      1
+#define BUILD_NUMBER      2
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 999306e..ef069a9 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -916,6 +916,23 @@
 }
 
 
+// Calls directly to the given address using a relative offset.
+// Should only ever be used in Code objects for calls within the
+// same Code object. Should not be used when generating new code (use labels),
+// but only when patching existing code.
+void Assembler::call(Address target) {
+  positions_recorder()->WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1000 #32-bit disp.
+  emit(0xE8);
+  Address source = pc_ + 4;
+  intptr_t displacement = target - source;
+  ASSERT(is_int32(displacement));
+  emitl(static_cast<int32_t>(displacement));
+}
+
+
 void Assembler::clc() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -3012,6 +3029,16 @@
 }
 
 
+void Assembler::movmskpd(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x50);
+  emit_sse_operand(dst, src);
+}
+
 
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
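
For reference, the rel32 displacement emitted by the new call(Address) is
measured from the end of the five-byte E8 instruction. The arithmetic with
hypothetical addresses, sketched in JavaScript:

    function callDisplacement(callSiteAddress, targetAddress) {
      // pc_ is one past the 0xE8 opcode when the displacement is computed,
      // so source is callSiteAddress + 1 + 4.
      var source = callSiteAddress + 5;
      var displacement = targetAddress - source;
      if (displacement < -0x80000000 || displacement > 0x7fffffff) {
        throw new Error("not int32");  // mirrors ASSERT(is_int32(...))
      }
      return displacement;
    }
    callDisplacement(0x1000, 0x1020);  // 0x1b: skip 0x1b bytes past the call
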
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 29817a3..c597783 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -553,10 +553,12 @@
   // TODO(X64): Rename this, removing the "Real", after changing the above.
   static const int kRealPatchReturnSequenceAddressOffset = 2;
 
-  // The x64 JS return sequence is padded with int3 to make it large
-  // enough to hold a call instruction when the debugger patches it.
+  // Some x64 JS code is padded with int3 to make it large
+  // enough to hold an instruction when the debugger patches it.
+  static const int kJumpInstructionLength = 13;
   static const int kCallInstructionLength = 13;
   static const int kJSReturnSequenceLength = 13;
+  static const int kShortCallInstructionLength = 5;
 
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
@@ -585,7 +587,7 @@
 
   // Insert the smallest number of nop instructions
   // possible to align the pc offset to a multiple
-  // of m. m must be a power of 2.
+  // of m, where m must be a power of 2.
   void Align(int m);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
@@ -894,6 +896,10 @@
     arithmetic_op(0x0B, dst, src);
   }
 
+  void orl(Register dst, const Operand& src) {
+    arithmetic_op_32(0x0B, dst, src);
+  }
+
   void or_(const Operand& dst, Register src) {
     arithmetic_op(0x09, src, dst);
   }
@@ -1057,6 +1063,18 @@
     arithmetic_op_32(0x33, dst, src);
   }
 
+  void xorl(Register dst, const Operand& src) {
+    arithmetic_op_32(0x33, dst, src);
+  }
+
+  void xorl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x6, dst, src);
+  }
+
+  void xorl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x6, dst, src);
+  }
+
   void xor_(Register dst, const Operand& src) {
     arithmetic_op(0x33, dst, src);
   }
@@ -1111,6 +1129,12 @@
   void call(Label* L);
   void call(Handle<Code> target, RelocInfo::Mode rmode);
 
+  // Calls directly to the given address using a relative offset.
+  // Should only ever be used in Code objects for calls within the
+  // same Code object. Should not be used when generating new code (use labels),
+  // but only when patching existing code.
+  void call(Address target);
+
   // Call near absolute indirect, address in register
   void call(Register adr);
 
@@ -1254,6 +1278,8 @@
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
 
+  void movmskpd(Register dst, XMMRegister src);
+
   // The first argument is the reg field, the second argument is the r/m field.
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 40e6138..b0cadda 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1037,29 +1037,6 @@
 }
 
 
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
-    MacroAssembler* masm) {
-  __ pop(rcx);  // Save return address.
-  // Left and right arguments are already on top of the stack.
-  // Push this stub's key. Although the operation and the type info are
-  // encoded into the key, the encoding is opaque, so push them too.
-  __ Push(Smi::FromInt(MinorKey()));
-  __ Push(Smi::FromInt(op_));
-  __ Push(Smi::FromInt(operands_type_));
-
-  __ push(rcx);  // Push return address.
-
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
-      5,
-      1);
-}
-
-
 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
     case TRBinaryOpIC::UNINITIALIZED:
@@ -1069,7 +1046,9 @@
       GenerateSmiStub(masm);
       break;
     case TRBinaryOpIC::INT32:
-      GenerateInt32Stub(masm);
+      UNREACHABLE();
+      // The int32 case is identical to the Smi case.  We avoid creating this
+      // ic state on x64.
       break;
     case TRBinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
@@ -1112,85 +1091,428 @@
 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-  UNIMPLEMENTED();
+
+  // We only generate heapnumber answers for overflowing calculations
+  // for the four basic arithmetic operations.
+  bool generate_inline_heapnumber_results =
+      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+      (op_ == Token::ADD || op_ == Token::SUB ||
+       op_ == Token::MUL || op_ == Token::DIV);
+
+  // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+  Register left = rdx;
+  Register right = rax;
+
+
+  // Smi check of both operands.  If op is BIT_OR, the check is delayed
+  // until after the OR operation.
+  Label not_smis;
+  Label use_fp_on_smis;
+  Label restore_MOD_registers;  // Only used if op_ == Token::MOD.
+
+  if (op_ != Token::BIT_OR) {
+    Comment smi_check_comment(masm, "-- Smi check arguments");
+    __ JumpIfNotBothSmi(left, right, &not_smis);
+  }
+
+  // Perform the operation.
+  Comment perform_smi(masm, "-- Perform smi operation");
+  switch (op_) {
+    case Token::ADD:
+      ASSERT(right.is(rax));
+      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
+      break;
+
+    case Token::SUB:
+      __ SmiSub(left, left, right, &use_fp_on_smis);
+      __ movq(rax, left);
+      break;
+
+    case Token::MUL:
+      ASSERT(right.is(rax));
+      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
+      break;
+
+    case Token::DIV:
+      // SmiDiv will not accept left in rdx or right in rax.
+      left = rcx;
+      right = rbx;
+      __ movq(rbx, rax);
+      __ movq(rcx, rdx);
+      __ SmiDiv(rax, left, right, &use_fp_on_smis);
+      break;
+
+    case Token::MOD:
+      // SmiMod will not accept left in rdx or right in rax.
+      left = rcx;
+      right = rbx;
+      __ movq(rbx, rax);
+      __ movq(rcx, rdx);
+      __ SmiMod(rax, left, right, &use_fp_on_smis);
+      break;
+
+    case Token::BIT_OR: {
+      ASSERT(right.is(rax));
+      __ movq(rcx, right);  // Save the right operand.
+      __ SmiOr(right, right, left);  // BIT_OR is commutative.
+      __ JumpIfNotSmi(right, &not_smis);  // Test delayed until after BIT_OR.
+      break;
+      }
+    case Token::BIT_XOR:
+      ASSERT(right.is(rax));
+      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
+      break;
+
+    case Token::BIT_AND:
+      ASSERT(right.is(rax));
+      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
+      break;
+
+    case Token::SHL:
+      __ SmiShiftLeft(left, left, right);
+      __ movq(rax, left);
+      break;
+
+    case Token::SAR:
+      __ SmiShiftArithmeticRight(left, left, right);
+      __ movq(rax, left);
+      break;
+
+    case Token::SHR:
+      __ SmiShiftLogicalRight(left, left, right, &not_smis);
+      __ movq(rax, left);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // 5. Emit return of result in rax.  Some operations have registers pushed.
+  __ ret(0);
+
+  // 6. For some operations emit inline code to perform floating point
+  //    operations on known smis (e.g., if the result of the operation
+  //    overflowed the smi range).
+  __ bind(&use_fp_on_smis);
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    // Restore left and right to rdx and rax.
+    __ movq(rdx, rcx);
+    __ movq(rax, rbx);
+  }
+
+
+  if (generate_inline_heapnumber_results) {
+    __ AllocateHeapNumber(rcx, rbx, slow);
+    Comment perform_float(masm, "-- Perform float operation on smis");
+    FloatingPointHelper::LoadSSE2SmiOperands(masm);
+    switch (op_) {
+      case Token::ADD: __ addsd(xmm0, xmm1); break;
+      case Token::SUB: __ subsd(xmm0, xmm1); break;
+      case Token::MUL: __ mulsd(xmm0, xmm1); break;
+      case Token::DIV: __ divsd(xmm0, xmm1); break;
+      default: UNREACHABLE();
+    }
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+    __ movq(rax, rcx);
+    __ ret(0);
+  }
+
+  // 7. Non-smi operands reach the end of the code generated by
+  //    GenerateSmiCode, and fall through to subsequent code,
+  //    with the operands in rdx and rax.
+  Comment done_comment(masm, "-- Enter non-smi code");
+  __ bind(&not_smis);
+  if (op_ == Token::BIT_OR) {
+    __ movq(right, rcx);
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
+    MacroAssembler* masm,
+    Label* allocation_failure,
+    Label* non_numeric_failure) {
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
+
+      switch (op_) {
+        case Token::ADD: __ addsd(xmm0, xmm1); break;
+        case Token::SUB: __ subsd(xmm0, xmm1); break;
+        case Token::MUL: __ mulsd(xmm0, xmm1); break;
+        case Token::DIV: __ divsd(xmm0, xmm1); break;
+        default: UNREACHABLE();
+      }
+      GenerateHeapResultAllocation(masm, allocation_failure);
+      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+      __ ret(0);
+      break;
+    }
+    case Token::MOD: {
+      // For MOD we jump to the allocation_failure label, to call runtime.
+      __ jmp(allocation_failure);
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      Label non_smi_shr_result;
+      Register heap_number_map = r9;
+      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
+                                          heap_number_map);
+      switch (op_) {
+        case Token::BIT_OR:  __ orl(rax, rcx); break;
+        case Token::BIT_AND: __ andl(rax, rcx); break;
+        case Token::BIT_XOR: __ xorl(rax, rcx); break;
+        case Token::SAR: __ sarl_cl(rax); break;
+        case Token::SHL: __ shll_cl(rax); break;
+        case Token::SHR: {
+          __ shrl_cl(rax);
+          // Check if result is negative. This can only happen for a shift
+          // by zero.
+          __ testl(rax, rax);
+          __ j(negative, &non_smi_shr_result);
+          break;
+        }
+        default: UNREACHABLE();
+      }
+      STATIC_ASSERT(kSmiValueSize == 32);
+      // Tag smi result and return.
+      __ Integer32ToSmi(rax, rax);
+      __ Ret();
+
+      // Logical shift right can produce an unsigned int32 that is not
+      // an int32, and so is not in the smi range.  Allocate a heap number
+      // in that case.
+      if (op_ == Token::SHR) {
+        __ bind(&non_smi_shr_result);
+        Label allocation_failed;
+        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
+        // Allocate heap number in new space.
+        // Not using AllocateHeapNumber macro in order to reuse
+        // already loaded heap_number_map.
+        __ AllocateInNewSpace(HeapNumber::kSize,
+                              rax,
+                              rcx,
+                              no_reg,
+                              &allocation_failed,
+                              TAG_OBJECT);
+        // Set the map.
+        if (FLAG_debug_code) {
+          __ AbortIfNotRootValue(heap_number_map,
+                                 Heap::kHeapNumberMapRootIndex,
+                                 "HeapNumberMap register clobbered.");
+        }
+        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                heap_number_map);
+        __ cvtqsi2sd(xmm0, rbx);
+        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+        __ Ret();
+
+        __ bind(&allocation_failed);
+        // We need tagged values in rdx and rax for the following code,
+        // not int32 in rax and rcx.
+        __ Integer32ToSmi(rax, rcx);
+        __ Integer32ToSmi(rdx, rax);
+        __ jmp(allocation_failure);
+      }
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+  // No fall-through from this generated code.
+  if (FLAG_debug_code) {
+    __ Abort("Unexpected fall-through in "
+             "TypeRecordingBinaryOpStub::GenerateFloatingPointCode.");
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  // Registers containing left and right operands respectively.
+  Register lhs = rdx;
+  Register rhs = rax;
+
+  // Test for string arguments before calling runtime.
+  Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+  __ JumpIfNotString(lhs, r8, &not_string1);
+
+  // First argument is a string, test second.
+  __ JumpIfSmi(rhs, &string1_smi2);
+  __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+  __ j(above_equal, &string1);
+
+  // First and second argument are strings.
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&string1_smi2);
+  // First argument is a string, second is a smi. Try to lookup the number
+  // string for the smi in the number string cache.
+  NumberToStringStub::GenerateLookupNumberStringCache(
+      masm, rhs, rbx, rcx, r8, true, &string1);
+
+  // Replace the second argument on the stack and tail-call the string add
+  // stub to compute the result.
+  __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+  __ TailCallStub(&string_add_stub);
+
+  // Only first argument is a string.
+  __ bind(&string1);
+  __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+  // First argument was not a string, test second.
+  __ bind(&not_string1);
+  __ JumpIfNotString(rhs, rhs, &not_strings);
+
+  // Only second argument is a string.
+  __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+  __ bind(&not_strings);
+  // Neither argument is a string.
+  // Pop arguments, because CallRuntimeCode wants to push them again.
+  __ pop(rcx);
+  __ pop(rax);
+  __ pop(rdx);
+  __ push(rcx);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label call_runtime;
+  Label not_smi;
 
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-      break;
-    case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR:
-      GenerateRegisterArgsPush(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
 
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
-    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
-  } else {
-    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-  }
-  __ bind(&call_runtime);
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-      GenerateTypeTransition(masm);
-      break;
-    case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR:
-      GenerateTypeTransitionWithSavedArgs(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  __ bind(&not_smi);
+  GenerateTypeTransition(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  UNIMPLEMENTED();
-}
+  ASSERT(op_ == Token::ADD);
+  GenerateStringAddCode(masm);
 
-
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  GenerateTypeTransition(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  Label gc_required, not_number;
+  GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+  __ bind(&not_number);
+  GenerateTypeTransition(masm);
+
+  __ bind(&gc_required);
+  GenerateCallRuntimeCode(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  Label call_runtime, call_string_add_or_runtime;
+
+  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+
+  __ bind(&call_string_add_or_runtime);
+  if (op_ == Token::ADD) {
+    GenerateStringAddCode(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntimeCode(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Label* alloc_failure) {
-  UNIMPLEMENTED();
+  Label skip_allocation;
+  OverwriteMode mode = mode_;
+  switch (mode) {
+    case OVERWRITE_LEFT: {
+      // If the argument in rdx is already an object, we skip the
+      // allocation of a heap number.
+      __ JumpIfNotSmi(rdx, &skip_allocation);
+      // Allocate a heap number for the result. Keep rax and rdx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+      // Now rdx can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ movq(rdx, rbx);
+      __ bind(&skip_allocation);
+      // Use object in rdx as a result holder
+      __ movq(rax, rdx);
+      break;
+    }
+    case OVERWRITE_RIGHT:
+      // If the argument in rax is already an object, we skip the
+      // allocation of a heap number.
+      __ JumpIfNotSmi(rax, &skip_allocation);
+      // Fall through!
+    case NO_OVERWRITE:
+      // Allocate a heap number for the result. Keep rax and rdx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+      // Now rax can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ movq(rax, rbx);
+      __ bind(&skip_allocation);
+      break;
+    default: UNREACHABLE();
+  }
 }
 
 
@@ -1512,6 +1834,7 @@
 
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
+// Jump to conversion_failure: rdx and rax are unchanged.
 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
                                          Label* conversion_failure,
                                          Register heap_number_map) {
@@ -1521,28 +1844,27 @@
   Label load_arg2, done;
 
   __ JumpIfNotSmi(rdx, &arg1_is_object);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(r8, rdx);
   __ jmp(&load_arg2);
 
   // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
   __ bind(&check_undefined_arg1);
   __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, conversion_failure);
-  __ movl(rdx, Immediate(0));
+  __ movl(r8, Immediate(0));
   __ jmp(&load_arg2);
 
   __ bind(&arg1_is_object);
   __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg1);
-  // Get the untagged integer version of the edx heap number in rcx.
-  IntegerConvert(masm, rdx, rdx);
+  // Get the untagged integer version of the rdx heap number in r8.
+  IntegerConvert(masm, r8, rdx);
 
-  // Here rdx has the untagged integer, rax has a Smi or a heap number.
+  // Here r8 has the untagged integer, rax has a Smi or a heap number.
   __ bind(&load_arg2);
   // Test if arg2 is a Smi.
   __ JumpIfNotSmi(rax, &arg2_is_object);
-  __ SmiToInteger32(rax, rax);
-  __ movl(rcx, rax);
+  __ SmiToInteger32(rcx, rax);
   __ jmp(&done);
 
   // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
@@ -1558,7 +1880,7 @@
   // Get the untagged integer version of the rax heap number in rcx.
   IntegerConvert(masm, rcx, rax);
   __ bind(&done);
-  __ movl(rax, rdx);
+  __ movl(rax, r8);
 }
 
 
@@ -1888,11 +2210,11 @@
   }
 
   // Stack frame on entry.
-  //  esp[0]: return address
-  //  esp[8]: last_match_info (expected JSArray)
-  //  esp[16]: previous index
-  //  esp[24]: subject string
-  //  esp[32]: JSRegExp object
+  //  rsp[0]: return address
+  //  rsp[8]: last_match_info (expected JSArray)
+  //  rsp[16]: previous index
+  //  rsp[24]: subject string
+  //  rsp[32]: JSRegExp object
 
   static const int kLastMatchInfoOffset = 1 * kPointerSize;
   static const int kPreviousIndexOffset = 2 * kPointerSize;
@@ -2234,7 +2556,7 @@
   // Smi-tagging is equivalent to multiplying by 2.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
-  // Allocate RegExpResult followed by FixedArray with size in ebx.
+  // Allocate RegExpResult followed by FixedArray with size in rbx.
   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
   // Elements:  [Map][Length][..elements..]
   __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
@@ -2293,7 +2615,7 @@
   Label loop;
   __ testl(rbx, rbx);
   __ bind(&loop);
-  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ j(less_equal, &done);  // Jump if rbx is negative or zero.
   __ subl(rbx, Immediate(1));
   __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
   __ jmp(&loop);
@@ -2656,7 +2978,7 @@
     // undefined, and are equal.
     __ Set(rax, EQUAL);
     __ bind(&return_unequal);
-    // Return non-equal by returning the non-zero object pointer in eax,
+    // Return non-equal by returning the non-zero object pointer in rax,
     // or return equal if we fell through to here.
     __ ret(0);
     __ bind(&not_both_objects);
@@ -3151,7 +3473,7 @@
   __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  // If current EBP value is the same as js_entry_sp value, it means that
+  // If current RBP value is the same as js_entry_sp value, it means that
   // the current function is the outermost.
   __ movq(kScratchRegister, js_entry_sp);
   __ cmpq(rbp, Operand(kScratchRegister, 0));
@@ -4414,6 +4736,53 @@
   __ jmp(rdi);
 }
 
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements,
+                                Register untagged_key,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  //   key - holds the key and is unchanged (must be a smi).
+  //   elements - is set to the the receiver's element if
+  //       the receiver doesn't have a pixel array or the
+  //       key is not a smi, otherwise it's the elements'
+  //       external pointer.
+  //   untagged_key - is set to the untagged key
+
+  // Some callers have already verified that the key is a smi.  key_not_smi
+  // is set to NULL as a sentinel for that case.  Otherwise, an explicit
+  // check that the key is a smi is emitted.
+  if (key_not_smi != NULL) {
+    __ JumpIfNotSmi(key, key_not_smi);
+  } else {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key);
+    }
+  }
+  __ SmiToInteger32(untagged_key, key);
+
+  // Verify that the receiver has pixel array elements.
+  __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+  // Check that the smi is in range.
+  __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+  __ j(above_equal, out_of_range);  // unsigned check handles negative keys.
+
+  // Load and tag the element as a smi.
+  __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+  __ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
+  __ Integer32ToSmi(result, result);
+  __ ret(0);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
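
Of the bitwise operators handled in GenerateFloatingPointCode above, only SHR
can produce a value outside the signed 32-bit range, which is why it alone
gets a heap-number fallback (x64 smis hold a full 32-bit payload). Illustrated:

    -1 & 0xff;  //        255: int32, stays a smi
    -1 >> 1;    //         -1: int32, stays a smi
    -1 >>> 1;   // 2147483647: still int32, stays a smi
    -1 >>> 0;   // 4294967295: uint32 outside int32, needs a heap number
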
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 9feced2..8051d4b 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -270,6 +270,11 @@
   void GenerateSmiCode(MacroAssembler* masm,
                        Label* slow,
                        SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateFloatingPointCode(MacroAssembler* masm,
+                                 Label* allocation_failure,
+                                 Label* non_numeric_failure);
+  void GenerateStringAddCode(MacroAssembler* masm);
+  void GenerateCallRuntimeCode(MacroAssembler* masm);
   void GenerateLoadArguments(MacroAssembler* masm);
   void GenerateReturn(MacroAssembler* masm);
   void GenerateUninitializedStub(MacroAssembler* masm);
@@ -447,6 +452,25 @@
 };
 
 
+// Generate code to load an element from a pixel array. The receiver is
+// assumed to not be a smi and to have elements, the caller must guarantee this
+// precondition. If the receiver does not have elements that are pixel arrays,
+// the generated code jumps to not_pixel_array. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements,
+                                Register untagged_key,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODE_STUBS_X64_H_
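
The contract is easiest to see from the JavaScript side. A sketch, assuming
pixels is a canvas-style pixel array (e.g. from getImageData().data):

    pixels[3];    // fast path: smi key in bounds, result is a smi in 0..255
    pixels[-1];   // out_of_range: the unsigned compare catches negative keys
    pixels[1e9];  // out_of_range: beyond the pixel array's length
    pixels[0.5];  // key is not a smi: key_not_smi / generic path
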
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 91686f9..b8069a2 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -5402,9 +5402,12 @@
       }
       frame_->PushParameterAt(-1);
 
+      // Push the strict mode flag.
+      frame_->Push(Smi::FromInt(strict_mode_flag()));
+
       // Resolve the call.
       result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
 
       done.Jump(&result);
       slow.Bind();
@@ -5421,8 +5424,11 @@
     }
     frame_->PushParameterAt(-1);
 
+    // Push the strict mode flag.
+    frame_->Push(Smi::FromInt(strict_mode_flag()));
+
     // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
     // If we generated fast-case code bind the jump-target where fast
     // and slow case merge.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index b308f64..c283db3 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -357,6 +357,7 @@
   // Accessors
   inline bool is_eval();
   inline Scope* scope();
+  inline StrictModeFlag strict_mode_flag();
 
   // Generating deferred code.
   void ProcessDeferred();
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 29b9023..ed6c47b 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -42,10 +42,65 @@
 
 
 int Deoptimizer::patch_size() {
-  return Assembler::kCallInstructionLength;
+  return MacroAssembler::kCallInstructionLength;
 }
 
 
+#ifdef DEBUG
+// Overwrites code with int3 instructions.
+static void ZapCodeRange(Address from, Address to) {
+  CHECK(from <= to);
+  int length = static_cast<int>(to - from);
+  CodePatcher destroyer(from, length);
+  while (length-- > 0) {
+    destroyer.masm()->int3();
+  }
+}
+#endif
+
+
+// Iterates through the entries of a SafepointTable that correspond to
+// deoptimization points.
+class SafepointTableDeoptimizationEntryIterator {
+ public:
+  explicit SafepointTableDeoptimizationEntryIterator(Code* code)
+      : code_(code), table_(code), index_(-1), limit_(table_.length()) {
+    FindNextIndex();
+  }
+
+  SafepointEntry Next(Address* pc) {
+    if (index_ >= limit_) {
+      *pc = NULL;
+      return SafepointEntry();  // Invalid entry.
+    }
+    *pc = code_->instruction_start() + table_.GetPcOffset(index_);
+    SafepointEntry entry = table_.GetEntry(index_);
+    FindNextIndex();
+    return entry;
+  }
+
+ private:
+  void FindNextIndex() {
+    ASSERT(index_ < limit_);
+    while (++index_ < limit_) {
+      if (table_.GetEntry(index_).deoptimization_index() !=
+          Safepoint::kNoDeoptimizationIndex) {
+        return;
+      }
+    }
+  }
+
+  Code* code_;
+  SafepointTable table_;
+  // Index of the next deoptimization entry. If index_ has reached limit_
+  // after calling FindNextIndex, there are no more, and Next will return
+  // an invalid SafepointEntry.
+  int index_;
+  // Table length.
+  int limit_;
+};
+
+
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   AssertNoAllocation no_allocation;
 
@@ -59,42 +114,74 @@
   code->InvalidateRelocation();
 
   // For each return after a safepoint, insert an absolute call to the
-  // corresponding deoptimization entry.
-  unsigned last_pc_offset = 0;
-  SafepointTable table(function->code());
-  for (unsigned i = 0; i < table.length(); i++) {
-    unsigned pc_offset = table.GetPcOffset(i);
-    SafepointEntry safepoint_entry = table.GetEntry(i);
-    int deoptimization_index = safepoint_entry.deoptimization_index();
-    int gap_code_size = safepoint_entry.gap_code_size();
+  // corresponding deoptimization entry, or a short call to an absolute
+  // jump when there is not enough room. The absolute jumps are put in a
+  // table just before the safepoint table (space was allocated there when
+  // the Code object was created, if necessary).
+
+  Address instruction_start = function->code()->instruction_start();
+  Address jump_table_address =
+      instruction_start + function->code()->safepoint_table_offset();
+  Address previous_pc = instruction_start;
+
+  SafepointTableDeoptimizationEntryIterator deoptimizations(function->code());
+  Address entry_pc = NULL;
+
+  SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
+  while (current_entry.is_valid()) {
+    int gap_code_size = current_entry.gap_code_size();
+    unsigned deoptimization_index = current_entry.deoptimization_index();
+
 #ifdef DEBUG
     // Destroy the code which is not supposed to run again.
-    unsigned instructions = pc_offset - last_pc_offset;
-    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                          instructions);
-    for (unsigned i = 0; i < instructions; i++) {
-      destroyer.masm()->int3();
-    }
+    ZapCodeRange(previous_pc, entry_pc);
 #endif
-    last_pc_offset = pc_offset;
-    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      last_pc_offset += gap_code_size;
-      CodePatcher patcher(code->instruction_start() + last_pc_offset,
-                          patch_size());
+    // Position where Call will be patched in.
+    Address call_address = entry_pc + gap_code_size;
+    // End of call instruction, if using a direct call to a 64-bit address.
+    Address call_end_address =
+        call_address + MacroAssembler::kCallInstructionLength;
+
+    // Find next deoptimization entry, if any.
+    Address next_pc = NULL;
+    SafepointEntry next_entry = deoptimizations.Next(&next_pc);
+
+    if (!next_entry.is_valid() || next_pc >= call_end_address) {
+      // Enough room to write a long call instruction.
+      CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
       patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
                            RelocInfo::NONE);
-      last_pc_offset += patch_size();
+      previous_pc = call_end_address;
+    } else {
+      // Not enough room for a long Call instruction. Write a short call
+      // instruction to a long jump placed elsewhere in the code.
+      Address short_call_end_address =
+          call_address + MacroAssembler::kShortCallInstructionLength;
+      ASSERT(next_pc >= short_call_end_address);
+
+      // Write jump in jump-table.
+      jump_table_address -= MacroAssembler::kJumpInstructionLength;
+      CodePatcher jump_patcher(jump_table_address,
+                               MacroAssembler::kJumpInstructionLength);
+      jump_patcher.masm()->Jump(
+          GetDeoptimizationEntry(deoptimization_index, LAZY),
+          RelocInfo::NONE);
+
+      // Write a call at call_address that targets the jump-table entry.
+      CodePatcher call_patcher(call_address,
+                               MacroAssembler::kShortCallInstructionLength);
+      call_patcher.masm()->call(jump_table_address);
+      previous_pc = short_call_end_address;
     }
+
+    // Continue with next deoptimization entry.
+    current_entry = next_entry;
+    entry_pc = next_pc;
   }
+
 #ifdef DEBUG
   // Destroy the code which is not supposed to run again.
-  CHECK(code->safepoint_table_start() >= last_pc_offset);
-  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
-  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                        instructions);
-  for (unsigned i = 0; i < instructions; i++) {
-    destroyer.masm()->int3();
-  }
+  ZapCodeRange(previous_pc, jump_table_address);
 #endif
 
   // Add the deoptimizing code to the list.
@@ -390,7 +477,7 @@
     __ pop(Operand(rbx, offset));
   }
 
-    // Fill in the double input registers.
+  // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
     int dst_offset = i * kDoubleSize + double_regs_offset;
@@ -404,7 +491,7 @@
     __ addq(rsp, Immediate(2 * kPointerSize));
   }
 
-  // Compute a pointer to the unwinding limit in register ecx; that is
+  // Compute a pointer to the unwinding limit in register rcx; that is
   // the first stack slot not part of the input frame.
   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
   __ addq(rcx, rsp);
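
The choice between the two patch shapes follows from the instruction sizes
declared in assembler-x64.h above: the 13-byte form (presumably a movq of a
64-bit immediate into a scratch register followed by an indirect call) may
overlap the next patch site, in which case a 5-byte near call is redirected
through a 13-byte absolute jump in the reserved table. The decision, sketched:

    var kCallInstructionLength = 13;
    var kShortCallInstructionLength = 5;

    function patchShape(callAddress, nextPatchAddress /* or null */) {
      if (nextPatchAddress === null ||
          nextPatchAddress >= callAddress + kCallInstructionLength) {
        return "13-byte call straight to the deoptimization entry";
      }
      // Too close to the next patch site: emit E8 rel32 here and park the
      // absolute jump just before the safepoint table.
      return "5-byte call into the jump table";
    }
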
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 92a8a7d..f73f948 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1046,6 +1046,8 @@
           mnemonic = "ucomisd";
         } else if (opcode == 0x2F) {
           mnemonic = "comisd";
+        } else if (opcode == 0x50) {
+          mnemonic = "movmskpd";
         } else {
           UnimplementedInstruction();
         }
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a2a0e7e..998b3e9 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -31,7 +31,7 @@
 namespace v8 {
 namespace internal {
 
-static const int kNumRegs = 8;
+static const int kNumRegs = 16;
 static const RegList kJSCallerSaved =
     1 << 0 |  // rax
     1 << 1 |  // rcx
@@ -44,8 +44,7 @@
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 // Number of registers for which space is reserved in safepoints.
-// TODO(x64): This should not be 0.
-static const int kNumSafepointRegisters = 8;
+static const int kNumSafepointRegisters = 16;
 
 // ----------------------------------------------------
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 2a30bb8..9144874 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1529,14 +1529,9 @@
   __ j(smi, &smi_case);
 
   __ bind(&stub_call);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  if (stub.ArgsInRegistersSupported()) {
-    stub.GenerateCall(masm_, rdx, rcx);
-  } else {
-    __ push(rdx);
-    __ push(rcx);
-    __ CallStub(&stub);
-  }
+  TypeRecordingBinaryOpStub stub(op, mode);
+  __ movq(rax, rcx);
+  __ CallStub(&stub);
   __ jmp(&done);
 
   __ bind(&smi_case);
@@ -1580,14 +1575,9 @@
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
-  GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
-  if (stub.ArgsInRegistersSupported()) {
-    __ pop(rdx);
-    stub.GenerateCall(masm_, rdx, rax);
-  } else {
-    __ push(result_register());
-    __ CallStub(&stub);
-  }
+  TypeRecordingBinaryOpStub stub(op, mode);
+  __ pop(rdx);
+  __ CallStub(&stub);
   context()->Plug(rax);
 }
 
@@ -1934,7 +1924,9 @@
 
       // Push the receiver of the enclosing function and do runtime call.
       __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
-      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+      // Push the strict mode flag.
+      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
 
       // The runtime call returns a pair of values in rax (function) and
       // rdx (receiver). Touch up the stack with the right values.
@@ -3217,6 +3209,7 @@
     // the first smi check before calling ToNumber.
     is_smi = masm_->CheckSmi(rax);
     __ j(is_smi, &done);
+
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
@@ -3230,12 +3223,16 @@
   SetSourcePosition(expr->position());
 
   // Call stub for +1/-1.
-  GenericBinaryOpStub stub(expr->binary_op(),
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS);
-  stub.GenerateCall(masm_, rax, Smi::FromInt(1));
-  __ bind(&done);
+  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+  if (expr->op() == Token::INC) {
+    __ Move(rdx, Smi::FromInt(1));
+  } else {
+    __ movq(rdx, rax);
+    __ Move(rax, Smi::FromInt(1));
+  }
+  __ CallStub(&stub);
 
+  __ bind(&done);
   // Store the value returned in rax.
   switch (assign_type) {
     case VARIABLE:
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index d060e31..e5a4bfc 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -580,20 +580,15 @@
   __ ret(0);
 
   __ bind(&check_pixel_array);
-  // Check whether the elements object is a pixel array.
-  // rdx: receiver
-  // rax: key
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ SmiToInteger32(rbx, rax);  // Used on both directions of next branch.
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
-                 Heap::kPixelArrayMapRootIndex);
-  __ j(not_equal, &check_number_dictionary);
-  __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
-  __ movzxbq(rax, Operand(rax, rbx, times_1, 0));
-  __ Integer32ToSmi(rax, rax);
-  __ ret(0);
+  GenerateFastPixelArrayLoad(masm,
+                             rdx,
+                             rax,
+                             rcx,
+                             rbx,
+                             rax,
+                             &check_number_dictionary,
+                             NULL,
+                             &slow);
 
   __ bind(&check_number_dictionary);
   // Check whether the elements is a number dictionary.
@@ -1703,7 +1698,8 @@
 }
 
 void PatchInlinedSmiCode(Address address) {
-  UNIMPLEMENTED();
+  // Patching of inlined smi code is not implemented on X64, so there is
+  // nothing to do here.
 }
 
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 4f1efc4..48d4130 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -53,7 +53,7 @@
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
   code->set_stack_slots(StackSlotCount());
-  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
 
@@ -188,9 +188,19 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  // Ensure that patching a deoptimization point won't overwrite the table.
-  for (int i = 0; i < Assembler::kCallInstructionLength; i++) {
-    masm()->int3();
+  // Ensure that there is space at the end of the code to write a number
+  // of jump instructions, as well as room to write a call near the end
+  // of the code.
+  // The jumps are used when there isn't room in the code stream to write
+  // a long call instruction; instead, a shorter call to a jump instruction
+  // within the same code object is written.
+  // The calls are used when lazily deoptimizing a function, which requires
+  // patching in calls to a deoptimization entry.
+  int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
+      static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
+  int byte_count = short_deopts * MacroAssembler::kJumpInstructionLength;
+  while (byte_count-- > 0) {
+    __ int3();
   }
   safepoints_.Emit(masm(), StackSlotCount());
   return !is_aborted();
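
A standalone sketch of the padding rule above: patching a deoptimization
point needs room for a full call, and wherever two safepoints sit closer
together than a call is long, a shorter call to a jump island is emitted
instead, so one jump's worth of padding is reserved per short interval. The
instruction lengths and offsets below are illustrative assumptions, not V8's
actual values.

    #include <cstdio>
    #include <vector>

    // Count the gaps between consecutive safepoints that are too short to
    // hold a full call instruction; each such gap needs a jump island.
    static int CountShortIntervals(const std::vector<int>& pc_offsets,
                                   int call_len) {
      int short_intervals = 0;
      for (size_t i = 1; i < pc_offsets.size(); ++i) {
        if (pc_offsets[i] - pc_offsets[i - 1] < call_len) ++short_intervals;
      }
      return short_intervals;
    }

    int main() {
      const int kCallLen = 13, kJumpLen = 13;     // assumed lengths
      std::vector<int> offsets = {0, 5, 30, 36};  // two gaps shorter than a call
      std::printf("padding bytes: %d\n",
                  CountShortIntervals(offsets, kCallLen) * kJumpLen);
    }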
@@ -364,7 +374,13 @@
 void LCodeGen::CallRuntime(Runtime::Function* function,
                            int num_arguments,
                            LInstruction* instr) {
-  Abort("Unimplemented: %s", "CallRuntime");
+  ASSERT(instr != NULL);
+  ASSERT(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  RegisterLazyDeoptimization(instr);
 }
 
 
@@ -499,6 +515,7 @@
     int arguments,
     int deoptimization_index) {
   const ZoneList<LOperand*>* operands = pointers->operands();
+
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
@@ -599,11 +616,115 @@
 
 
 void LCodeGen::DoBitI(LBitI* instr) {
-  Abort("Unimplemented: %s", "DoBitI");}
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int right_operand = ToInteger32(LConstantOperand::cast(right));
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ andl(ToRegister(left), Immediate(right_operand));
+        break;
+      case Token::BIT_OR:
+        __ orl(ToRegister(left), Immediate(right_operand));
+        break;
+      case Token::BIT_XOR:
+        __ xorl(ToRegister(left), Immediate(right_operand));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else if (right->IsStackSlot()) {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ andl(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_OR:
+        __ orl(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_XOR:
+        __ xorl(ToRegister(left), ToOperand(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    ASSERT(right->IsRegister());
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ andl(ToRegister(left), ToRegister(right));
+        break;
+      case Token::BIT_OR:
+        __ orl(ToRegister(left), ToRegister(right));
+        break;
+      case Token::BIT_XOR:
+        __ xorl(ToRegister(left), ToRegister(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
 
 
 void LCodeGen::DoShiftI(LShiftI* instr) {
-  Abort("Unimplemented: %s", "DoShiftI");
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  if (right->IsRegister()) {
+    ASSERT(ToRegister(right).is(rcx));
+
+    switch (instr->op()) {
+      case Token::SAR:
+        __ sarl_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shrl_cl(ToRegister(left));
+        if (instr->can_deopt()) {
+          __ testl(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(negative, instr->environment());
+        }
+        break;
+      case Token::SHL:
+        __ shll_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sarl(ToRegister(left), Immediate(shift_count));
+        }
+        break;
+      case Token::SHR:
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ testl(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(negative, instr->environment());
+        } else {
+          __ shrl(ToRegister(left), Immediate(shift_count));
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ shll(ToRegister(left), Immediate(shift_count));
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
 }
 
 
@@ -657,18 +778,22 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-    ASSERT(instr->result()->IsRegister());
+  ASSERT(instr->result()->IsRegister());
   __ Move(ToRegister(instr->result()), instr->value());
 }
 
 
 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
-  Abort("Unimplemented: %s", "DoJSArrayLength");
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
 }
 
 
 void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
-  Abort("Unimplemented: %s", "DoFixedArrayLength");
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
 }
 
 
@@ -678,7 +803,9 @@
 
 
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
-  Abort("Unimplemented: %s", "DoBitNotI");
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->Equals(instr->result()));
+  __ not_(ToRegister(input));
 }
 
 
@@ -1193,7 +1320,7 @@
 
 void LCodeGen::DoHasCachedArrayIndexAndBranch(
     LHasCachedArrayIndexAndBranch* instr) {
-    Register input = ToRegister(instr->InputAt(0));
+  Register input = ToRegister(instr->InputAt(0));
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1389,32 +1516,32 @@
 
 
 void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
-  Register result = ToRegister(instr->result());
-  if (result.is(rax)) {
-    __ load_rax(instr->hydrogen()->cell().location(),
-                RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(result, Operand(result, 0));
-  }
-  if (instr->hydrogen()->check_hole_value()) {
-    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(equal, instr->environment());
-  }
+  Abort("Unimplemented: %s", "DoLoadGlobal");
 }
 
 
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
-  if (value.is(rax)) {
+  Register temp = ToRegister(instr->TempAt(0));
+  ASSERT(!value.is(temp));
+  bool check_hole = instr->hydrogen()->check_hole_value();
+  if (!check_hole && value.is(rax)) {
     __ store_rax(instr->hydrogen()->cell().location(),
                  RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    __ movq(kScratchRegister,
-            Handle<Object>::cast(instr->hydrogen()->cell()),
-            RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(Operand(kScratchRegister, 0), value);
+    return;
   }
+  // If the cell we are storing to contains the hole, it could have
+  // been deleted from the property dictionary. In that case, the
+  // property details in the dictionary need to be updated to mark
+  // the property as no longer deleted, so we deoptimize.
+  __ movq(temp,
+          Handle<Object>::cast(instr->hydrogen()->cell()),
+          RelocInfo::GLOBAL_PROPERTY_CELL);
+  if (check_hole) {
+    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
+  __ movq(Operand(temp, 0), value);
 }
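
A standalone sketch of the hole check above, with stand-in types (Cell and
kHole here are assumptions, not V8's representations): storing through a cell
that holds the hole would resurrect a deleted property without fixing its
dictionary details, so optimized code bails out instead.

    #include <cassert>

    enum CellValue { kHole, kSmi };

    struct Cell { CellValue value; };

    // Returns false on the path that models DeoptimizeIf(equal, ...).
    static bool StoreGlobal(Cell* cell, CellValue v, bool check_hole) {
      if (check_hole && cell->value == kHole) {
        // The property was deleted; its dictionary details must be updated,
        // which optimized code cannot do, so bail out to the runtime.
        return false;
      }
      cell->value = v;  // the fast store
      return true;
    }

    int main() {
      Cell c = {kHole};
      assert(!StoreGlobal(&c, kSmi, true));  // deopt path
      c.value = kSmi;
      assert(StoreGlobal(&c, kSmi, true));   // fast store succeeds
    }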
 
 
@@ -1446,7 +1573,19 @@
 
 
 void LCodeGen::DoLoadElements(LLoadElements* instr) {
-  Abort("Unimplemented: %s", "DoLoadElements");
+  ASSERT(instr->result()->Equals(instr->InputAt(0)));
+  Register reg = ToRegister(instr->InputAt(0));
+  __ movq(reg, FieldOperand(reg, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    NearLabel done;
+    __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Factory::fixed_array_map());
+    __ j(equal, &done);
+    __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Factory::fixed_cow_array_map());
+    __ Check(equal, "Check for fast elements failed.");
+    __ bind(&done);
+  }
 }
 
 
@@ -1456,7 +1595,20 @@
 
 
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
-  Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+  Register elements = ToRegister(instr->elements());
+  Register key = ToRegister(instr->key());
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(elements));
+
+  // Load the result.
+  __ movq(result, FieldOperand(elements,
+                               key,
+                               times_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  // Check for the hole value.
+  __ Cmp(result, Factory::the_hole_value());
+  DeoptimizeIf(equal, instr->environment());
 }
 
 
@@ -1520,12 +1672,43 @@
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr) {
-  Abort("Unimplemented: %s", "CallKnownFunction");
+  // Change context if needed.
+  bool change_context =
+      (graph()->info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  }
+
+  // Set rax to arguments count if adaption is not needed. Assumes that rax
+  // is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ Set(rax, arity);
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  if (*function == *graph()->info()->closure()) {
+    __ CallSelf();
+  } else {
+    __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Setup deoptimization.
+  RegisterLazyDeoptimization(instr);
+
+  // Restore context.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
-  Abort("Unimplemented: %s", "DoCallConstantFunction");
+  ASSERT(ToRegister(instr->result()).is(rax));
+  __ Move(rdi, instr->function());
+  CallKnownFunction(instr->function(), instr->arity(), instr);
 }
 
 
@@ -1605,7 +1788,9 @@
 
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
-  Abort("Unimplemented: %s", "DoCallKnownGlobal");
+  ASSERT(ToRegister(instr->result()).is(rax));
+  __ Move(rdi, instr->target());
+  CallKnownFunction(instr->target(), instr->arity(), instr);
 }
 
 
@@ -1660,7 +1845,12 @@
 
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  Abort("Unimplemented: %s", "DoBoundsCheck");
+  if (instr->length()->IsRegister()) {
+    __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
+  } else {
+    __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
+  }
+  DeoptimizeIf(above_equal, instr->environment());
 }
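
A standalone sketch of why a single above_equal compare suffices here: the
condition is unsigned, so a negative index reinterpreted as unsigned is huge
and fails the same check as an index past the end.

    #include <cassert>
    #include <cstdint>

    static bool InBounds(int32_t index, int32_t length) {
      // One unsigned compare models cmpq + DeoptimizeIf(above_equal, ...).
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }

    int main() {
      assert(InBounds(3, 10));
      assert(!InBounds(10, 10));  // index == length: above_equal fires
      assert(!InBounds(-1, 10));  // negative index also fails the unsigned test
    }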
 
 
@@ -1779,13 +1969,73 @@
 }
 
 
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  Abort("Unimplemented: %s", "DoDeferredTaggedToI");
+  NearLabel done, heap_number;
+  Register input_reg = ToRegister(instr->InputAt(0));
+
+  // Heap number map check.
+  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+
+  if (instr->truncating()) {
+    __ j(equal, &heap_number);
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movl(input_reg, Immediate(0));
+    __ jmp(&done);
+
+    __ bind(&heap_number);
+
+    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2siq(input_reg, xmm0);
+    __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+    __ cmpl(input_reg, kScratchRegister);
+    DeoptimizeIf(equal, instr->environment());
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(not_equal, instr->environment());
+
+    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, xmm0);
+    __ cvtlsi2sd(xmm_temp, input_reg);
+    __ ucomisd(xmm0, xmm_temp);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ testl(input_reg, input_reg);
+      __ j(not_zero, &done);
+      __ movmskpd(input_reg, xmm0);
+      __ andl(input_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+  }
+  __ bind(&done);
 }
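
A standalone sketch of the non-truncating path above: convert, convert back,
and bail out when the round trip is lossy, the input is NaN, or the result is
minus zero. The minus-zero check is folded in unconditionally here for
illustration, and the range guard stands in for the cvttsd2si overflow
sentinel.

    #include <cassert>
    #include <climits>
    #include <cmath>

    static bool ExactToInt32(double d, int* out) {
      if (std::isnan(d)) return false;                // parity_even path
      if (d < INT_MIN || d > INT_MAX) return false;   // models the overflow sentinel
      int i = static_cast<int>(d);                    // models cvttsd2si
      if (static_cast<double>(i) != d) return false;  // lossy round trip: deopt
      if (i == 0 && std::signbit(d)) return false;    // minus zero (movmskpd path)
      *out = i;
      return true;
    }

    int main() {
      int v;
      assert(ExactToInt32(42.0, &v) && v == 42);
      assert(!ExactToInt32(42.5, &v));  // fractional part would be lost
      assert(!ExactToInt32(-0.0, &v));  // minus zero rejected
    }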
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  Abort("Unimplemented: %s", "DoTaggedToI");
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+  __ JumpIfNotSmi(input_reg, deferred->entry());
+  __ SmiToInteger32(input_reg, input_reg);
+  __ bind(deferred->exit());
 }
 
 
@@ -1811,7 +2061,33 @@
 
 
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoCheckInstanceType");
+  Register input = ToRegister(instr->InputAt(0));
+  InstanceType first = instr->hydrogen()->first();
+  InstanceType last = instr->hydrogen()->last();
+
+  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+
+  // If there is only one type in the interval, check for equality.
+  if (first == last) {
+    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+            Immediate(static_cast<int8_t>(first)));
+    DeoptimizeIf(not_equal, instr->environment());
+  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+    // String has a dedicated bit in instance type.
+    __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+             Immediate(kIsNotStringMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+            Immediate(static_cast<int8_t>(first)));
+    DeoptimizeIf(below, instr->environment());
+    // Omit check for the last type.
+    if (last != LAST_TYPE) {
+      __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+              Immediate(static_cast<int8_t>(last)));
+      DeoptimizeIf(above, instr->environment());
+    }
+  }
 }
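
A standalone sketch of the three strategies above, using an assumed
instance-type layout (strings below 0x80 with the high bit as
kIsNotStringMask; the real constants differ):

    #include <cassert>
    #include <cstdint>

    const uint8_t kIsNotStringMask = 0x80;  // assumed: strings have bit 7 clear
    const uint8_t kFirstString = 0x00, kLastString = 0x7F, kLastType = 0xFF;

    static bool CheckInstanceType(uint8_t type, uint8_t first, uint8_t last) {
      if (first == last) return type == first;  // single type: equality
      if (first == kFirstString && last == kLastString)
        return (type & kIsNotStringMask) == 0;  // dedicated string bit
      if (type < first) return false;           // lower bound
      // The upper-bound check is omitted when the interval ends at kLastType.
      return last == kLastType || type <= last;
    }

    int main() {
      assert(CheckInstanceType(0x42, 0x42, 0x42));
      assert(CheckInstanceType(0x10, kFirstString, kLastString));
      assert(!CheckInstanceType(0x90, kFirstString, kLastString));
      assert(CheckInstanceType(0xC0, 0xA0, kLastType));  // no upper-bound check
    }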
 
 
@@ -1866,12 +2142,47 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Abort("Unimplemented: %s", "DoArrayLiteral");
+  // Setup the parameters to the stub/runtime call.
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+  __ Push(instr->hydrogen()->constant_elements());
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
 }
 
 
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  Abort("Unimplemented: %s", "DoObjectLiteral");
+  // Setup the parameters to the stub/runtime call.
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+  __ Push(instr->hydrogen()->constant_properties());
+  __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+
+  // Pick the right runtime function to call.
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  }
 }
 
 
@@ -1881,7 +2192,20 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  Abort("Unimplemented: %s", "DoFunctionLiteral");
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (shared_info->num_literals() == 0 && !pretenure) {
+    FastNewClosureStub stub;
+    __ Push(shared_info);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ push(rsi);
+    __ Push(shared_info);
+    __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
 }
 
 
@@ -1994,7 +2318,6 @@
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   // Perform stack overflow check.
   NearLabel done;
-  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   __ j(above_equal, &done);
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 0a52c6d..12b952d 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -29,6 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
+#include "lithium-allocator-inl.h"
 #include "x64/lithium-x64.h"
 #include "x64/lithium-codegen-x64.h"
 
@@ -68,11 +69,35 @@
 }
 
 
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Inputs can either be in a fixed register or have a short lifetime
+  // (be used at the start of the instruction).
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           LUnallocated::cast(operand)->IsUsedAtStart() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+  for (TempIterator it(this); it.HasNext(); it.Advance()) {
+    LOperand* operand = it.Next();
+    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+           !LUnallocated::cast(operand)->HasRegisterPolicy());
+  }
+}
+#endif
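
A standalone model of the rule VerifyCall enforces: because a call clobbers
every allocatable register, outputs and temps must either be in fixed
registers or avoid registers entirely, while inputs get the extra option of
being used only at the start of the instruction. The Op struct and policy
flags are stand-ins.

    #include <cassert>

    struct Op { bool fixed_policy; bool register_policy; bool used_at_start; };

    static bool OutputOrTempOk(const Op& o) {
      return o.fixed_policy || !o.register_policy;
    }

    static bool InputOk(const Op& o) {
      return o.fixed_policy || o.used_at_start || !o.register_policy;
    }

    int main() {
      assert(OutputOrTempOk({true, true, false}));    // fixed register: fine
      assert(!OutputOrTempOk({false, true, false}));  // any-register output: invalid
      assert(InputOk({false, true, true}));           // short-lived input: fine
    }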
+
+
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
-  if (HasResult()) {
-    PrintOutputOperandTo(stream);
-  }
+
+  PrintOutputOperandTo(stream);
 
   PrintDataTo(stream);
 
@@ -162,6 +187,12 @@
     case Token::MUL: return "mul-t";
     case Token::MOD: return "mod-t";
     case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::SHL: return "sal-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
     default:
       UNREACHABLE();
       return NULL;
@@ -262,7 +293,8 @@
 
 
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  stream->Add("(%d, %d)", context_chain_length(), slot_index());
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
 }
 
 
@@ -318,7 +350,7 @@
 }
 
 
-LOperand* LChunk::GetNextSpillSlot(bool is_double)  {
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
   // All stack slots are Double stack slots on x64.
   // Alternatively, at some point, start using half-size
   // stack slots for int32 values.
@@ -386,7 +418,7 @@
 }
 
 
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LGap* gap = new LGap(block);
   int index = -1;
   if (instr->IsControl()) {
@@ -402,7 +434,6 @@
     pointer_maps_.Add(instr->pointer_map());
     instr->pointer_map()->set_lithium_position(index);
   }
-  return index;
 }
 
 
@@ -670,7 +701,10 @@
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  allocator_->MarkAsCall();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
   if (hinstr->HasSideEffects()) {
@@ -695,7 +729,7 @@
 
 
 LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  allocator_->MarkAsSaveDoubles();
+  instr->MarkAsSaveDoubles();
   return instr;
 }
 
@@ -740,8 +774,72 @@
 
 LInstruction* LChunkBuilder::DoBit(Token::Value op,
                                    HBitwiseBinaryOperation* instr) {
-  Abort("Unimplemented: %s", "DoBit");
-  return NULL;
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), rdx);
+    LOperand* right = UseFixed(instr->right(), rax);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), rdx);
+    LOperand* right = UseFixed(instr->right(), rax);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
+
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+  HValue* right_value = instr->OperandAt(1);
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseFixed(right_value, rcx);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
+  bool can_deopt = (op == Token::SHR && constant_value == 0);
+  if (can_deopt) {
+    bool can_truncate = true;
+    for (int i = 0; i < instr->uses()->length(); i++) {
+      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+        can_truncate = false;
+        break;
+      }
+    }
+    can_deopt = !can_truncate;
+  }
+
+  LShiftI* result = new LShiftI(op, left, right, can_deopt);
+  return can_deopt
+      ? AssignEnvironment(DefineSameAsFirst(result))
+      : DefineSameAsFirst(result);
 }
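
A standalone illustration of the can_deopt condition above: SHR produces an
unsigned result, and with a shift count of 0 a negative input stays outside
the int32 range, so the result is usable only if every use truncates back to
int32.

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t x = -1;
      uint32_t shr0 = static_cast<uint32_t>(x) >> 0;  // 4294967295
      // Not representable as int32; without truncating uses the optimized
      // code has to deoptimize, which is exactly the condition computed above.
      assert(shr0 > static_cast<uint32_t>(INT32_MAX));
    }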
 
 
@@ -836,7 +934,6 @@
 void LChunkBuilder::VisitInstruction(HInstruction* current) {
   HInstruction* old_current = current_instruction_;
   current_instruction_ = current;
-  allocator_->BeginInstruction();
   if (current->has_position()) position_ = current->position();
   LInstruction* instr = current->CompileToLithium(this);
 
@@ -859,11 +956,7 @@
       instr->set_hydrogen_value(current);
     }
 
-    int index = chunk_->AddInstruction(instr, current_block_);
-    allocator_->SummarizeInstruction(index);
-  } else {
-    // This instruction should be omitted.
-    allocator_->OmitInstruction();
+    chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
 }
@@ -1006,8 +1099,9 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  Abort("Unimplemented: %s", "DoCompareMap");
-  return NULL;
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new LCmpMapAndBranch(value);
 }
 
 
@@ -1049,6 +1143,18 @@
 }
 
 
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  Abort("Unimplemented: DoContext");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  Abort("Unimplemented: DoOuterContext");
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   return DefineAsRegister(new LGlobalObject);
 }
@@ -1061,8 +1167,8 @@
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
-  Abort("Unimplemented: %s", "DoCallConstantFunction");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
 }
 
 
@@ -1079,8 +1185,8 @@
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
-  Abort("Unimplemented: %s", "DoCallNamed");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
 }
 
 
@@ -1091,8 +1197,8 @@
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
-  Abort("Unimplemented: %s", "DoCallKnownGlobal");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
 }
 
 
@@ -1111,50 +1217,47 @@
 
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
-  Abort("Unimplemented: %s", "DoCallRuntime");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoShr(HShr* instr) {
-  Abort("Unimplemented: %s", "DoShr");
-  return NULL;
+  return DoShift(Token::SHR, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoSar(HSar* instr) {
-  Abort("Unimplemented: %s", "DoSar");
-  return NULL;
+  return DoShift(Token::SAR, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoShl(HShl* instr) {
-  Abort("Unimplemented: %s", "DoShl");
-  return NULL;
+  return DoShift(Token::SHL, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  Abort("Unimplemented: %s", "DoBitAnd");
-  return NULL;
+  return DoBit(Token::BIT_AND, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
-  Abort("Unimplemented: %s", "DoBitNot");
-  return NULL;
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LBitNotI* result = new LBitNotI(input);
+  return DefineSameAsFirst(result);
 }
 
 
 LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  Abort("Unimplemented: %s", "DoBitOr");
-  return NULL;
+  return DoBit(Token::BIT_OR, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  Abort("Unimplemented: %s", "DoBitXor");
-  return NULL;
+  return DoBit(Token::BIT_XOR, instr);
 }
 
 
@@ -1305,14 +1408,14 @@
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
-  Abort("Unimplemented: %s", "DoJSArrayLength");
-  return NULL;
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
-  Abort("Unimplemented: %s", "DoFixedArrayLength");
-  return NULL;
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LFixedArrayLength(array));
 }
 
 
@@ -1323,8 +1426,8 @@
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  Abort("Unimplemented: %s", "DoBoundsCheck");
-  return NULL;
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            Use(instr->length())));
 }
 
 
@@ -1409,8 +1512,9 @@
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoCheckInstanceType");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LCheckInstanceType* result = new LCheckInstanceType(value);
+  return AssignEnvironment(result);
 }
 
 
@@ -1472,7 +1576,10 @@
 
 
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  return new LStoreGlobal(UseRegisterAtStart(instr->value()));}
+  LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
+                                          TempRegister());
+  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+}
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
@@ -1481,6 +1588,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  Abort("Unimplemented: DoStoreContextSlot");
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
@@ -1502,15 +1615,19 @@
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
-  Abort("Unimplemented: %s", "DoLoadElements");
-  return NULL;
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineSameAsFirst(new LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
     HLoadKeyedFastElement* instr) {
-  Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
-  return NULL;
+  ASSERT(instr->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
@@ -1572,14 +1689,12 @@
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  Abort("Unimplemented: %s", "DoArrayLiteral");
-  return NULL;
+  return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  Abort("Unimplemented: %s", "DoObjectLiteral");
-  return NULL;
+  return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
 }
 
 
@@ -1590,8 +1705,7 @@
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  Abort("Unimplemented: %s", "DoFunctionLiteral");
-  return NULL;
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
 }
 
 
@@ -1699,7 +1813,8 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  Abort("Unimplemented: %s", "DoLeaveInlined");
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
   return NULL;
 }
 
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index abeb2a3..64d141e 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -287,7 +287,11 @@
 class LInstruction: public ZoneObject {
  public:
   LInstruction()
-      : hydrogen_value_(NULL) { }
+      :  environment_(NULL),
+         hydrogen_value_(NULL),
+         is_call_(false),
+         is_save_doubles_(false) { }
+
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -304,16 +308,14 @@
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
 
-  void set_environment(LEnvironment* env) { environment_.set(env); }
-  LEnvironment* environment() const { return environment_.get(); }
-  bool HasEnvironment() const { return environment_.is_set(); }
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
 
   void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
   LPointerMap* pointer_map() const { return pointer_map_.get(); }
   bool HasPointerMap() const { return pointer_map_.is_set(); }
 
-  virtual bool HasResult() const = 0;
-
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
@@ -327,11 +329,35 @@
     return deoptimization_environment_.is_set();
   }
 
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
  private:
-  SetOncePointer<LEnvironment> environment_;
+  LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
   SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
 
 
@@ -358,6 +384,11 @@
  public:
   int length() { return 0; }
   void PrintOperandsTo(StringStream* stream) { }
+  ElementType& operator[](int i) {
+    UNREACHABLE();
+    static ElementType t = 0;
+    return t;
+  }
 };
 
 
@@ -1269,10 +1300,11 @@
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
  public:
-  explicit LStoreGlobal(LOperand* value) {
+  explicit LStoreGlobal(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
@@ -1280,12 +1312,16 @@
 };
 
 
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
   DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
 
-  int context_chain_length() { return hydrogen()->context_chain_length(); }
+  LOperand* context() { return InputAt(0); }
   int slot_index() { return hydrogen()->slot_index(); }
 
   virtual void PrintDataTo(StringStream* stream);
@@ -1602,11 +1638,10 @@
 };
 
 
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
  public:
-  LCheckInstanceType(LOperand* value, LOperand* temp) {
+  explicit LCheckInstanceType(LOperand* value) {
     inputs_[0] = value;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
@@ -1781,7 +1816,7 @@
       pointer_maps_(8),
       inlined_closures_(1) { }
 
-  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
   LConstantOperand* DefineConstantOperand(HConstant* constant);
   Handle<Object> LookupLiteral(LConstantOperand* operand) const;
   Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index e104e5b..50b689b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -68,7 +68,9 @@
 }
 
 
-void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(const Operand& with,
+                                 Heap::RootListIndex index) {
+  ASSERT(!with.AddressUsesRegister(kScratchRegister));
   LoadRoot(kScratchRegister, index);
   cmpq(with, kScratchRegister);
 }
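
A standalone model of the new assert's rationale: LoadRoot writes
kScratchRegister before the compare, so an operand whose address depends on
the scratch register would be resolved against a clobbered value. The
variables below are stand-ins for registers.

    #include <cassert>

    int scratch = 0;  // stand-in for kScratchRegister

    static int LoadRoot(int root) { scratch = root; return scratch; }

    int main() {
      int base = 5;            // an operand base distinct from scratch
      scratch = 5;             // an operand built on scratch would start here
      int root = LoadRoot(7);  // ...but LoadRoot overwrites scratch first
      assert(base == 5 && scratch == 7 && root == 7);
      // Only the non-scratch base still addresses the intended memory, which
      // is what ASSERT(!with.AddressUsesRegister(kScratchRegister)) checks
      // at assembly time.
    }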
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 3536911..8bb3190 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -74,7 +74,7 @@
 
   void LoadRoot(Register destination, Heap::RootListIndex index);
   void CompareRoot(Register with, Heap::RootListIndex index);
-  void CompareRoot(Operand with, Heap::RootListIndex index);
+  void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
   void StoreRoot(Register source, Heap::RootListIndex index);
 
@@ -540,6 +540,14 @@
 
   // ---------------------------------------------------------------------------
   // String macros.
+
+  // If object is a string, its map is loaded into object_map.
+  template <typename LabelType>
+  void JumpIfNotString(Register object,
+                       Register object_map,
+                       LabelType* not_string);
+
+
   template <typename LabelType>
   void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                            Register second_object,
@@ -596,6 +604,12 @@
   void Call(ExternalReference ext);
   void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
 
+  // Emit call to the code we are currently generating.
+  void CallSelf() {
+    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+    Call(self, RelocInfo::CODE_TARGET);
+  }
+
   // Non-x64 instructions.
   // Push/pop all general purpose registers.
   // Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -1458,6 +1472,8 @@
   ASSERT(!src1.is(kScratchRegister));
   ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(rcx));
+  // dst and src1 can be the same, because the one case that bails out
+  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
   NearLabel result_ok;
   if (src1.is(rcx) || src2.is(rcx)) {
     movq(kScratchRegister, rcx);
@@ -1592,6 +1608,17 @@
 
 
 template <typename LabelType>
+void MacroAssembler::JumpIfNotString(Register object,
+                                     Register object_map,
+                                     LabelType* not_string) {
+  Condition is_smi = CheckSmi(object);
+  j(is_smi, not_string);
+  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+  j(above_equal, not_string);
+}
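
A standalone model of why JumpIfNotString is a template: the same helper must
accept both Label and NearLabel targets, which are resolved at compile time
rather than through a common base. The two structs below are stand-ins.

    #include <cassert>
    #include <string>

    struct Label     { std::string kind() const { return "far"; } };
    struct NearLabel { std::string kind() const { return "near"; } };

    template <typename LabelType>
    static std::string JumpKind(const LabelType& target) {
      return target.kind();  // resolved per instantiation, no virtual dispatch
    }

    int main() {
      assert(JumpKind(Label()) == "far");
      assert(JumpKind(NearLabel()) == "near");
    }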
+
+
+template <typename LabelType>
 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                                          Register second_object,
                                                          Register scratch1,
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 8888d70..9cb88f3 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2998,6 +2998,35 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map matches.
+  __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
+
+  GenerateFastPixelArrayLoad(masm(),
+                             rdx,
+                             rax,
+                             rbx,
+                             rcx,
+                             rax,
+                             &miss,
+                             &miss,
+                             &miss);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index d7e446d..9b070d2 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -66,7 +66,7 @@
 test-deoptimization/DeoptimizeBinaryOperationDIV: FAIL
 test-deoptimization/DeoptimizeLoadICStoreIC: FAIL
 test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL
-test-deoptimization/DeoptimizeCompare: FAIL
+test-deoptimization/DeoptimizeCompare: PASS || FAIL
 
 # Tests that time out with crankshaft.
 test-api/Threading: SKIP
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index ee62067..f88ba38 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -5309,11 +5309,13 @@
 }
 
 
+static bool allowed_access_type[v8::ACCESS_KEYS + 1] = { false };
 static bool NamedAccessBlocker(Local<v8::Object> global,
                                Local<Value> name,
                                v8::AccessType type,
                                Local<Value> data) {
-  return Context::GetCurrent()->Global()->Equals(global);
+  return Context::GetCurrent()->Global()->Equals(global) ||
+      allowed_access_type[type];
 }
 
 
@@ -5321,7 +5323,8 @@
                                  uint32_t key,
                                  v8::AccessType type,
                                  Local<Value> data) {
-  return Context::GetCurrent()->Global()->Equals(global);
+  return Context::GetCurrent()->Global()->Equals(global) ||
+      allowed_access_type[type];
 }
 
 
@@ -5353,7 +5356,7 @@
 }
 
 
-THREADED_TEST(AccessControl) {
+TEST(AccessControl) {
   v8::HandleScope handle_scope;
   v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
 
@@ -5379,6 +5382,27 @@
 
   v8::Handle<v8::Object> global0 = context0->Global();
 
+  // Define a property with JS getter and setter.
+  CompileRun(
+      "function getter() { return 'getter'; };\n"
+      "function setter() { return 'setter'; }\n"
+      "Object.defineProperty(this, 'js_accessor_p', {get:getter, set:setter})");
+
+  Local<Value> getter = global0->Get(v8_str("getter"));
+  Local<Value> setter = global0->Get(v8_str("setter"));
+
+  // Also define a normal element.
+  global0->Set(239, v8_str("239"));
+
+  // Define an element with JS getter and setter.
+  CompileRun(
+      "function el_getter() { return 'el_getter'; };\n"
+      "function el_setter() { return 'el_setter'; };\n"
+      "Object.defineProperty(this, '42', {get: el_getter, set: el_setter});");
+
+  Local<Value> el_getter = global0->Get(v8_str("el_getter"));
+  Local<Value> el_setter = global0->Get(v8_str("el_setter"));
+
   v8::HandleScope scope1;
 
   v8::Persistent<Context> context1 = Context::New();
@@ -5387,19 +5411,158 @@
   v8::Handle<v8::Object> global1 = context1->Global();
   global1->Set(v8_str("other"), global0);
 
-  v8::Handle<Value> value;
+  // Access blocked property.
+  CompileRun("other.blocked_prop = 1");
 
-  // Access blocked property
-  value = CompileRun("other.blocked_prop = 1");
-  value = CompileRun("other.blocked_prop");
-  CHECK(value->IsUndefined());
+  ExpectUndefined("other.blocked_prop");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
+  ExpectFalse("propertyIsEnumerable.call(other, 'blocked_prop')");
 
-  value = CompileRun(
+  // Enable ACCESS_HAS
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  ExpectUndefined("other.blocked_prop");
+  // ... and now we can get the descriptor...
+  ExpectUndefined(
       "Object.getOwnPropertyDescriptor(other, 'blocked_prop').value");
-  CHECK(value->IsUndefined());
+  // ... and enumerate the property.
+  ExpectTrue("propertyIsEnumerable.call(other, 'blocked_prop')");
+  allowed_access_type[v8::ACCESS_HAS] = false;
 
-  value = CompileRun("propertyIsEnumerable.call(other, 'blocked_prop')");
-  CHECK(value->IsFalse());
+  // Access blocked element.
+  CompileRun("other[239] = 1");
+
+  ExpectUndefined("other[239]");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239')");
+  ExpectFalse("propertyIsEnumerable.call(other, '239')");
+
+  // Enable ACCESS_HAS
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  ExpectUndefined("other[239]");
+  // ... and now we can get the descriptor...
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239').value");
+  // ... and enumerate the property.
+  ExpectTrue("propertyIsEnumerable.call(other, '239')");
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Access a property with JS accessor.
+  CompileRun("other.js_accessor_p = 2");
+
+  ExpectUndefined("other.js_accessor_p");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p')");
+
+  // Enable ACCESS_HAS.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  ExpectUndefined("other.js_accessor_p");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Enable both ACCESS_HAS and ACCESS_GET.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  allowed_access_type[v8::ACCESS_GET] = true;
+
+  ExpectString("other.js_accessor_p", "getter");
+  ExpectObject(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+  allowed_access_type[v8::ACCESS_GET] = false;
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Enable both ACCESS_HAS and ACCESS_SET.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  allowed_access_type[v8::ACCESS_SET] = true;
+
+  ExpectUndefined("other.js_accessor_p");
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
+  ExpectObject(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+  allowed_access_type[v8::ACCESS_SET] = false;
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  allowed_access_type[v8::ACCESS_GET] = true;
+  allowed_access_type[v8::ACCESS_SET] = true;
+
+  ExpectString("other.js_accessor_p", "getter");
+  ExpectObject(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
+  ExpectObject(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
+  ExpectUndefined(
+      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+  allowed_access_type[v8::ACCESS_SET] = false;
+  allowed_access_type[v8::ACCESS_GET] = false;
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Access an element with JS accessor.
+  CompileRun("other[42] = 2");
+
+  ExpectUndefined("other[42]");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42')");
+
+  // Enable ACCESS_HAS.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  ExpectUndefined("other[42]");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Enable both ACCESS_HAS and ACCESS_GET.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  allowed_access_type[v8::ACCESS_GET] = true;
+
+  ExpectString("other[42]", "el_getter");
+  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+  allowed_access_type[v8::ACCESS_GET] = false;
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Enable both ACCESS_HAS and ACCESS_SET.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  allowed_access_type[v8::ACCESS_SET] = true;
+
+  ExpectUndefined("other[42]");
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
+  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+  allowed_access_type[v8::ACCESS_SET] = false;
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
+  allowed_access_type[v8::ACCESS_HAS] = true;
+  allowed_access_type[v8::ACCESS_GET] = true;
+  allowed_access_type[v8::ACCESS_SET] = true;
+
+  ExpectString("other[42]", "el_getter");
+  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
+  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
+  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+  allowed_access_type[v8::ACCESS_SET] = false;
+  allowed_access_type[v8::ACCESS_GET] = false;
+  allowed_access_type[v8::ACCESS_HAS] = false;
+
+  v8::Handle<Value> value;
 
   // Access accessible property
   value = CompileRun("other.accessible_prop = 3");
@@ -7362,6 +7525,61 @@
       "garbage = undefined;");
 }
 
+v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) {
+  static int count = 0;
+  if (count++ % 3 == 0) {
+    v8::V8::LowMemoryNotification();  // This should move the stub
+    GenerateSomeGarbage();  // This should ensure the old stub memory is flushed
+  }
+  return v8::Handle<v8::Value>();
+}
+
+
+THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
+  v8::HandleScope scope;
+  LocalContext context;
+  v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
+  nativeobject_templ->Set("callback",
+                          v8::FunctionTemplate::New(DirectApiCallback));
+  v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
+  context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+  // Call the API function multiple times to ensure direct call stub creation.
+  CompileRun(
+        "function f() {"
+        "  for (var i = 1; i <= 30; i++) {"
+        "    nativeobject.callback();"
+        "  }"
+        "}"
+        "f();");
+}
+
+
+v8::Handle<v8::Value> ThrowingDirectApiCallback(const v8::Arguments& args) {
+  return v8::ThrowException(v8_str("g"));
+}
+
+
+THREADED_TEST(CallICFastApi_DirectCall_Throw) {
+  v8::HandleScope scope;
+  LocalContext context;
+  v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
+  nativeobject_templ->Set("callback",
+                          v8::FunctionTemplate::New(ThrowingDirectApiCallback));
+  v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
+  context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+  // Call the API function multiple times to ensure direct call stub creation.
+  v8::Handle<Value> result = CompileRun(
+      "var result = '';"
+      "function f() {"
+      "  for (var i = 1; i <= 5; i++) {"
+      "    try { nativeobject.callback(); } catch (e) { result += e; }"
+      "  }"
+      "}"
+      "f(); result;");
+  CHECK_EQ(v8_str("ggggg"), result);
+}
+
+
 THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
   int interceptor_call_count = 0;
   v8::HandleScope scope;
@@ -10369,6 +10587,93 @@
                       "i");
   CHECK_EQ(255, result->Int32Value());
 
+  // Make sure that pixel array ICs recognize when a non-pixel array
+  // is passed to them.
+  result = CompileRun("function pa_load(p) {"
+                      "  var sum = 0;"
+                      "  for (var j = 0; j < 256; j++) { sum += p[j]; }"
+                      "  return sum;"
+                      "}"
+                      "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(pixels); }"
+                      "just_ints = new Object();"
+                      "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+                      "for (var i = 0; i < 10; ++i) {"
+                      "  result = pa_load(just_ints);"
+                      "}"
+                      "result");
+  CHECK_EQ(32640, result->Int32Value());
+
+  // Make sure that pixel array ICs recognize out-of-bounds accesses.
+  result = CompileRun("function pa_load(p, start) {"
+                      "  var sum = 0;"
+                      "  for (var j = start; j < 256; j++) { sum += p[j]; }"
+                      "  return sum;"
+                      "}"
+                      "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(pixels,0); }"
+                      "for (var i = 0; i < 10; ++i) {"
+                      "  result = pa_load(pixels,-10);"
+                      "}"
+                      "result");
+  CHECK_EQ(0, result->Int32Value());
+
+  // Make sure that generic ICs properly handle a pixel array.
+  result = CompileRun("function pa_load(p) {"
+                      "  var sum = 0;"
+                      "  for (var j = 0; j < 256; j++) { sum += p[j]; }"
+                      "  return sum;"
+                      "}"
+                      "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+                      "just_ints = new Object();"
+                      "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(just_ints); }"
+                      "for (var i = 0; i < 10; ++i) {"
+                      "  result = pa_load(pixels);"
+                      "}"
+                      "result");
+  CHECK_EQ(32640, result->Int32Value());
+
+  // Make sure that generic load ICs recognize out-of-bounds accesses in
+  // pixel arrays.
+  result = CompileRun("function pa_load(p, start) {"
+                      "  var sum = 0;"
+                      "  for (var j = start; j < 256; j++) { sum += p[j]; }"
+                      "  return sum;"
+                      "}"
+                      "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+                      "just_ints = new Object();"
+                      "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(just_ints,0); }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(pixels,0); }"
+                      "for (var i = 0; i < 10; ++i) {"
+                      "  result = pa_load(pixels,-10);"
+                      "}"
+                      "result");
+  CHECK_EQ(0, result->Int32Value());
+
+  // Make sure that generic ICs properly handle types other than pixel
+  // arrays (i.e. that the inlined fast pixel array check leaves the right
+  // information in the right registers).
+  result = CompileRun("function pa_load(p) {"
+                      "  var sum = 0;"
+                      "  for (var j = 0; j < 256; j++) { sum += p[j]; }"
+                      "  return sum;"
+                      "}"
+                      "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+                      "just_ints = new Object();"
+                      "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(just_ints); }"
+                      "for (var i = 0; i < 10; ++i) { pa_load(pixels); }"
+                      "sparse_array = new Object();"
+                      "for (var i = 0; i < 256; ++i) { sparse_array[i] = i; }"
+                      "sparse_array[1000000] = 3;"
+                      "for (var i = 0; i < 10; ++i) {"
+                      "  result = pa_load(sparse_array);"
+                      "}"
+                      "result");
+  CHECK_EQ(32640, result->Int32Value());
+
   free(pixel_data);
 }
 
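The pa_load tests above all expect 32640 because the sum of the first 256
pixel values is 0 + 1 + ... + 255 = 255 * 256 / 2 = 32640. A minimal sketch of
the IC-warmup pattern they share, with a plain Array standing in for the pixel
array (so this mirrors only the JS-level behavior, not the pixel-array stubs):

  function pa_load(p) {
    var sum = 0;
    for (var j = 0; j < 256; j++) sum += p[j];
    return sum;
  }
  var fake_pixels = new Array(256);
  for (var i = 0; i < 256; i++) fake_pixels[i] = i;
  // Warm up the keyed load IC on one receiver type...
  for (var i = 0; i < 10; i++) pa_load(fake_pixels);
  // ...then feed it a different receiver; both must still sum to 32640.
  var just_ints = {};
  for (var i = 0; i < 256; i++) just_ints[i] = i;
  if (pa_load(just_ints) !== 32640) throw new Error("IC mishandled receiver");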
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index e5f73cb..43cf580 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -409,71 +409,189 @@
 }
 
 
-static void TestRoundingMode(int32_t mode, double value, int expected) {
+enum VCVTTypes {
+  s32_f64,
+  u32_f64
+};
+
+static void TestRoundingMode(VCVTTypes types,
+                             VFPRoundingMode mode,
+                             double value,
+                             int expected,
+                             bool expected_exception = false) {
   InitializeVM();
   v8::HandleScope scope;
 
   Assembler assm(NULL, 0);
 
-  __ vmrs(r1);
-  // Set custom FPSCR.
-  __ bic(r2, r1, Operand(((mode ^ 3) << 22) | 0xf));
-  __ orr(r2, r2, Operand(mode << 22));
-  __ vmsr(r2);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
 
-  // Load value, convert, and move back result to r0.
-  __ vmov(d1, value);
-  __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
-  __ vmov(r0, s0);
+    Label wrong_exception;
 
-  __ mov(pc, Operand(lr));
+    __ vmrs(r1);
+    // Set custom FPSCR.
+    __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+    __ orr(r2, r2, Operand(mode));
+    __ vmsr(r2);
 
-  CodeDesc desc;
-  assm.GetCode(&desc);
-  Object* code = Heap::CreateCode(
-      desc,
-      Code::ComputeFlags(Code::STUB),
-      Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
-  CHECK(code->IsCode());
+    // Load value, convert, and move back result to r0 if everything went well.
+    __ vmov(d1, value);
+    switch (types) {
+      case s32_f64:
+        __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+        break;
+
+      case u32_f64:
+        __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+        break;
+
+      default:
+        UNREACHABLE();
+        break;
+    }
+    // Check for VFP exceptions.
+    __ vmrs(r2);
+    __ tst(r2, Operand(kVFPExceptionMask));
+    // Check that we behaved as expected.
+    __ b(&wrong_exception,
+         expected_exception ? eq : ne);
+    // There was no exception. Retrieve the result and return.
+    __ vmov(r0, s0);
+    __ mov(pc, Operand(lr));
+
+    // The exception behaviour is not what we expected.
+    // Load a special value and return.
+    __ bind(&wrong_exception);
+    __ mov(r0, Operand(11223344));
+    __ mov(pc, Operand(lr));
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Object* code = Heap::CreateCode(
+        desc,
+        Code::ComputeFlags(Code::STUB),
+        Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+    CHECK(code->IsCode());
 #ifdef DEBUG
-  Code::cast(code)->Print();
+    Code::cast(code)->Print();
 #endif
-  F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
-  int res = reinterpret_cast<int>(
-              CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
-  ::printf("res = %d\n", res);
-  CHECK_EQ(expected, res);
+    F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+    int res = reinterpret_cast<int>(
+                CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+    ::printf("res = %d\n", res);
+    CHECK_EQ(expected, res);
+  }
 }
 
 
 TEST(7) {
   // Test vfp rounding modes.
 
-  // See ARM DDI 0406B Page A2-29.
-  enum FPSCRRoungingMode {
-    RN,   // Round to Nearest.
-    RP,   // Round towards Plus Infinity.
-    RM,   // Round towards Minus Infinity.
-    RZ    // Round towards zero.
-  };
+  // s32_f64 (double to integer).
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
+  TestRoundingMode(s32_f64, RN,  0, 0);
+  TestRoundingMode(s32_f64, RN,  0.5, 0);
+  TestRoundingMode(s32_f64, RN, -0.5, 0);
+  TestRoundingMode(s32_f64, RN,  1.5, 2);
+  TestRoundingMode(s32_f64, RN, -1.5, -2);
+  TestRoundingMode(s32_f64, RN,  123.7, 124);
+  TestRoundingMode(s32_f64, RN, -123.7, -124);
+  TestRoundingMode(s32_f64, RN,  123456.2,  123456);
+  TestRoundingMode(s32_f64, RN, -123456.2, -123456);
+  TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+  TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+  TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
+  TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
+  TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
+  TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
+  TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
+  TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);
 
-    TestRoundingMode(RZ,  0.5, 0);
-    TestRoundingMode(RZ, -0.5, 0);
-    TestRoundingMode(RZ,  123.7,  123);
-    TestRoundingMode(RZ, -123.7, -123);
-    TestRoundingMode(RZ,  123456.2,  123456);
-    TestRoundingMode(RZ, -123456.2, -123456);
+  TestRoundingMode(s32_f64, RM,  0, 0);
+  TestRoundingMode(s32_f64, RM,  0.5, 0);
+  TestRoundingMode(s32_f64, RM, -0.5, -1);
+  TestRoundingMode(s32_f64, RM,  123.7, 123);
+  TestRoundingMode(s32_f64, RM, -123.7, -124);
+  TestRoundingMode(s32_f64, RM,  123456.2,  123456);
+  TestRoundingMode(s32_f64, RM, -123456.2, -123457);
+  TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+  TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+  TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
+  TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
+  TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
+  TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);
 
-    TestRoundingMode(RM,  0.5, 0);
-    TestRoundingMode(RM, -0.5, -1);
-    TestRoundingMode(RM,  123.7, 123);
-    TestRoundingMode(RM, -123.7, -124);
-    TestRoundingMode(RM,  123456.2,  123456);
-    TestRoundingMode(RM, -123456.2, -123457);
-  }
+  TestRoundingMode(s32_f64, RZ,  0, 0);
+  TestRoundingMode(s32_f64, RZ,  0.5, 0);
+  TestRoundingMode(s32_f64, RZ, -0.5, 0);
+  TestRoundingMode(s32_f64, RZ,  123.7,  123);
+  TestRoundingMode(s32_f64, RZ, -123.7, -123);
+  TestRoundingMode(s32_f64, RZ,  123456.2,  123456);
+  TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
+  TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+  TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+  TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
+  TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
+  TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
+  TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);
+
+
+  // u32_f64 (double to integer).
+
+  // Negative values.
+  TestRoundingMode(u32_f64, RN, -0.5, 0);
+  TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
+  TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
+  TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);
+
+  TestRoundingMode(u32_f64, RM, -0.5, 0, true);
+  TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
+  TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
+  TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);
+
+  TestRoundingMode(u32_f64, RZ, -0.5, 0);
+  TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
+  TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
+  TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
+
+  // Positive values.
+  // kMaxInt is the maximum *signed* integer: 0x7fffffff.
+  static const uint32_t kMaxUInt = 0xffffffffu;
+  TestRoundingMode(u32_f64, RZ,  0, 0);
+  TestRoundingMode(u32_f64, RZ,  0.5, 0);
+  TestRoundingMode(u32_f64, RZ,  123.7,  123);
+  TestRoundingMode(u32_f64, RZ,  123456.2,  123456);
+  TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+  TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+  TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
+                                static_cast<uint32_t>(kMaxInt) + 1);
+  TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
+  TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
+
+  TestRoundingMode(u32_f64, RM,  0, 0);
+  TestRoundingMode(u32_f64, RM,  0.5, 0);
+  TestRoundingMode(u32_f64, RM,  123.7, 123);
+  TestRoundingMode(u32_f64, RM,  123456.2,  123456);
+  TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+  TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+  TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
+                                static_cast<uint32_t>(kMaxInt) + 1);
+  TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
+  TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
+
+  TestRoundingMode(u32_f64, RN,  0, 0);
+  TestRoundingMode(u32_f64, RN,  0.5, 0);
+  TestRoundingMode(u32_f64, RN,  1.5, 2);
+  TestRoundingMode(u32_f64, RN,  123.7, 124);
+  TestRoundingMode(u32_f64, RN,  123456.2,  123456);
+  TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+  TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+  TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
+                                static_cast<uint32_t>(kMaxInt) + 1);
+  TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
+  TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
+  TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
 }
 
 #undef __
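For reference, vcvt under FPSCR rounding saturates out-of-range inputs (to
kMinInt/kMaxInt for s32, and to 0/kMaxUInt for u32) and raises the VFP
invalid-operation exception, which is what the expected_exception argument
asserts. A hedged JavaScript sketch of the saturating s32 semantics the table
above encodes (names are illustrative, not from the source):

  var kMaxInt = 0x7fffffff, kMinInt = -0x80000000;
  // Round-to-zero double -> int32 with saturation, like vcvt_s32_f64 under RZ.
  function saturating_s32(x) {
    if (x >= kMaxInt) return kMaxInt;  // clamp high
    if (x <= kMinInt) return kMinInt;  // clamp low
    return x < 0 ? Math.ceil(x) : Math.floor(x);  // truncate toward zero
  }
  // Contrast with JS ToInt32, which wraps modulo 2^32 instead of saturating:
  // saturating_s32(kMaxInt + 1.0) === kMaxInt,
  // but ((kMaxInt + 1.0) | 0) === kMinInt.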
diff --git a/test/cctest/test-bignum-dtoa.cc b/test/cctest/test-bignum-dtoa.cc
index 51a9057..a696ed8 100644
--- a/test/cctest/test-bignum-dtoa.cc
+++ b/test/cctest/test-bignum-dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,7 @@
 // Removes trailing '0' digits.
 // Can return the empty string if all digits are 0.
 static void TrimRepresentation(Vector<char> representation) {
-  int len = strlen(representation.start());
+  int len = StrLength(representation.start());
   int i;
   for (i = len - 1; i >= 0; --i) {
     if (representation[i] != '0') break;
diff --git a/test/cctest/test-dtoa.cc b/test/cctest/test-dtoa.cc
index ff0b660..66c2aaf 100644
--- a/test/cctest/test-dtoa.cc
+++ b/test/cctest/test-dtoa.cc
@@ -44,7 +44,7 @@
 
 // Removes trailing '0' digits.
 static void TrimRepresentation(Vector<char> representation) {
-  int len = strlen(representation.start());
+  int len = StrLength(representation.start());
   int i;
   for (i = len - 1; i >= 0; --i) {
     if (representation[i] != '0') break;
diff --git a/test/cctest/test-fast-dtoa.cc b/test/cctest/test-fast-dtoa.cc
index 30d2476..d311713 100644
--- a/test/cctest/test-fast-dtoa.cc
+++ b/test/cctest/test-fast-dtoa.cc
@@ -19,7 +19,7 @@
 
 // Removes trailing '0' digits.
 static void TrimRepresentation(Vector<char> representation) {
-  int len = strlen(representation.start());
+  int len = StrLength(representation.start());
   int i;
   for (i = len - 1; i >= 0; --i) {
     if (representation[i] != '0') break;
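The strlen -> StrLength change in these three dtoa tests swaps C's
size_t-returning strlen for V8's int-returning StrLength wrapper, avoiding an
implicit unsigned-to-signed conversion in the assignment. The helper itself
just strips trailing zero digits in place; the same logic in JavaScript, as an
illustrative sketch:

  // Drop trailing '0' digits; an all-zero representation trims to "".
  function trimRepresentation(s) {
    var i = s.length - 1;
    while (i >= 0 && s.charAt(i) === '0') i--;
    return s.substring(0, i + 1);
  }
  // trimRepresentation("12300") === "123"; trimRepresentation("000") === ""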
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index 47b6887..0dd2750 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -44,9 +44,6 @@
 # We do not have a global object called 'global' as required by tests.
 chapter15/15.1: FAIL_OK
 
-# NOT IMPLEMENTED: bind
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED
-
 # NaN is writable. We are compatible with JSC.
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
 # Infinity is writable. We are compatible with JSC.
@@ -94,7 +91,7 @@
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-3: FAIL_OK
 
-# SUBSETFAIL + we do not implement Function.prototype.bind.
+# SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-4: FAIL_OK
 
 # SUBSETFAIL
@@ -175,9 +172,6 @@
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
 
-# NOT IMPLEMENTED: bind on Function.prototype.
-chapter15/15.3/15.3.4/15.3.4.5/15.3.4.5-0-1: UNIMPLEMENTED
-
 # Bad test - the spec does not say anything about throwing errors
 # on calling Array.prototype.indexOf with undefined as argument.
 chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-1-1: FAIL_OK
@@ -235,6 +229,218 @@
 chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
 chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
 
+##############################################################################
+# Unimplemented parts of strict mode
+# Expectations are set to FAIL so that these tests start triggering as soon
+# as the corresponding strict mode features get implemented.
+
+# A directive preceding a 'use strict' directive may not contain an OctalEscapeSequence
+# Incorrect test - need double escape in eval.
+chapter07/7.8/7.8.4/7.8.4-1-s: FAIL
+
+# this is not coerced to an object in strict mode (Number)
+chapter10/10.4/10.4.3/10.4.3-1-1-s: FAIL
+# this is not coerced to an object in strict mode (string)
+chapter10/10.4/10.4.3/10.4.3-1-2-s: FAIL
+# this is not coerced to an object in strict mode (undefined)
+chapter10/10.4/10.4.3/10.4.3-1-3-s: FAIL
+# this is not coerced to an object in strict mode (boolean)
+chapter10/10.4/10.4.3/10.4.3-1-4-s: FAIL
+
+# arguments[i] remains same after changing actual parameters in strict mode
+chapter10/10.6/10.6-10-c-ii-1-s: FAIL
+# arguments[i] doesn't map to actual parameters in strict mode
+chapter10/10.6/10.6-10-c-ii-2-s: FAIL
+
+# Accessing caller property of Arguments object throws TypeError in strict mode
+chapter10/10.6/10.6-13-b-1-s: FAIL
+# arguments.caller exists in strict mode
+chapter10/10.6/10.6-13-b-2-s: FAIL
+# arguments.caller is non-configurable in strict mode
+chapter10/10.6/10.6-13-b-3-s: FAIL
+# Accessing callee property of Arguments object throws TypeError in strict mode
+chapter10/10.6/10.6-13-c-1-s: FAIL
+# arguments.callee is non-configurable in strict mode
+chapter10/10.6/10.6-13-c-3-s: FAIL
+
+# simple assignment throws ReferenceError if LeftHandSide is an unresolvable reference in strict mode
+chapter11/11.13/11.13.1/11.13.1-1-5-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a property reference with a primitive base value (this is undefined)
+chapter11/11.13/11.13.1/11.13.1-1-7-s: FAIL
+
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.NaN)
+chapter11/11.13/11.13.1/11.13.1-4-2-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.Infinity)
+chapter11/11.13/11.13.1/11.13.1-4-3-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.length)
+chapter11/11.13/11.13.1/11.13.1-4-4-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Object.length)
+chapter11/11.13/11.13.1/11.13.1-4-5-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Function.length)
+chapter11/11.13/11.13.1/11.13.1-4-6-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Array.length)
+chapter11/11.13/11.13.1/11.13.1-4-7-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (String.length)
+chapter11/11.13/11.13.1/11.13.1-4-8-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Boolean.length)
+chapter11/11.13/11.13.1/11.13.1-4-9-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.length)
+chapter11/11.13/11.13.1/11.13.1-4-10-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Date.length)
+chapter11/11.13/11.13.1/11.13.1-4-11-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (RegExp.length)
+chapter11/11.13/11.13.1/11.13.1-4-12-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Error.length)
+chapter11/11.13/11.13.1/11.13.1-4-13-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.MAX_VALUE)
+chapter11/11.13/11.13.1/11.13.1-4-14-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.MIN_VALUE)
+chapter11/11.13/11.13.1/11.13.1-4-15-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.NaN)
+chapter11/11.13/11.13.1/11.13.1-4-16-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.NEGATIVE_INFINITY)
+chapter11/11.13/11.13.1/11.13.1-4-17-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Number.POSITIVE_INFINITY)
+chapter11/11.13/11.13.1/11.13.1-4-18-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.E)
+chapter11/11.13/11.13.1/11.13.1-4-19-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LN10)
+chapter11/11.13/11.13.1/11.13.1-4-20-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LN2)
+chapter11/11.13/11.13.1/11.13.1-4-21-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LOG2E)
+chapter11/11.13/11.13.1/11.13.1-4-22-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.LOG10E)
+chapter11/11.13/11.13.1/11.13.1-4-23-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.PI)
+chapter11/11.13/11.13.1/11.13.1-4-24-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.SQRT1_2)
+chapter11/11.13/11.13.1/11.13.1-4-25-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Math.SQRT2)
+chapter11/11.13/11.13.1/11.13.1-4-26-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property in strict mode (Global.undefined)
+chapter11/11.13/11.13.1/11.13.1-4-27-s: FAIL
+
+# delete operator throws TypeError when deleting a non-configurable data property in strict mode
+chapter11/11.4/11.4.1/11.4.1-4.a-3-s: FAIL
+# delete operator throws TypeError when deleting a non-configurable data property in strict mode (Global.NaN)
+chapter11/11.4/11.4.1/11.4.1-4.a-4-s: FAIL
+# delete operator throws TypeError when deleting a non-configurable data property in strict mode (Math.LN2)
+chapter11/11.4/11.4.1/11.4.1-4.a-9-s: FAIL
+
+# delete operator throws ReferenceError when deleting a direct reference to a var in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-1-s: FAIL
+# delete operator throws ReferenceError when deleting a direct reference to a function argument in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-2-s: FAIL
+# delete operator throws ReferenceError when deleting a direct reference to a function name in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-3-s: FAIL
+# delete operator throws SyntaxError when deleting a direct reference to a function argument(object) in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-4-s: FAIL
+
+# eval - a function declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-1-s: FAIL
+# eval - a function assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-2-s: FAIL
+# eval - a function expr declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-3-s: FAIL
+# eval - a function expr assigning into 'eval' throws a EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-4-s: FAIL
+# eval - a Function declaring var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-5-s: FAIL
+# eval - a Function assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-6-s: FAIL
+# eval - a direct eval declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-7-s: FAIL
+# eval - a direct eval assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-8-s: FAIL
+# eval - an indirect eval declaring a var named 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-9-s: FAIL
+# eval - an indirect eval assigning into 'eval' throws EvalError in strict mode
+# EvalError - incorrect test (SyntaxError should be expected instead of EvalError)
+chapter12/12.2/12.2.1/12.2.1-10-s: FAIL
+
+# SyntaxError if eval used as function identifier in function declaration with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-3-s: FAIL
+# SyntaxError if eval used as function identifier in function expression with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-4-s: FAIL
+# SyntaxError if eval used as function identifier in function declaration in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-5-s: FAIL
+# SyntaxError if eval used as function identifier in function expression in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-6-s: FAIL
+# SyntaxError if arguments used as function identifier in function declaration with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-9-s: FAIL
+# SyntaxError if arguments used as function identifier in function expression with strict body
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-10-s: FAIL
+# SyntaxError if arguments used as function identifier in function declaration in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-11-s: FAIL
+# SyntaxError if arguments used as function identifier in function expression in strict code
+# test uses implicit return (which doesn't seem to work in v8 or safari jsc)
+chapter13/13.1/13.1-3-12-s: FAIL
+
+# 'use strict' directive - correct usage
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-1-s: FAIL
+# "use strict" directive - correct usage double quotes
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-2-s: FAIL
+# 'use strict' directive - may follow other directives
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-8-s: FAIL
+# 'use strict' directive - may occur multiple times
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-9-s: FAIL
+# other directives - may follow 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-10-s: FAIL
+# comments may precede 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-11-s: FAIL
+# comments may follow 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-12-s: FAIL
+# semicolon insertion works for 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-13-s: FAIL
+# semicolon insertion may come before 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-14-s: FAIL
+# blank lines may come before 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-15-s: FAIL
+
+# Duplicate combined parameter name allowed in Function constructor called in strict mode if body not strict
+# Invalid test case per ECMA-262 5th Edition, 10.1.1, bullet 4
+chapter15/15.3/15.3.2/15.3.2.1/15.3.2.1-11-6-s: FAIL
+
+# Array.prototype.every - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1-s: FAIL
+# Array.prototype.some - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-5-1-s: FAIL
+# Array.prototype.forEach - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1-s: FAIL
+# Array.prototype.map - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1-s: FAIL
+# Array.prototype.filter - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.20/15.4.4.20-5-1-s: FAIL
+# Array.prototype.reduce - null passed as thisValue to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-ii-4-s: FAIL
+
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
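Two of the strict-mode behaviors these expectations track, written out per the
ES5 spec (the very semantics V8 had not yet implemented when the expectations
were added):

  function strictExamples() {
    "use strict";
    try {
      NaN = 42;  // assignment to a read-only global: TypeError in strict mode
      throw new Error("expected TypeError");
    } catch (e) {
      if (!(e instanceof TypeError)) throw e;
    }
    (function(a) {
      a = 99;  // a strict arguments object does not alias the parameters
      if (arguments[0] !== 1) throw new Error("arguments[0] should stay 1");
    })(1);
  }
  strictExamples();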
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
index ceb7715..79c1fac 100644
--- a/test/mjsunit/get-own-property-descriptor.js
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -103,3 +103,19 @@
 objWithProto[0] = 'bar';
 var descWithProto = Object.getOwnPropertyDescriptor(objWithProto, '10');
 assertEquals(undefined, descWithProto);
+
+// Test elements on global proxy object.
+var global = (function() { return this; })();
+
+global[42] = 42;
+
+function el_getter() { return 239; };
+function el_setter() {};
+Object.defineProperty(global, '239', {get: el_getter, set: el_setter});
+
+var descRegularElement = Object.getOwnPropertyDescriptor(global, '42');
+assertEquals(42, descRegularElement.value);
+
+var descAccessorElement = Object.getOwnPropertyDescriptor(global, '239');
+assertEquals(el_getter, descAccessorElement.get);
+assertEquals(el_setter, descAccessorElement.set);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 057c0fa..2448564 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -112,6 +112,8 @@
 # Test that currently fails with crankshaft on ARM.
 compiler/simple-osr: FAIL
 
+# BUG (1094)
+regress/regress-deopt-gc: SKIP
 
 ##############################################################################
 [ $arch == x64 && $crankshaft ]
@@ -119,6 +121,8 @@
 # BUG (1026) This test is currently flaky.
 compiler/simple-osr: SKIP
 
+# BUG (1094)
+regress/regress-deopt-gc: SKIP
 
 ##############################################################################
 [ $arch == mips ]
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index d24a4e5..a8a3213 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -749,14 +749,33 @@
 assertTrue(desc.enumerable);
 assertFalse(desc.configurable);
 
-// Ensure that we can't overwrite the non configurable element.
+// Can use defineProperty to change the value of a non-configurable
+// property.
 try {
   Object.defineProperty(obj6, '2', descElement);
+  desc = Object.getOwnPropertyDescriptor(obj6, '2');
+  assertEquals(desc.value, 'foobar');
+} catch (e) {
+  assertUnreachable();
+}
+
+// Ensure that we can't change the descriptor of a
+// non-configurable property.
+try {
+  var descAccessor = { get: function() { return 0; } };
+  Object.defineProperty(obj6, '2', descAccessor);
   assertUnreachable();
 } catch (e) {
   assertTrue(/Cannot redefine property/.test(e));
 }
 
+Object.defineProperty(obj6, '2', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(obj6, '2');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
 Object.defineProperty(obj6, '3', descElementNonWritable);
 desc = Object.getOwnPropertyDescriptor(obj6, '3');
 assertEquals(desc.value, 'foofoo');
@@ -827,14 +846,33 @@
 assertTrue(desc.enumerable);
 assertFalse(desc.configurable);
 
-// Ensure that we can't overwrite the non configurable element.
+// Can use defineProperty to change the value of a non-configurable
+// property of an array.
 try {
   Object.defineProperty(arr, '2', descElement);
+  desc = Object.getOwnPropertyDescriptor(arr, '2');
+  assertEquals(desc.value, 'foobar');
+} catch (e) {
+  assertUnreachable();
+}
+
+// Ensure that we can't change the descriptor of a
+// non-configurable property.
+try {
+  var descAccessor = { get: function() { return 0; } };
+  Object.defineProperty(arr, '2', descAccessor);
   assertUnreachable();
 } catch (e) {
   assertTrue(/Cannot redefine property/.test(e));
 }
 
+Object.defineProperty(arr, '2', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(arr, '2');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
 Object.defineProperty(arr, '3', descElementNonWritable);
 desc = Object.getOwnPropertyDescriptor(arr, '3');
 assertEquals(desc.value, 'foofoo');
@@ -898,3 +936,98 @@
 assertEquals(undefined, o.x);
 o.x = 37;
 assertEquals(undefined, o.x);
+
+function testDefineProperty(obj, propertyName, desc, resultDesc) {
+  Object.defineProperty(obj, propertyName, desc);
+  var actualDesc = Object.getOwnPropertyDescriptor(obj, propertyName);
+  assertEquals(resultDesc.enumerable, actualDesc.enumerable);
+  assertEquals(resultDesc.configurable, actualDesc.configurable);
+  if (resultDesc.hasOwnProperty('value')) {
+    assertEquals(resultDesc.value, actualDesc.value);
+    assertEquals(resultDesc.writable, actualDesc.writable);
+    assertFalse(resultDesc.hasOwnProperty('get'));
+    assertFalse(resultDesc.hasOwnProperty('set'));
+  } else {
+    assertEquals(resultDesc.get, actualDesc.get);
+    assertEquals(resultDesc.set, actualDesc.set);
+    assertFalse(resultDesc.hasOwnProperty('value'));
+    assertFalse(resultDesc.hasOwnProperty('writable'));
+  }
+}
+
+// Tests redefining an existing property with a generic descriptor.
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { },
+  { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { enumerable : true },
+  { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { configurable : true },
+  { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { enumerable : false },
+  { value : 42, writable : true, enumerable : false, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { configurable : false },
+  { value : 42, writable : true, enumerable : true, configurable : false });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { enumerable : true, configurable : true },
+  { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { enumerable : false, configurable : true },
+  { value : 42, writable : true, enumerable : false, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { enumerable : true, configurable : false },
+  { value : 42, writable : true, enumerable : true, configurable : false });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+  { enumerable : false, configurable : false },
+  { value : 42, writable : true, enumerable : false, configurable : false });
+
+// Can make a writable, non-configurable field non-writable.
+o = { p : 42 };
+Object.defineProperty(o, 'p', { configurable: false });
+testDefineProperty(o, 'p',
+  { writable: false },
+  { value : 42, writable : false, enumerable : true, configurable : false });
+
+// Redefining a get-only property with a generic descriptor.
+o = {};
+Object.defineProperty(o, 'p',
+  { get : getter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+  { enumerable : false, configurable : false },
+  { get: getter1, set: undefined, enumerable : false, configurable : false });
+
+// Redefining a get/set property with a generic descriptor.
+o = {};
+Object.defineProperty(o, 'p',
+  { get: getter1, set: setter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+  { enumerable : false, configurable : false },
+  { get: getter1, set: setter1, enumerable : false, configurable : false });
+
+// Redefining a set-only property with a generic descriptor.
+o = {};
+Object.defineProperty(o, 'p',
+  { set : setter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+  { enumerable : false, configurable : false },
+  { get: undefined, set: setter1, enumerable : false, configurable : false });
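The generic-descriptor cases above follow ES5 8.12.9: a descriptor that names
neither value/writable nor get/set leaves the property's kind alone and only
updates the attributes it actually mentions. A minimal extra use of the
testDefineProperty helper defined above:

  // A bare {enumerable: false} keeps the value and writability of 'p' intact.
  var sample = { p: 42 };
  testDefineProperty(sample, 'p',
    { enumerable: false },
    { value: 42, writable: true, enumerable: false, configurable: true });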
diff --git a/test/mjsunit/regress/regress-1083.js b/test/mjsunit/regress/regress-1083.js
new file mode 100644
index 0000000..d231899
--- /dev/null
+++ b/test/mjsunit/regress/regress-1083.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that changing the generic descriptor flags on a property
+// on the global object doesn't break invariants.
+Object.defineProperty(this, 'Object', {enumerable:true});
+
+var desc = Object.getOwnPropertyDescriptor(this, 'Object');
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+assertFalse(desc.hasOwnProperty('get'));
+assertFalse(desc.hasOwnProperty('set'));
+assertTrue(desc.hasOwnProperty('value'));
+assertTrue(desc.writable);
diff --git a/test/mjsunit/regress/regress-1092.js b/test/mjsunit/regress/regress-1092.js
new file mode 100644
index 0000000..0b29231
--- /dev/null
+++ b/test/mjsunit/regress/regress-1092.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that CodeGenerator::EmitKeyedPropertyAssignment for the start
+// of an initialization block doesn't normalize the properties of the
+// JSGlobalProxy.
+this.w = 0;
+this.x = 1;
+this.y = 2;
+this.z = 3;
+
diff --git a/test/mjsunit/regress/regress-1099.js b/test/mjsunit/regress/regress-1099.js
new file mode 100644
index 0000000..0ed6ede
--- /dev/null
+++ b/test/mjsunit/regress/regress-1099.js
@@ -0,0 +1,46 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the LApplyArguments lithium instruction restores the context
+// after the call.
+
+function X() {
+  var slot = "foo"; return function (a) { return slot === a; }
+}
+
+function Y(x) {
+  var slot = "bar";
+  return function (a) {
+    x.apply(this, arguments);
+    return slot === 'bar';
+  };
+}
+
+var y = Y(X());
+
+for (var i = 0; i < 1000000; i++) {
+  assertTrue(y("foo"));
+}
diff --git a/test/mjsunit/regress/regress-900966.js b/test/mjsunit/regress/regress-900966.js
index acffe75..99603c1 100644
--- a/test/mjsunit/regress/regress-900966.js
+++ b/test/mjsunit/regress/regress-900966.js
@@ -37,6 +37,8 @@
 }
 f();
 f();
+f();
+f();
 
 assertTrue(2[11] === undefined);
 Number.prototype[11] = 'y';
diff --git a/test/mjsunit/regress/regress-992.js b/test/mjsunit/regress/regress-992.js
new file mode 100644
index 0000000..dbe25a5
--- /dev/null
+++ b/test/mjsunit/regress/regress-992.js
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Object.defineProperty with a generic descriptor on an existing property
+// should just update the enumerable/configurable flags.
+
+var obj = { get p() { return 42; } };
+var desc = Object.getOwnPropertyDescriptor(obj, 'p');
+var getter = desc.get;
+
+Object.defineProperty(obj, 'p', {enumerable: false });
+assertEquals(obj.p, 42);
+desc = Object.getOwnPropertyDescriptor(obj, 'p');
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertEquals(desc.get, getter);
+assertEquals(desc.set, undefined);
+assertFalse(desc.hasOwnProperty('value'));
+assertFalse(desc.hasOwnProperty('writable'));
diff --git a/test/mjsunit/regress/regress-deopt-gc.js b/test/mjsunit/regress/regress-deopt-gc.js
new file mode 100644
index 0000000..7b7c29a
--- /dev/null
+++ b/test/mjsunit/regress/regress-deopt-gc.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// This tests that we can correctly handle a GC immediately after a function
+// has been deoptimized, even when we have an activation of this function on
+// the stack.
+
+// Ensure that there are code objects before the code for the opt_me function.
+(function() { var a = 10; a++; })();
+
+function opt_me() {
+  deopt();
+}
+
+function deopt() {
+  // Make sure we don't inline this function.
+  try { var a = 42; } catch(o) {};
+  %DeoptimizeFunction(opt_me);
+  gc(true);
+}
+
+
+opt_me();
diff --git a/test/mjsunit/strict-mode-eval.js b/test/mjsunit/strict-mode-eval.js
new file mode 100644
index 0000000..018ed9e
--- /dev/null
+++ b/test/mjsunit/strict-mode-eval.js
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var code1 = "function f(eval) {}";
+var code2 = "function f(a, a) {}";
+var code3 = "var x = '\\020;'";
+var code4 = "function arguments() {}";
+
+// Verify that the code compiles just fine in non-strict mode
+// (using an aliased eval to force non-strict mode).
+var eval_alias = eval;
+
+eval_alias(code1);
+eval_alias(code2);
+eval_alias(code3);
+eval_alias(code4);
+
+function strict1() {
+  try {
+    eval(code1);
+    assertUnreachable("did not throw exception");
+  } catch (e) {
+    assertInstanceof(e, SyntaxError);
+  }
+
+  function strict2() {
+    try {
+      eval(code2);
+      assertUnreachable("did not throw exception");
+    } catch (e) {
+      assertInstanceof(e, SyntaxError);
+    }
+
+    function strict3() {
+      try {
+        eval(code3);
+        assertUnreachable("did not throw exception");
+      } catch (e) {
+        assertInstanceof(e, SyntaxError);
+      }
+
+      function strict4() {
+        try {
+          eval(code4);
+          assertUnreachable("did not throw exception");
+        } catch (e) {
+          assertInstanceof(e, SyntaxError);
+        }
+      }
+      strict4();
+    }
+    strict3();
+  }
+  strict2();
+}
+strict1();
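The nesting in strict1 through strict4 checks that a direct eval inherits
strictness from its caller at every depth, while the aliased eval_alias above
is an indirect eval and compiles the same strings as non-strict global code.
Distilled into a standalone sketch:

  function demo() {
    "use strict";
    var indirect = eval;
    indirect("function f(eval) {}");  // indirect eval: non-strict, compiles
    try {
      eval("function f(eval) {}");    // direct eval: strict, must throw
      throw new Error("expected SyntaxError");
    } catch (e) {
      if (!(e instanceof SyntaxError)) throw e;
    }
  }
  demo();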
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index 3cb6d32..ddddfab 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -271,3 +271,68 @@
   var y = [void arguments, typeof arguments,
            +arguments, -arguments, ~arguments, !arguments];
 })();
+
+// 7.6.1.2 Future Reserved Words
+var future_reserved_words = [
+  "class",
+  "enum",
+  "export",
+  "extends",
+  "import",
+  "super",
+  "implements",
+  "interface",
+  "let",
+  "package",
+  "private",
+  "protected",
+  "public",
+  "static",
+  "yield" ];
+
+function testFutureReservedWord(word) {
+  // Simple use of each reserved word
+  CheckStrictMode("var " + word + " = 1;", SyntaxError);
+
+  // Object literal properties.
+  eval("var x = { " + word + " : 42 };");
+  eval("var x = { get " + word + " () {} };");
+  eval("var x = { set " + word + " (value) {} };");
+
+  // Object literal with string literal property names.
+  eval("var x = { '" + word + "' : 42 };");
+  eval("var x = { get '" + word + "' () { } };");
+  eval("var x = { set '" + word + "' (value) { } };");
+  eval("var x = { get '" + word + "' () { 'use strict'; } };");
+  eval("var x = { set '" + word + "' (value) { 'use strict'; } };");
+
+  // Function names and arguments, strict and non-strict contexts
+  CheckStrictMode("function " + word + " () {}", SyntaxError);
+  CheckStrictMode("function foo (" + word + ") {}", SyntaxError);
+  CheckStrictMode("function foo (" + word + ", " + word + ") {}", SyntaxError);
+  CheckStrictMode("function foo (a, " + word + ") {}", SyntaxError);
+  CheckStrictMode("function foo (" + word + ", a) {}", SyntaxError);
+  CheckStrictMode("function foo (a, " + word + ", b) {}", SyntaxError);
+  CheckStrictMode("var foo = function (" + word + ") {}", SyntaxError);
+
+  // Function names and arguments when the body is strict
+  assertThrows("function " + word + " () { 'use strict'; }", SyntaxError);
+  assertThrows("function foo (" + word + ")  'use strict'; {}", SyntaxError);
+  assertThrows("function foo (" + word + ", " + word + ") { 'use strict'; }", SyntaxError);
+  assertThrows("function foo (a, " + word + ") { 'use strict'; }", SyntaxError);
+  assertThrows("function foo (" + word + ", a) { 'use strict'; }", SyntaxError);
+  assertThrows("function foo (a, " + word + ", b) { 'use strict'; }", SyntaxError);
+  assertThrows("var foo = function (" + word + ") { 'use strict'; }", SyntaxError);
+
+  // Get/set when the body is strict.
+  eval("var x = { get " + word + " () { 'use strict'; } };");
+  eval("var x = { set " + word + " (value) { 'use strict'; } };");
+  assertThrows("var x = { get foo(" + word + ") { 'use strict'; } };", SyntaxError);
+  assertThrows("var x = { set foo(" + word + ") { 'use strict'; } };", SyntaxError);
+}
+
+for (var i = 0; i < future_reserved_words.length; i++) {
+  testFutureReservedWord(future_reserved_words[i]);
+}
+
+
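Of the words listed, only the second half (implements through yield) are
reserved exclusively in strict code under ES5 7.6.1.2; class, enum, export,
extends, import and super are reserved everywhere, which is why the test can
reject every word as a strict-mode identifier while still accepting each one
as a property name. A quick sketch of the strict-only distinction:

  eval("var let = 1;");  // 'let' is a legal identifier in non-strict code
  var threw = false;
  try {
    eval("'use strict'; var let = 1;");  // reserved word in strict code
  } catch (e) {
    threw = e instanceof SyntaxError;
  }
  if (!threw) throw new Error("expected SyntaxError for strict 'let'");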
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index cfccc46..3b6a524 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -110,7 +110,7 @@
 js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
 # http://b/issue?id=1206983
 js1_5/Regress/regress-367561-03: PASS || FAIL if $mode == debug
-ecma/Date/15.9.5.10-2: PASS || FAIL if $mode == debug
+ecma/Date/15.9.5.10-2: PASS || (FAIL || TIMEOUT if $mode == debug)
 
 # These tests create two Date objects just after each other and
 # expects them to match.  Sometimes this happens on the border
@@ -169,7 +169,6 @@
 # In Denmark the adjustment starts one week earlier!.
 # Tests based on shell that use dates in this gap are flaky.
 ecma/Date/15.9.5.10-1: PASS || FAIL
-ecma/Date/15.9.5.10-2: PASS || TIMEOUT if ($arch == arm && $mode == debug)
 ecma/Date/15.9.5.12-1: PASS || FAIL
 ecma/Date/15.9.5.14: PASS || FAIL
 ecma/Date/15.9.5.34-1: PASS || FAIL
@@ -821,20 +820,17 @@
 js1_5/extensions/regress-342960: SKIP
 
 # BUG(3251229): Times out when running new crankshaft test script.
-ecma/Date/15.9.5.12-2: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Date/15.9.5.10-2: SKIP
-ecma/Date/15.9.5.8: SKIP
 ecma_3/RegExp/regress-311414: SKIP
-js1_5/Array/regress-99120-02: SKIP
-js1_5/Regress/regress-203278-1: SKIP
 ecma/Date/15.9.5.8: SKIP
 ecma/Date/15.9.5.10-2: SKIP
 ecma/Date/15.9.5.11-2: SKIP
 ecma/Date/15.9.5.12-2: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/extensions/regress-371636: SKIP
+js1_5/Regress/regress-203278-1: SKIP
 js1_5/Regress/regress-404755:  SKIP
 js1_5/Regress/regress-451322: SKIP
-js1_5/extensions/regress-371636: SKIP
+
 
 # BUG(1040): Allow this test to timeout.
 js1_5/GC/regress-203278-2: PASS || TIMEOUT
@@ -865,7 +861,6 @@
 ecma/Date/15.9.5.12-2: SKIP
 ecma/Date/15.9.5.8: SKIP
 ecma/Date/15.9.5.9: SKIP
-ecma/Date/15.9.5.10-2: SKIP
 ecma/Date/15.9.5.11-2: SKIP
 ecma/Expressions/11.7.2: SKIP
 ecma/Expressions/11.10-2: SKIP
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index c5af016..f73c7e1 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -441,6 +441,7 @@
             '../../src/lithium.h',
             '../../src/lithium-allocator.cc',
             '../../src/lithium-allocator.h',
+            '../../src/lithium-allocator-inl.h',
             '../../src/liveedit.cc',
             '../../src/liveedit.h',
             '../../src/liveobjectlist-inl.h',
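
The gyp hunk above only registers the new inline header with the build; for
orientation, a sketch of the enclosing structure in v8.gyp (abridged to the
lines around the new entry):

    'sources': [
      '../../src/lithium-allocator.cc',
      '../../src/lithium-allocator.h',
      '../../src/lithium-allocator-inl.h',
      '../../src/liveedit.cc',
    ],

The Visual Studio project diffs that follow mirror the same change (plus other
files the projects were missing) for each architecture, since the .vcproj
files list sources independently of gyp.
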
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index ceeebf9..a980bd2 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -689,6 +689,14 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\lithium.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\lithium-allocator.cc"
         >
       </File>
@@ -697,6 +705,10 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\lithium-allocator-inl.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\ia32\lithium-ia32.cc"
         >
       </File>
@@ -713,6 +725,14 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\liveedit.cc"
         >
       </File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index cd4c52e..6aa73da 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -697,6 +697,10 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\lithium-allocator-inl.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\arm\lithium-arm.cc"
         >
       </File>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 2c7bf5e..c8ceaa2 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -493,14 +493,6 @@
         >
       </File>
       <File
-        RelativePath="..\..\src\flow-graph.cc"
-        >
-      </File>
-      <File
-        RelativePath="..\..\src\flow-graph.h"
-        >
-      </File>
-      <File
         RelativePath="..\..\src\frame-element.cc"
         >
       </File>
@@ -610,6 +602,22 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\hydrogen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen-instructions.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen-instructions.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\x64\ic-x64.cc"
         >
       </File>
@@ -682,6 +690,14 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\lithium.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\lithium-allocator.cc"
         >
       </File>
@@ -690,6 +706,34 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\lithium-allocator-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\lithium-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\lithium-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\lithium-codegen-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\lithium-codegen-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\lithium-gap-resolver-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\lithium-gap-resolver-x64.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\liveedit.cc"
         >
       </File>
@@ -806,6 +850,22 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\preparser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparse-data.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparse-data.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\profile-generator.cc"
         >
       </File>
@@ -930,6 +990,14 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\scanner-base.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner-base.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\scanner.cc"
         >
       </File>
@@ -1086,6 +1154,14 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\v8checks.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8globals.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\v8threads.cc"
         >
       </File>
@@ -1094,6 +1170,10 @@
         >
       </File>
       <File
+        RelativePath="..\..\src\v8utils.h"
+        >
+      </File>
+      <File
         RelativePath="..\..\src\variables.cc"
         >
       </File>
@@ -1157,6 +1237,22 @@
         RelativePath="..\..\src\zone.h"
         >
       </File>
+      <File
+        RelativePath="..\..\src\extensions\externalize-string-extension.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\externalize-string-extension.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\gc-extension.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\gc-extension.h"
+        >
+      </File>
       <Filter
         Name="third party"
         >
@@ -1197,6 +1293,10 @@
         RelativePath="..\..\include\v8.h"
         >
       </File>
+      <File
+        RelativePath="..\..\include\v8stdint.h"
+        >
+      </File>
     </Filter>
   </Files>
   <Globals>