Version 3.8.0.

Fixed handling of arrays in DefineOwnProperty. (issue 1756)

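A minimal sketch of the kind of case this covers (hypothetical repro, not
the regression test from issue 1756):

    var a = [1, 2, 3];
    // Redefine an array element via the DefineOwnProperty path.
    Object.defineProperty(a, "1", { value: 42, writable: false });
    a[1] = 0;  // No-op in sloppy mode, since a[1] is now non-writable.
    // a should still be [1, 42, 3] afterwards.
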
Synced parser and preparser on do-while and return statements. (issue 1856)

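The divergence matters for lazily compiled functions, where the preparser's
syntax decisions must later agree with the full parser's. An illustrative
shape (assumed, not the exact case from issue 1856):

    function f() {
      do { } while (false)  // no semicolon after the do-while
      return 1;
    }
    f();  // f is compiled lazily; parser and preparser must agree here
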
Fixed another corner case for DefineOwnProperty on arrays. (issue 1756)

Stability and performance improvements on all platforms.


git-svn-id: http://v8.googlecode.com/svn/trunk@10239 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 35b8aa0..7eaadbb 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1462,31 +1462,35 @@
   ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
   LOG_API(isolate, "Script::New");
   ENTER_V8(isolate);
-  i::Handle<i::String> str = Utils::OpenHandle(*source);
-  i::Handle<i::Object> name_obj;
-  int line_offset = 0;
-  int column_offset = 0;
-  if (origin != NULL) {
-    if (!origin->ResourceName().IsEmpty()) {
-      name_obj = Utils::OpenHandle(*origin->ResourceName());
+  i::SharedFunctionInfo* raw_result = NULL;
+  { i::HandleScope scope(isolate);
+    i::Handle<i::String> str = Utils::OpenHandle(*source);
+    i::Handle<i::Object> name_obj;
+    int line_offset = 0;
+    int column_offset = 0;
+    if (origin != NULL) {
+      if (!origin->ResourceName().IsEmpty()) {
+        name_obj = Utils::OpenHandle(*origin->ResourceName());
+      }
+      if (!origin->ResourceLineOffset().IsEmpty()) {
+        line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+      }
+      if (!origin->ResourceColumnOffset().IsEmpty()) {
+        column_offset =
+            static_cast<int>(origin->ResourceColumnOffset()->Value());
+      }
     }
-    if (!origin->ResourceLineOffset().IsEmpty()) {
-      line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+    EXCEPTION_PREAMBLE(isolate);
+    i::ScriptDataImpl* pre_data_impl =
+        static_cast<i::ScriptDataImpl*>(pre_data);
+    // We assert that the pre-data is sane; release-mode code can
+    // actually handle pre-data that turns out not to be.
+    ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
+    // If the pre-data isn't sane, we simply ignore it.
+    if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+      pre_data_impl = NULL;
     }
-    if (!origin->ResourceColumnOffset().IsEmpty()) {
-      column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
-    }
-  }
-  EXCEPTION_PREAMBLE(isolate);
-  i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
-  // We assert that the pre-data is sane, even though we can actually
-  // handle it if it turns out not to be in release mode.
-  ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
-  // If the pre-data isn't sane we simply ignore it
-  if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
-    pre_data_impl = NULL;
-  }
-  i::Handle<i::SharedFunctionInfo> result =
+    i::Handle<i::SharedFunctionInfo> result =
       i::Compiler::Compile(str,
                            name_obj,
                            line_offset,
@@ -1495,8 +1499,11 @@
                            pre_data_impl,
                            Utils::OpenHandle(*script_data),
                            i::NOT_NATIVES_CODE);
-  has_pending_exception = result.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+    has_pending_exception = result.is_null();
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+    raw_result = *result;
+  }
+  i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
   return Local<Script>(ToApi<Script>(result));
 }
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d0136f5..0e28241 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -394,13 +394,18 @@
   // r5: elements_array_end (untagged)
   // sp[0]: last argument
   Label loop, entry;
+  __ mov(r7, sp);
   __ jmp(&entry);
   __ bind(&loop);
-  __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(r2, call_generic_code);
+  }
   __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
   __ bind(&entry);
   __ cmp(r4, r5);
   __ b(lt, &loop);
+  __ mov(sp, r7);
 
   // Remove caller arguments and receiver from the stack, setup return value and
   // return.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 8b1d0c4..282df15 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -3455,110 +3455,202 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
+  CpuFeatures::Scope vfp3_scope(VFP3);
+  const Register base = r1;
+  const Register exponent = r2;
+  const Register heapnumbermap = r5;
+  const Register heapnumber = r0;
+  const DoubleRegister double_base = d1;
+  const DoubleRegister double_exponent = d2;
+  const DoubleRegister double_result = d3;
+  const DoubleRegister double_scratch = d0;
+  const SwVfpRegister single_scratch = s0;
+  const Register scratch = r9;
+  const Register scratch2 = r7;
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-
-    Label base_not_smi;
-    Label exponent_not_smi;
-    Label convert_exponent;
-
-    const Register base = r0;
-    const Register exponent = r1;
-    const Register heapnumbermap = r5;
-    const Register heapnumber = r6;
-    const DoubleRegister double_base = d0;
-    const DoubleRegister double_exponent = d1;
-    const DoubleRegister double_result = d2;
-    const SwVfpRegister single_scratch = s0;
-    const Register scratch = r9;
-    const Register scratch2 = r7;
-
-    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+  Label call_runtime, done, exponent_not_smi, int_exponent;
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
     __ ldr(base, MemOperand(sp, 1 * kPointerSize));
     __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
 
-    // Convert base to double value and store it in d0.
-    __ JumpIfNotSmi(base, &base_not_smi);
-    // Base is a Smi. Untag and convert it.
-    __ SmiUntag(base);
-    __ vmov(single_scratch, base);
-    __ vcvt_f64_s32(double_base, single_scratch);
-    __ b(&convert_exponent);
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
 
-    __ bind(&base_not_smi);
+    __ JumpIfSmi(base, &base_is_smi);
     __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
-    // Base is a heapnumber. Load it into double register.
-    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
 
-    __ bind(&convert_exponent);
+    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent);
+
+    __ bind(&base_is_smi);
+    __ SmiUntag(base);
+    __ vmov(single_scratch, base);
+    __ vcvt_f64_s32(double_base, single_scratch);
+    __ bind(&unpack_exponent);
+
     __ JumpIfNotSmi(exponent, &exponent_not_smi);
     __ SmiUntag(exponent);
-
-    // The base is in a double register and the exponent is
-    // an untagged smi. Allocate a heap number and call a
-    // C function for integer exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(lr);
-    __ PrepareCallCFunction(1, 1, scratch);
-    __ SetCallCDoubleArguments(double_base, exponent);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ CallCFunction(
-          ExternalReference::power_double_int_function(masm->isolate()),
-          1, 1);
-      __ pop(lr);
-      __ GetCFunctionDoubleResult(double_result);
-    }
-    __ vstr(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(r0, heapnumber);
-    __ Ret(2 * kPointerSize);
+    __ jmp(&int_exponent);
 
     __ bind(&exponent_not_smi);
     __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
-    // Exponent is a heapnumber. Load it into double register.
     __ vldr(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    // Base is already in double_base.
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
+    __ jmp(&int_exponent);
 
-    // The base and the exponent are in double registers.
-    // Allocate a heap number and call a C function for
-    // double exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
+    __ bind(&exponent_not_smi);
+    __ vldr(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type_ != INTEGER) {
+    Label int_exponent_convert;
+    // Detect integer exponents stored as double.
+    __ vcvt_u32_f64(single_scratch, double_exponent);
+    // We do not check for NaN or Infinity here because the VFP comparison
+    // below treats NaNs as unordered, so we end up calling the built-in.
+    __ vcvt_f64_u32(double_scratch, single_scratch);
+    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
+    __ b(eq, &int_exponent_convert);
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half;
+
+      // Test for 0.5.
+      __ vmov(double_scratch, 0.5);
+      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+      __ b(ne, &not_plus_half);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ vmov(double_scratch, -V8_INFINITY);
+      __ VFPCompareAndSetFlags(double_base, double_scratch);
+      __ vneg(double_result, double_scratch, eq);
+      __ b(eq, &done);
+
+      // Add +0 to convert -0 to +0.
+      __ vadd(double_scratch, double_base, kDoubleRegZero);
+      __ vsqrt(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&not_plus_half);
+      __ vmov(double_scratch, -0.5);
+      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+      __ b(ne, &call_runtime);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ vmov(double_scratch, -V8_INFINITY);
+      __ VFPCompareAndSetFlags(double_base, double_scratch);
+      __ vmov(double_result, kDoubleRegZero, eq);
+      __ b(eq, &done);
+
+      // Add +0 to convert -0 to +0.
+      __ vadd(double_scratch, double_base, kDoubleRegZero);
+      __ vmov(double_result, 1);
+      __ vsqrt(double_scratch, double_scratch);
+      __ vdiv(double_result, double_result, double_scratch);
+      __ jmp(&done);
+    }
+
     __ push(lr);
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(double_base, double_exponent);
     {
       AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
       __ CallCFunction(
           ExternalReference::power_double_double_function(masm->isolate()),
           0, 2);
-      __ pop(lr);
-      __ GetCFunctionDoubleResult(double_result);
     }
-    __ vstr(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(r0, heapnumber);
-    __ Ret(2 * kPointerSize);
+    __ pop(lr);
+    __ GetCFunctionDoubleResult(double_result);
+    __ jmp(&done);
+
+    __ bind(&int_exponent_convert);
+    __ vcvt_u32_f64(single_scratch, double_exponent);
+    __ vmov(exponent, single_scratch);
   }
 
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  __ mov(scratch, exponent);  // Back up exponent.
+  __ vmov(double_scratch, double_base);  // Back up base.
+  __ vmov(double_result, 1.0);
+
+  // Get absolute value of exponent.
+  __ cmp(scratch, Operand(0));
+  __ mov(scratch2, Operand(0), LeaveCC, mi);
+  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
+
+  Label while_true;
+  __ bind(&while_true);
+  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
+  __ vmul(double_result, double_result, double_scratch, cs);
+  __ vmul(double_scratch, double_scratch, double_scratch, ne);
+  __ b(ne, &while_true);
+
+  __ cmp(exponent, Operand(0));
+  __ b(ge, &done);
+  __ vmov(double_scratch, 1.0);
+  __ vdiv(double_result, double_scratch, double_result);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ VFPCompareAndSetFlags(double_result, 0.0);
+  __ b(ne, &done);
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it with the exponent value before bailing out.
+  __ vmov(single_scratch, exponent);
+  __ vcvt_f64_s32(double_exponent, single_scratch);
+
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as heap number in exponent.
+    __ bind(&done);
+    __ AllocateHeapNumber(
+        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+    __ vstr(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    ASSERT(heapnumber.is(r0));
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret(2);
+  } else {
+    __ push(lr);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(lr);
+    __ GetCFunctionDoubleResult(double_result);
+
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret();
+  }
 }
 
 
@@ -6628,26 +6720,47 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  __ Push(r1, r0);
-  __ push(lr);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ and_(r2, r1, Operand(r0));
+  __ JumpIfSmi(r2, &miss);
+  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r2, Operand(known_map_));
+  __ b(ne, &miss);
+  __ cmp(r3, Operand(known_map_));
+  __ b(ne, &miss);
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss =
-      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+  __ sub(r0, r0, Operand(r1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(r1, r0);
+    __ push(lr);
+    __ Push(r1, r0);
     __ mov(ip, Operand(Smi::FromInt(op_)));
     __ push(ip);
     __ CallExternalReference(miss, 3);
+    // Compute the entry point of the rewritten stub.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ pop(lr);
+    __ pop(r0);
+    __ pop(r1);
   }
-  // Compute the entry point of the rewritten stub.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  // Restore registers.
-  __ pop(lr);
-  __ pop(r0);
-  __ pop(r1);
+
   __ Jump(r2);
 }
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index fdd3266..7e9a889 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -2938,8 +2938,12 @@
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub;
-  __ CallStub(&stub);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    MathPowStub stub(MathPowStub::ON_STACK);
+    __ CallStub(&stub);
+  } else {
+    __ CallRuntime(Runtime::kMath_pow, 2);
+  }
   context()->Plug(r0);
 }
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index f8e4bbb..abbac99 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1587,6 +1587,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 2341774..1ac152d 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1153,6 +1153,11 @@
     LOperand* input = UseFixedDouble(instr->value(), d2);
     LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
     return MarkAsCall(DefineFixedDouble(result, d2), instr);
+  } else if (op == kMathPowHalf) {
+    LOperand* input = UseFixedDouble(instr->value(), d2);
+    LOperand* temp = FixedTemp(d3);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+    return DefineFixedDouble(result, d2);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1166,8 +1171,6 @@
         return DefineAsRegister(result);
       case kMathRound:
         return AssignEnvironment(DefineAsRegister(result));
-      case kMathPowHalf:
-        return DefineAsRegister(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1402,7 +1405,7 @@
   LOperand* left = UseFixedDouble(instr->left(), d1);
   LOperand* right = exponent_type.IsDouble() ?
       UseFixedDouble(instr->right(), d2) :
-      UseFixed(instr->right(), r0);
+      UseFixed(instr->right(), r2);
   LPower* result = new LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, d3),
                     instr,
@@ -1795,7 +1798,8 @@
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1809,7 +1813,8 @@
     context = UseRegister(instr->context());
     value = UseRegister(instr->value());
   }
-  return new LStoreContextSlot(context, value);
+  LInstruction* result = new LStoreContextSlot(context, value);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 6051ad9..628c3d1 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1793,6 +1793,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return InputAt(0); }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 22a504f..25532a2 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -321,7 +321,22 @@
   if (op->IsRegister()) {
     return ToRegister(op->index());
   } else if (op->IsConstantOperand()) {
-    __ mov(scratch, ToOperand(op));
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsDouble()) {
+      Abort("EmitLoadRegister: Unsupported double immediate.");
+    } else {
+      ASSERT(r.IsTagged());
+      if (literal->IsSmi()) {
+        __ mov(scratch, Operand(literal));
+      } else {
+        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+      }
+    }
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
     __ ldr(scratch, ToMemOperand(op));
@@ -1337,8 +1352,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+  Handle<Object> value = instr->value();
+  if (value->IsSmi()) {
+    __ mov(ToRegister(instr->result()), Operand(value));
+  } else {
+    __ LoadHeapObject(ToRegister(instr->result()),
+                      Handle<HeapObject>::cast(value));
+  }
 }
 
 
@@ -2164,7 +2184,7 @@
   // offset to the location of the map check.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(temp.is(r4));
-  __ mov(InstanceofStub::right(), Operand(instr->function()));
+  __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 4;
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   Label before_push_delta;
@@ -2263,21 +2283,7 @@
 
   // Store the value.
   __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-
-  // Cells are always in the remembered set.
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    __ RecordWriteField(scratch,
-                        JSGlobalPropertyCell::kValueOffset,
-                        value,
-                        scratch2,
-                        kLRHasBeenSaved,
-                        kSaveFPRegs,
-                        OMIT_REMEMBERED_SET,
-                        check_needed);
-  }
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2297,6 +2303,11 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ ldr(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    DeoptimizeIf(eq, instr->environment());
+  }
 }
 
 
@@ -2304,6 +2315,13 @@
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
   MemOperand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    Register scratch = scratch0();
+    __ ldr(scratch, target);
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch, ip);
+    DeoptimizeIf(eq, instr->environment());
+  }
   __ str(value, target);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     HType type = instr->hydrogen()->value()->type();
@@ -2355,7 +2373,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
   }
 }
 
@@ -2800,7 +2818,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  LoadHeapObject(result, instr->hydrogen()->closure());
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2868,7 +2886,7 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  __ mov(r1, Operand(instr->function()));
+  __ LoadHeapObject(r1, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -3053,11 +3071,11 @@
   __ and_(scratch, result, Operand(HeapNumber::kSignMask));
 
   __ Vmov(double_scratch0(), 0.5);
-  __ vadd(input, input, double_scratch0());
+  __ vadd(double_scratch0(), input, double_scratch0());
 
   // Check sign of the result: if the sign changed, the input
   // value was in ]0.5, 0[ and the result should be -0.
-  __ vmov(result, input.high());
+  __ vmov(result, double_scratch0().high());
   __ eor(result, result, Operand(scratch), SetCC);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     DeoptimizeIf(mi, instr->environment());
@@ -3068,7 +3086,7 @@
 
   __ EmitVFPTruncate(kRoundToMinusInf,
                      double_scratch0().low(),
-                     input,
+                     double_scratch0(),
                      result,
                      scratch);
   DeoptimizeIf(ne, instr->environment());
@@ -3097,68 +3115,53 @@
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ vmov(temp, -V8_INFINITY);
+  __ VFPCompareAndSetFlags(input, temp);
+  __ vneg(result, temp, eq);
+  __ b(&done, eq);
+
   // Add +0 to convert -0 to +0.
   __ vadd(result, input, kDoubleRegZero);
   __ vsqrt(result, result);
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  Register scratch = scratch0();
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  if (exponent_type.IsDouble()) {
-    // Prepare arguments and call C function.
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left),
-                               ToDoubleRegister(right));
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 0, 2);
-  } else if (exponent_type.IsInteger32()) {
-    ASSERT(ToRegister(right).is(r0));
-    // Prepare arguments and call C function.
-    __ PrepareCallCFunction(1, 1, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 1, 1);
-  } else {
-    ASSERT(exponent_type.IsTagged());
-    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(d2));
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(r2));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
+  ASSERT(ToDoubleRegister(instr->result()).is(d3));
 
-    Register right_reg = ToRegister(right);
-
-    // Check for smi on the right hand side.
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-
-    // Untag smi and convert it to a double.
-    __ SmiUntag(right_reg);
-    SwVfpRegister single_scratch = double_scratch0().low();
-    __ vmov(single_scratch, right_reg);
-    __ vcvt_f64_s32(result_reg, single_scratch);
-    __ jmp(&call);
-
-    // Heap number map check.
-    __ bind(&non_smi);
-    __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(r2, &no_deopt);
+    __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-    __ cmp(scratch, Operand(ip));
+    __ cmp(r7, Operand(ip));
     DeoptimizeIf(ne, instr->environment());
-    int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
-    __ add(scratch, right_reg, Operand(value_offset));
-    __ vldr(result_reg, scratch, 0);
-
-    // Prepare arguments and call C function.
-    __ bind(&call);
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 0, 2);
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
-  // Store the result in the result register.
-  __ GetCFunctionDoubleResult(result_reg);
 }
 
 
@@ -3294,7 +3297,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  __ mov(r1, Operand(instr->target()));
+  __ LoadHeapObject(r1, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -4118,9 +4121,18 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Register reg = ToRegister(instr->InputAt(0));
-  __ cmp(reg, Operand(instr->hydrogen()->target()));
+  Register reg = ToRegister(instr->value());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Register reg = ToRegister(instr->value());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ mov(ip, Operand(Handle<Object>(cell)));
+    __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+    __ cmp(reg, ip);
+  } else {
+    __ cmp(reg, Operand(target));
+  }
   DeoptimizeIf(ne, instr->environment());
 }
 
@@ -4189,19 +4201,6 @@
 }
 
 
-void LCodeGen::LoadHeapObject(Register result,
-                              Handle<HeapObject> object) {
-  if (heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        factory()->NewJSGlobalPropertyCell(object);
-    __ mov(result, Operand(cell));
-    __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
-  } else {
-    __ mov(result, Operand(object));
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register temp1 = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));
@@ -4210,7 +4209,7 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(temp1, current_prototype);
+  __ LoadHeapObject(temp1, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
@@ -4220,7 +4219,7 @@
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(temp1, current_prototype);
+    __ LoadHeapObject(temp1, current_prototype);
   }
 
   // Check the holder map.
@@ -4231,15 +4230,31 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
+
+  // Deopt if the array literal boilerplate's ElementsKind differs from the
+  // expected one. The check isn't necessary if the boilerplate has
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
+    // Load map into r2.
+    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    __ cmp(r2, Operand(boilerplate_elements_kind));
+    DeoptimizeIf(ne, instr->environment());
+  }
 
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r1, Operand(constant_elements));
+  // Boilerplate already exists; constant elements are never accessed.
+  // Pass an empty fixed array.
+  __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
   __ Push(r3, r2, r1);
 
   // Pick the right runtime function or stub to call.
@@ -4256,9 +4271,9 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
@@ -4297,10 +4312,10 @@
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);
       __ add(r2, result, Operand(*offset));
       __ str(r2, FieldMemOperand(result, total_offset));
-      LoadHeapObject(source, value_object);
+      __ LoadHeapObject(source, value_object);
       EmitDeepCopy(value_object, result, source, offset);
     } else if (value->IsHeapObject()) {
-      LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+      __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
       __ str(r2, FieldMemOperand(result, total_offset));
     } else {
       __ mov(r2, Operand(value));
@@ -4326,7 +4341,7 @@
 
   __ bind(&allocated);
   int offset = 0;
-  LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
   EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
   ASSERT_EQ(size, offset);
 }
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 4fc3b03..59a5e5b 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -407,6 +407,19 @@
 }
 
 
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    mov(result, Operand(cell));
+    ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    mov(result, Operand(object));
+  }
+}
+
+
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cond,
@@ -1111,7 +1124,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  mov(r1, Operand(function));
+  LoadHeapObject(r1, function);
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 2725883..9d74633 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -166,6 +166,8 @@
                  Heap::RootListIndex index,
                  Condition cond = al);
 
+  void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
   // ---------------------------------------------------------------------------
   // GC Support
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 47778f5..b6b2ee2 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -575,7 +575,7 @@
   // -----------------------------------
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
-  __ mov(r5, Operand(function));
+  __ LoadHeapObject(r5, function);
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
@@ -1099,7 +1099,7 @@
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<Object> value,
+                                        Handle<JSFunction> value,
                                         Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
@@ -1110,7 +1110,7 @@
       object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ mov(r0, Operand(value));
+  __ LoadHeapObject(r0, value);
   __ Ret();
 }
 
@@ -2587,15 +2587,7 @@
 
   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
-
-  __ mov(r1, r0);
-  __ RecordWriteField(r4,
-                      JSGlobalPropertyCell::kValueOffset,
-                      r1,
-                      r2,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET);
+  // Cells are always rescanned, so no write barrier here.
 
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2690,7 +2682,7 @@
 
 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
                                                    Handle<JSObject> holder,
-                                                   Handle<Object> value,
+                                                   Handle<JSFunction> value,
                                                    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
@@ -2830,7 +2822,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<Object> value) {
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
diff --git a/src/assembler.cc b/src/assembler.cc
index bc05c01..941f45c 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1113,17 +1113,9 @@
 
 
 double power_double_double(double x, double y) {
-  int y_int = static_cast<int>(y);
-  if (y == y_int) {
-    return power_double_int(x, y_int);  // Returns 1.0 for exponent 0.
-  }
-  if (!isinf(x)) {
-    if (y == 0.5) return sqrt(x + 0.0);  // -0 must be converted to +0.
-    if (y == -0.5) return 1.0 / sqrt(x + 0.0);
-  }
-  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-    return OS::nan_value();
-  }
+  // The checks for special cases can be dropped on ia32 because they have
+  // already been done in generated code before bailing out here.
+  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
   return pow(x, y);
 }
 
diff --git a/src/ast.cc b/src/ast.cc
index 13e5589..0793356 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -70,6 +70,7 @@
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
       is_trivial_(false),
+      is_lvalue_(false),
       position_(RelocInfo::kNoPosition) {
   BindTo(var);
 }
@@ -84,6 +85,7 @@
       var_(NULL),
       is_this_(is_this),
       is_trivial_(false),
+      is_lvalue_(false),
       position_(position) {
   // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
diff --git a/src/ast.h b/src/ast.h
index 805526a..9b90d81 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1159,12 +1159,17 @@
 
   bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
 
+  bool IsLValue() {
+    return is_lvalue_;
+  }
+
   Handle<String> name() const { return name_; }
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
   int position() const { return position_; }
 
   void MarkAsTrivial() { is_trivial_ = true; }
+  void MarkAsLValue() { is_lvalue_ = true; }
 
   // Bind this proxy to the variable var.
   void BindTo(Variable* var);
@@ -1174,6 +1179,9 @@
   Variable* var_;  // resolved variable, or NULL
   bool is_this_;
   bool is_trivial_;
+  // True if this variable proxy is being used in an assignment
+  // or with an increment/decrement operator.
+  bool is_lvalue_;
   int position_;
 };
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 29c16ee..6d388a5 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -299,7 +299,7 @@
 
 void Bootstrapper::Iterate(ObjectVisitor* v) {
   extensions_cache_.Iterate(v);
-  v->Synchronize("Extensions");
+  v->Synchronize(VisitorSynchronization::kExtensions);
 }
 
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 43cf358..4d874c5 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -233,30 +233,57 @@
     return array->Initialize(JSArray::kPreallocatedArrayElements);
   }
 
-  // Take the arguments as elements.
-  int number_of_elements = args.length() - 1;
-  Smi* len = Smi::FromInt(number_of_elements);
-  Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-
   // Set length and elements on the array.
+  int number_of_elements = args.length() - 1;
   MaybeObject* maybe_object =
-      array->EnsureCanContainElements(FixedArray::cast(obj));
+      array->EnsureCanContainElements(&args, 1, number_of_elements,
+                                      ALLOW_CONVERTED_DOUBLE_ELEMENTS);
   if (maybe_object->IsFailure()) return maybe_object;
 
-  AssertNoAllocation no_gc;
-  FixedArray* elms = FixedArray::cast(obj);
-  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+  // Allocate an appropriately typed elements array.
+  MaybeObject* maybe_elms;
+  ElementsKind elements_kind = array->GetElementsKind();
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+        number_of_elements);
+  } else {
+    maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+  }
+  FixedArrayBase* elms;
+  if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
+
   // Fill in the content
-  for (int index = 0; index < number_of_elements; index++) {
-    elms->set(index, args[index+1], mode);
+  switch (array->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS: {
+      FixedArray* smi_elms = FixedArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        smi_elms->set(index, args[index+1], SKIP_WRITE_BARRIER);
+      }
+      break;
+    }
+    case FAST_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+      FixedArray* object_elms = FixedArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        object_elms->set(index, args[index+1], mode);
+      }
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        double_elms->set(index, args[index+1]->Number());
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
   }
 
-  array->set_elements(FixedArray::cast(obj));
-  array->set_length(len);
-
+  array->set_elements(elms);
+  array->set_length(Smi::FromInt(number_of_elements));
   return array;
 }
 
@@ -424,7 +451,8 @@
   MaybeObject* maybe_array = array->EnsureCanContainElements(
       args,
       first_added_arg,
-      args_length - first_added_arg);
+      args_length - first_added_arg,
+      DONT_ALLOW_DOUBLE_ELEMENTS);
   if (maybe_array->IsFailure()) return maybe_array;
   return array->elements();
 }
@@ -627,7 +655,8 @@
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
   MaybeObject* maybe_object =
-      array->EnsureCanContainElements(&args, 1, to_add);
+      array->EnsureCanContainElements(&args, 1, to_add,
+                                      DONT_ALLOW_DOUBLE_ELEMENTS);
   if (maybe_object->IsFailure()) return maybe_object;
 
   if (new_length > elms->length()) {
@@ -758,7 +787,8 @@
   FixedArray* result_elms = FixedArray::cast(result);
 
   MaybeObject* maybe_object =
-      result_array->EnsureCanContainElements(result_elms);
+      result_array->EnsureCanContainElements(result_elms,
+                                             DONT_ALLOW_DOUBLE_ELEMENTS);
   if (maybe_object->IsFailure()) return maybe_object;
 
   AssertNoAllocation no_gc;
@@ -1022,7 +1052,7 @@
     for (int i = 0; i < n_arguments; i++) {
       JSArray* array = JSArray::cast(args[i]);
       if (!array->HasFastSmiOnlyElements()) {
-        result_array->EnsureCanContainNonSmiElements();
+        result_array->EnsureCanContainHeapObjectElements();
         break;
       }
     }
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index ba7df80..85410c3 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -101,7 +101,14 @@
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   Code* code;
-  if (!FindCodeInCache(&code)) {
+  if (UseSpecialCache()
+      ? FindCodeInSpecialCache(&code)
+      : FindCodeInCache(&code)) {
+    ASSERT(IsPregenerated() == code->is_pregenerated());
+    return Handle<Code>(code);
+  }
+
+  {
     HandleScope scope(isolate);
 
     // Generate the new code.
@@ -121,19 +128,21 @@
     RecordCodeGeneration(*new_object, &masm);
     FinishCode(new_object);
 
-    // Update the dictionary and the root in Heap.
-    Handle<NumberDictionary> dict =
-        factory->DictionaryAtNumberPut(
-            Handle<NumberDictionary>(heap->code_stubs()),
-            GetKey(),
-            new_object);
-    heap->public_set_code_stubs(*dict);
+    if (UseSpecialCache()) {
+      AddToSpecialCache(new_object);
+    } else {
+      // Update the dictionary and the root in Heap.
+      Handle<NumberDictionary> dict =
+          factory->DictionaryAtNumberPut(
+              Handle<NumberDictionary>(heap->code_stubs()),
+              GetKey(),
+              new_object);
+      heap->public_set_code_stubs(*dict);
+    }
     code = *new_object;
-    Activate(code);
-  } else {
-    CHECK(IsPregenerated() == code->is_pregenerated());
   }
 
+  Activate(code);
   ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
   return Handle<Code>(code, isolate);
 }
@@ -159,6 +168,32 @@
 }
 
 
+void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
+  ASSERT(*known_map_ != NULL);
+  Isolate* isolate = new_object->GetIsolate();
+  Factory* factory = isolate->factory();
+  return Map::UpdateCodeCache(known_map_,
+                              factory->compare_ic_symbol(),
+                              new_object);
+}
+
+
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+  Isolate* isolate = known_map_->GetIsolate();
+  Factory* factory = isolate->factory();
+  Code::Flags flags = Code::ComputeFlags(
+      static_cast<Code::Kind>(GetCodeKind()),
+      UNINITIALIZED);
+  Handle<Object> probe(
+      known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
+  if (probe->IsCode()) {
+    *code_out = Code::cast(*probe);
+    return true;
+  }
+  return false;
+}
+
+
 int ICCompareStub::MinorKey() {
   return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
 }
@@ -184,6 +219,10 @@
     case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
+    case CompareIC::KNOWN_OBJECTS:
+      ASSERT(*known_map_ != NULL);
+      GenerateKnownObjects(masm);
+      break;
     default:
       UNREACHABLE();
   }
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 6bda5da..34da148 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -194,6 +194,17 @@
     return UNINITIALIZED;
   }
 
+  // Add the code to a specialized cache, specific to an individual
+  // stub type. Note that this method must add the code object to a
+  // roots object; otherwise the code will be removed during GC.
+  virtual void AddToSpecialCache(Handle<Code> new_object) { }
+
+  // Find code in a specialized cache; work is delegated to the specific stub.
+  virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
+
+  // If a stub uses a special cache, override this.
+  virtual bool UseSpecialCache() { return false; }
+
   // Returns a name for logging/debugging purposes.
   SmartArrayPointer<const char> GetName();
   virtual void PrintName(StringStream* stream);
@@ -442,12 +453,17 @@
 
 class MathPowStub: public CodeStub {
  public:
-  MathPowStub() {}
+  enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
+
+  explicit MathPowStub(ExponentType exponent_type)
+      : exponent_type_(exponent_type) { }
   virtual void Generate(MacroAssembler* masm);
 
  private:
   virtual CodeStub::Major MajorKey() { return MathPow; }
-  virtual int MinorKey() { return 0; }
+  virtual int MinorKey() { return exponent_type_; }
+
+  ExponentType exponent_type_;
 };
 
 
@@ -460,6 +476,8 @@
 
   virtual void Generate(MacroAssembler* masm);
 
+  void set_known_map(Handle<Map> map) { known_map_ = map; }
+
  private:
   class OpField: public BitField<int, 0, 3> { };
   class StateField: public BitField<int, 3, 5> { };
@@ -479,12 +497,18 @@
   void GenerateStrings(MacroAssembler* masm);
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
+  void GenerateKnownObjects(MacroAssembler* masm);
 
   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
 
+  virtual void AddToSpecialCache(Handle<Code> new_object);
+  virtual bool FindCodeInSpecialCache(Code** code_out);
+  virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
+
   Token::Value op_;
   CompareIC::State state_;
+  Handle<Map> known_map_;
 };
 
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 16ccfa0..d2a4a0b 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -398,7 +398,7 @@
   FunctionLiteral* lit = info->function();
   LiveEditFunctionTracker live_edit_tracker(isolate, lit);
   if (!MakeCode(info)) {
-    isolate->StackOverflow();
+    if (!isolate->has_pending_exception()) isolate->StackOverflow();
     return Handle<SharedFunctionInfo>::null();
   }
 
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 591d0b3..c30afa8 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -229,8 +229,6 @@
 
 
 const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-const int DebuggerAgentUtil::kContentLengthSize =
-    StrLength(kContentLength);
 
 
 SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
diff --git a/src/debug-agent.h b/src/debug-agent.h
index a07fb0f..6115190 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -115,7 +115,6 @@
 class DebuggerAgentUtil {
  public:
   static const char* const kContentLength;
-  static const int kContentLengthSize;
 
   static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
   static bool SendConnectMessage(const Socket* conn,
diff --git a/src/debug.cc b/src/debug.cc
index c654dfb..1e970e1 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1796,8 +1796,9 @@
           }
         } else if (frame->function()->IsJSFunction()) {
           JSFunction* function = JSFunction::cast(frame->function());
-          if (function->code()->kind() == Code::FUNCTION &&
-              !function->code()->has_debug_break_slots()) {
+          ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+          if (!frame->LookupCode()->has_debug_break_slots() ||
+              !function->shared()->code()->has_debug_break_slots()) {
             active_functions.Add(Handle<JSFunction>(function));
           }
         }
@@ -1853,20 +1854,16 @@
       if (function->code() == *lazy_compile) {
         function->set_code(shared->code());
       }
-      Handle<Code> current_code(function->code());
-      if (shared->code()->has_debug_break_slots()) {
-        // if the code is already recompiled to have break slots skip
-        // recompilation.
-        ASSERT(!function->code()->has_debug_break_slots());
-      } else {
+      if (!shared->code()->has_debug_break_slots()) {
         // Try to compile the full code with debug break slots. If it
         // fails just keep the current code.
-        ASSERT(shared->code() == *current_code);
+        Handle<Code> current_code(function->shared()->code());
         ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
         shared->set_code(*lazy_compile);
         bool prev_force_debugger_active =
             isolate_->debugger()->force_debugger_active();
         isolate_->debugger()->set_force_debugger_active(true);
+        ASSERT(current_code->kind() == Code::FUNCTION);
         CompileFullCodeForDebugging(shared, current_code);
         isolate_->debugger()->set_force_debugger_active(
             prev_force_debugger_active);
@@ -1883,10 +1880,13 @@
         // If the current frame is for this function in its
         // non-optimized form rewrite the return address to continue
         // in the newly compiled full code with debug break slots.
-        if (frame->function()->IsJSFunction() &&
-            frame->function() == *function &&
-            frame->LookupCode()->kind() == Code::FUNCTION) {
-          intptr_t delta = frame->pc() - current_code->instruction_start();
+        if (!frame->is_optimized() &&
+            frame->function()->IsJSFunction() &&
+            frame->function() == *function) {
+          ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+          Handle<Code> frame_code(frame->LookupCode());
+          if (frame_code->has_debug_break_slots()) continue;
+          intptr_t delta = frame->pc() - frame_code->instruction_start();
           int debug_break_slot_count = 0;
           int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
           for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
@@ -1915,11 +1915,11 @@
                    "for debugging, "
                    "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
                    reinterpret_cast<intptr_t>(
-                       current_code->instruction_start()),
+                       frame_code->instruction_start()),
                    reinterpret_cast<intptr_t>(
-                       current_code->instruction_start()) +
-                       current_code->instruction_size(),
-                   current_code->instruction_size(),
+                       frame_code->instruction_start()) +
+                       frame_code->instruction_size(),
+                   frame_code->instruction_size(),
                    reinterpret_cast<intptr_t>(new_code->instruction_start()),
                    reinterpret_cast<intptr_t>(new_code->instruction_start()) +
                        new_code->instruction_size(),
diff --git a/src/elements.cc b/src/elements.cc
index ef55d54..fd2b6d2 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -134,6 +134,22 @@
                                 JSObject* obj,
                                 Object* length);
 
+  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                            int capacity,
+                                            int length) {
+    return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
+        array,
+        capacity,
+        length);
+  }
+
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       int capacity,
+                                                       int length) {
+    UNIMPLEMENTED();
+    return obj;
+  }
+
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
@@ -376,11 +392,6 @@
     return heap->true_value();
   }
 
- protected:
-  friend class FastElementsAccessor<FastObjectElementsAccessor,
-                                    FixedArray,
-                                    kPointerSize>;
-
   static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
                                                        uint32_t capacity,
                                                        uint32_t length) {
@@ -393,6 +404,11 @@
                                                  set_capacity_mode);
   }
 
+ protected:
+  friend class FastElementsAccessor<FastObjectElementsAccessor,
+                                    FixedArray,
+                                    kPointerSize>;
+
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
@@ -405,6 +421,12 @@
     : public FastElementsAccessor<FastDoubleElementsAccessor,
                                   FixedDoubleArray,
                                   kDoubleSize> {
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       uint32_t capacity,
+                                                       uint32_t length) {
+    return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+  }
+
  protected:
   friend class ElementsAccessorBase<FastDoubleElementsAccessor,
                                     FixedDoubleArray>;
@@ -412,12 +434,6 @@
                                     FixedDoubleArray,
                                     kDoubleSize>;
 
-  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
-                                                       uint32_t capacity,
-                                                       uint32_t length) {
-    return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
-  }
-
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
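
The SetCapacityAndLength plumbing above relies on the curiously recurring template pattern already used by ElementsAccessorBase: the virtual method is implemented once in the base template and statically forwards to a same-named static that each concrete accessor shadows, with the UNIMPLEMENTED() default catching accessors that forgot to. A stripped-down sketch of that dispatch with placeholder names:

#include <cassert>
#include <cstdio>

class Backing { /* stand-in for JSObject/JSArray */ };

class AccessorBase {
 public:
  virtual ~AccessorBase() {}
  virtual void SetCapacityAndLength(Backing* b, int capacity, int length) = 0;
};

template <typename Subclass>
class AccessorTemplate : public AccessorBase {
 public:
  // Implemented once here; statically forwards to the concrete subclass.
  virtual void SetCapacityAndLength(Backing* b, int capacity, int length) {
    Subclass::SetFastCapacityAndLength(b, capacity, length);
  }

  // Default that concrete accessors are expected to shadow.
  static void SetFastCapacityAndLength(Backing*, int, int) {
    assert(false && "not implemented for this elements kind");
  }
};

class FastAccessor : public AccessorTemplate<FastAccessor> {
 public:
  static void SetFastCapacityAndLength(Backing*, int capacity, int length) {
    std::printf("resize to capacity=%d length=%d\n", capacity, length);
  }
};

int main() {
  FastAccessor fast;
  AccessorBase* accessor = &fast;
  accessor->SetCapacityAndLength(nullptr, 8, 3);  // dispatches to FastAccessor
}
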
diff --git a/src/elements.h b/src/elements.h
index ed1ca5e..a2a184d 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -44,11 +44,24 @@
                            JSObject* holder,
                            Object* receiver) = 0;
 
-  // Modifies the length data property as specified for JSArrays and resizes
-  // the underlying backing store accordingly.
+  // Modifies the length data property as specified for JSArrays and resizes
+  // the underlying backing store accordingly. The method honors the semantics
+  // of changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e.
+  // arrays that have non-deletable elements can only be shrunk to the size
+  // of the highest non-deletable element.
   virtual MaybeObject* SetLength(JSObject* holder,
                                  Object* new_length) = 0;
 
+  // Modifies both the length and capacity of a JSArray, resizing the
+  // underlying backing store as necessary. This method does NOT honor the
+  // semantics of ECMAScript 5.1 15.4.5.2: arrays can be shrunk beyond
+  // non-deletable elements. It should only be called for array expansion OR
+  // by runtime JavaScript code that uses InternalArrays and doesn't care
+  // about ECMAScript 5.1 semantics.
+  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                            int capacity,
+                                            int length) = 0;
+
   virtual MaybeObject* Delete(JSObject* holder,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
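
To make the contract split in these comments concrete, here is a toy model in plain C++ (none of these are V8 types): SetLength implements the ES5.1 rule that shrinking stops at the highest non-deletable element, while SetCapacityAndLength resizes unconditionally.

#include <cstddef>
#include <vector>

struct Element {
  int value;
  bool deletable;  // false models a non-configurable array property
};

struct ToyArray {
  std::vector<Element> elems;

  // ES5.1 15.4.5.2 semantics: shrinking stops at the highest non-deletable
  // element, so the resulting length may exceed the requested one.
  void SetLength(size_t new_length) {
    size_t floor = 0;
    for (size_t i = 0; i < elems.size(); ++i) {
      if (!elems[i].deletable) floor = i + 1;
    }
    if (new_length < floor) new_length = floor;
    elems.resize(new_length, Element{0, true});
  }

  // Raw resize, for expansion or InternalArrays: ignores deletability.
  void SetCapacityAndLength(size_t capacity, size_t length) {
    elems.reserve(capacity);
    elems.resize(length, Element{0, true});
  }
};
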
diff --git a/src/factory.cc b/src/factory.cc
index f1042a4..c2976a5 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -926,28 +926,48 @@
 }
 
 
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
                                                 PretenureFlag pretenure) {
   Handle<JSArray> result =
       Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
                                         pretenure));
+  result->set_length(Smi::FromInt(0));
   SetContent(result, elements);
   return result;
 }
 
 
+void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
+                                           int capacity,
+                                           int length) {
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      accessor->SetCapacityAndLength(*array, capacity, length));
+}
+
+
 void Factory::SetContent(Handle<JSArray> array,
-                         Handle<FixedArray> elements) {
+                         Handle<FixedArrayBase> elements) {
   CALL_HEAP_FUNCTION_VOID(
       isolate(),
       array->SetContent(*elements));
 }
 
 
-void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
+void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
   CALL_HEAP_FUNCTION_VOID(
       isolate(),
-      array->EnsureCanContainNonSmiElements());
+      array->EnsureCanContainHeapObjectElements());
+}
+
+
+void Factory::EnsureCanContainElements(Handle<JSArray> array,
+                                       Handle<FixedArrayBase> elements,
+                                       EnsureElementsMode mode) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->EnsureCanContainElements(*elements, mode));
 }
 
 
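Each new Factory method funnels a raw MaybeObject*-returning heap call through CALL_HEAP_FUNCTION_VOID, which hides the retry-after-GC protocol behind a handle-friendly void API. A schematic of that protocol with invented names (the real macro also re-establishes handles and eventually reports OOM rather than throwing):

#include <functional>
#include <stdexcept>

enum class AllocResult { kOk, kRetryAfterGc };

// Run an allocation attempt, collecting garbage and retrying on failure.
// 'allocate' stands in for a MaybeObject*-producing heap call; 'collect'
// stands in for a GC of the appropriate space.
void CallHeapFunctionVoid(const std::function<AllocResult()>& allocate,
                          const std::function<void()>& collect,
                          int max_attempts = 2) {
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    if (allocate() == AllocResult::kOk) return;
    collect();  // free some space, then try the allocation again
  }
  throw std::runtime_error("allocation failed even after GC");
}
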
diff --git a/src/factory.h b/src/factory.h
index 0f028e5..e9a43fd 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -259,12 +259,19 @@
                              PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArrayWithElements(
-      Handle<FixedArray> elements,
+      Handle<FixedArrayBase> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
-  void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
+  void SetElementsCapacityAndLength(Handle<JSArray> array,
+                                    int capacity,
+                                    int length);
 
-  void EnsureCanContainNonSmiElements(Handle<JSArray> array);
+  void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+
+  void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
+  void EnsureCanContainElements(Handle<JSArray> array,
+                                Handle<FixedArrayBase> elements,
+                                EnsureElementsMode mode);
 
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
diff --git a/src/frames.cc b/src/frames.cc
index 9fd0042..e3ed2de 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -723,12 +723,17 @@
       JavaScriptFrame* frame = it.frame();
       if (frame->IsConstructor()) PrintF(file, "new ");
       // function name
-      Object* fun = frame->function();
-      if (fun->IsJSFunction()) {
-        SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
-        shared->DebugName()->ShortPrint(file);
+      Object* maybe_fun = frame->function();
+      if (maybe_fun->IsJSFunction()) {
+        JSFunction* fun = JSFunction::cast(maybe_fun);
+        fun->PrintName(file);
+        Code* js_code = frame->unchecked_code();
+        Address pc = frame->pc();
+        int code_offset =
+            static_cast<int>(pc - js_code->instruction_start());
+        PrintF(file, "+%d", code_offset);
+        SharedFunctionInfo* shared = fun->shared();
         if (print_line_number) {
-          Address pc = frame->pc();
           Code* code = Code::cast(
               v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
           int source_pos = code->SourcePosition(pc);
@@ -751,7 +756,7 @@
           }
         }
       } else {
-        fun->ShortPrint(file);
+        PrintF(file, "<unknown>");
       }
 
       if (print_args) {
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 8977cdb..ef6e58e 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -125,7 +125,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<HeapObject*>(result)->set_map(map);
+  // String maps are all immortal immovable objects.
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(str.length());
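
On set_map_no_write_barrier, used here and throughout heap.cc below: a generational write barrier only needs to record a store whose target might be young or might move. String maps, like all maps set up at bootstrap, are immortal and immovable, so the barrier is provably dead. A toy barrier showing the distinction (invented types, not V8's store buffer):

#include <unordered_set>

struct Obj {
  Obj* map_slot = nullptr;
  bool in_new_space = false;
};

std::unordered_set<Obj**> remembered_set;  // toy store buffer

// Barriered store: record slots that may point old-to-new.
void SetMap(Obj* host, Obj* map) {
  host->map_slot = map;
  if (!host->in_new_space && map->in_new_space) {
    remembered_set.insert(&host->map_slot);
  }
}

// Unbarriered store: only valid when 'map' is immortal and immovable,
// e.g. allocated once at heap setup and never scavenged.
void SetMapNoWriteBarrier(Obj* host, Obj* map) {
  host->map_slot = map;
}
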
diff --git a/src/heap.cc b/src/heap.cc
index f948c6c..bc7550e 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -80,7 +80,7 @@
 #endif
       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
-      initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
       max_executable_size_(128l * LUMP_OF_MEMORY),
 
@@ -1012,7 +1012,7 @@
       // Store Buffer overflowed while scanning promoted objects.  These are not
       // in any particular page, though they are likely to be clustered by the
       // allocation routines.
-      store_buffer_->HandleFullness();
+      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
     } else {
       // Store Buffer overflowed while scanning a particular old space page for
       // pointers to new space.
@@ -1813,7 +1813,7 @@
   }
 
   Map* map = reinterpret_cast<Map*>(result);
-  map->set_map_unsafe(meta_map());
+  map->set_map_no_write_barrier(meta_map());
   map->set_instance_type(instance_type);
   map->set_visitor_id(
       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
@@ -2173,7 +2173,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -2191,7 +2191,7 @@
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -2202,7 +2202,8 @@
   { MaybeObject* maybe_result = AllocateRawCell();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map_unsafe(global_property_cell_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(
+      global_property_cell_map());
   JSGlobalPropertyCell::cast(result)->set_value(value);
   return result;
 }
@@ -2416,6 +2417,7 @@
   }
   set_code_stubs(NumberDictionary::cast(obj));
 
+
   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
   // is set to avoid expanding the dictionary during bootstrapping.
   { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
@@ -2543,7 +2545,7 @@
       }
     }
   }
-  array->set_map(heap->fixed_cow_array_map());
+  array->set_map_no_write_barrier(heap->fixed_cow_array_map());
 }
 
 
@@ -3139,7 +3141,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+      byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -3157,7 +3160,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+      byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -3167,11 +3171,11 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map_unsafe(one_pointer_filler_map());
+    filler->set_map_no_write_barrier(one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map_unsafe(two_pointer_filler_map());
+    filler->set_map_no_write_barrier(two_pointer_filler_map());
   } else {
-    filler->set_map_unsafe(free_space_map());
+    filler->set_map_no_write_barrier(free_space_map());
     FreeSpace::cast(filler)->set_size(size);
   }
 }
@@ -3189,7 +3193,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ExternalArray*>(result)->set_map_unsafe(
+  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
       MapForExternalArrayType(array_type));
   reinterpret_cast<ExternalArray*>(result)->set_length(length);
   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -3226,7 +3230,7 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
 
   // Initialize the object
-  HeapObject::cast(result)->set_map_unsafe(code_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   ASSERT(!isolate_->code_range()->exists() ||
       isolate_->code_range()->contains(code->address()));
@@ -3355,7 +3359,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // No need for write barrier since object is white and map is in old space.
-  HeapObject::cast(result)->set_map_unsafe(map);
+  HeapObject::cast(result)->set_map_no_write_barrier(map);
   return result;
 }
 
@@ -4084,7 +4088,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(chars);
@@ -4128,7 +4132,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map_unsafe(ascii_string_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4163,7 +4167,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map_unsafe(string_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4179,7 +4183,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedArray*>(result)->set_map_unsafe(fixed_array_map());
+  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
+      fixed_array_map());
   reinterpret_cast<FixedArray*>(result)->set_length(0);
   return result;
 }
@@ -4208,13 +4213,13 @@
   }
   if (InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    dst->set_map_unsafe(map);
+    dst->set_map_no_write_barrier(map);
     CopyBlock(dst->address() + kPointerSize,
               src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  HeapObject::cast(obj)->set_map_unsafe(map);
+  HeapObject::cast(obj)->set_map_no_write_barrier(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
@@ -4234,7 +4239,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   HeapObject* dst = HeapObject::cast(obj);
-  dst->set_map_unsafe(map);
+  dst->set_map_no_write_barrier(map);
   CopyBlock(
       dst->address() + FixedDoubleArray::kLengthOffset,
       src->address() + FixedDoubleArray::kLengthOffset,
@@ -4252,7 +4257,7 @@
   }
   // Initialize header.
   FixedArray* array = reinterpret_cast<FixedArray*>(result);
-  array->set_map_unsafe(fixed_array_map());
+  array->set_map_no_write_barrier(fixed_array_map());
   array->set_length(length);
   // Initialize body.
   ASSERT(!InNewSpace(undefined_value()));
@@ -4300,7 +4305,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
   FixedArray* array = FixedArray::cast(result);
   array->set_length(length);
   MemsetPointer(array->data_start(), filler, length);
@@ -4333,7 +4338,8 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  reinterpret_cast<FixedArray*>(obj)->set_map_unsafe(fixed_array_map());
+  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
+      fixed_array_map());
   FixedArray::cast(obj)->set_length(length);
   return obj;
 }
@@ -4347,7 +4353,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedDoubleArray*>(result)->set_map_unsafe(
+  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
       fixed_double_array_map());
   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
   return result;
@@ -4364,7 +4370,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  reinterpret_cast<FixedDoubleArray*>(obj)->set_map_unsafe(
+  reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
       fixed_double_array_map());
   FixedDoubleArray::cast(obj)->set_length(length);
   return obj;
@@ -4401,7 +4407,8 @@
   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  reinterpret_cast<HeapObject*>(result)->set_map_unsafe(hash_table_map());
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
+      hash_table_map());
   ASSERT(result->IsHashTable());
   return result;
 }
@@ -4414,7 +4421,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(global_context_map());
+  context->set_map_no_write_barrier(global_context_map());
   ASSERT(context->IsGlobalContext());
   ASSERT(result->IsContext());
   return result;
@@ -4428,7 +4435,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(function_context_map());
+  context->set_map_no_write_barrier(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
   context->set_extension(NULL);
@@ -4448,7 +4455,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(catch_context_map());
+  context->set_map_no_write_barrier(catch_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(name);
@@ -4466,7 +4473,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(with_context_map());
+  context->set_map_no_write_barrier(with_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
@@ -4484,7 +4491,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(block_context_map());
+  context->set_map_no_write_barrier(block_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(scope_info);
@@ -4497,7 +4504,7 @@
   FixedArray* scope_info;
   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
-  scope_info->set_map_unsafe(scope_info_map());
+  scope_info->set_map_no_write_barrier(scope_info_map());
   return scope_info;
 }
 
@@ -4541,8 +4548,10 @@
 
 
 bool Heap::IdleNotification(int hint) {
-  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
-    return hint < 1000 ? true : IdleGlobalGC();
+  if (hint >= 1000) return IdleGlobalGC();
+  if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
+      FLAG_expose_gc || Serializer::enabled()) {
+    return true;
   }
 
   // By doing small chunks of GC work in each IdleNotification,
@@ -5150,29 +5159,29 @@
 
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  v->Synchronize("symbol_table");
+  v->Synchronize(VisitorSynchronization::kSymbolTable);
   if (mode != VISIT_ALL_IN_SCAVENGE &&
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
   }
-  v->Synchronize("external_string_table");
+  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }
 
 
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  v->Synchronize("strong_root_list");
+  v->Synchronize(VisitorSynchronization::kStrongRootList);
 
   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
-  v->Synchronize("symbol");
+  v->Synchronize(VisitorSynchronization::kSymbol);
 
   isolate_->bootstrapper()->Iterate(v);
-  v->Synchronize("bootstrapper");
+  v->Synchronize(VisitorSynchronization::kBootstrapper);
   isolate_->Iterate(v);
-  v->Synchronize("top");
+  v->Synchronize(VisitorSynchronization::kTop);
   Relocatable::Iterate(v);
-  v->Synchronize("relocatable");
+  v->Synchronize(VisitorSynchronization::kRelocatable);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->Iterate(v);
@@ -5180,13 +5189,13 @@
     isolate_->deoptimizer_data()->Iterate(v);
   }
 #endif
-  v->Synchronize("debug");
+  v->Synchronize(VisitorSynchronization::kDebug);
   isolate_->compilation_cache()->Iterate(v);
-  v->Synchronize("compilationcache");
+  v->Synchronize(VisitorSynchronization::kCompilationCache);
 
   // Iterate over local handles in handle scopes.
   isolate_->handle_scope_implementer()->Iterate(v);
-  v->Synchronize("handlescope");
+  v->Synchronize(VisitorSynchronization::kHandleScope);
 
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
@@ -5194,7 +5203,7 @@
   if (mode != VISIT_ALL_IN_SCAVENGE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
-  v->Synchronize("builtins");
+  v->Synchronize(VisitorSynchronization::kBuiltins);
 
   // Iterate over global handles.
   switch (mode) {
@@ -5209,11 +5218,11 @@
       isolate_->global_handles()->IterateAllRoots(v);
       break;
   }
-  v->Synchronize("globalhandles");
+  v->Synchronize(VisitorSynchronization::kGlobalHandles);
 
   // Iterate over pointers being held by inactive threads.
   isolate_->thread_manager()->Iterate(v);
-  v->Synchronize("threadmanager");
+  v->Synchronize(VisitorSynchronization::kThreadManager);
 
   // Iterate over the pointers the Serialization/Deserialization code is
   // holding.
@@ -5413,7 +5422,7 @@
 
     Address map_addr = map_p->address();
 
-    obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
     MarkObjectRecursively(&map);
 
@@ -5460,7 +5469,7 @@
 
     HeapObject* map_p = HeapObject::FromAddress(map_addr);
 
-    obj->set_map(reinterpret_cast<Map*>(map_p));
+    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
 
     UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
 
@@ -6172,7 +6181,7 @@
 
   Address map_addr = map_p->address();
 
-  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
   // Scan the object body.
   if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@@ -6214,7 +6223,7 @@
 
   HeapObject* map_p = HeapObject::FromAddress(map_addr);
 
-  obj->set_map(reinterpret_cast<Map*>(map_p));
+  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
 
   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
 
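The Synchronize changes replace free-form string tags with a VisitorSynchronization enum, so the serializer and deserializer can emit and verify a compact tag at each root-group boundary instead of comparing strings. A minimal sketch of that handshake, with an invented tag set and stream type:

#include <cassert>
#include <cstddef>
#include <vector>

namespace VisitorSync {
enum Tag { kSymbolTable, kStrongRootList, kBootstrapper, kTop };
}

// Serializer side: emit a compact tag at each root-group boundary.
void WriteSyncTag(std::vector<int>* stream, VisitorSync::Tag tag) {
  stream->push_back(static_cast<int>(tag));
}

// Deserializer side: consume the tag and verify both sides agree on the
// visitation order; with string tags this was a strcmp per boundary.
void ReadAndCheckSyncTag(const std::vector<int>& stream, size_t* pos,
                         VisitorSync::Tag expected) {
  int tag = stream[*pos];
  ++(*pos);
  assert(tag == static_cast<int>(expected));
  (void)tag;  // silence unused warning in NDEBUG builds
}
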
diff --git a/src/heap.h b/src/heap.h
index 741e3d9..d92a4fb 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -245,6 +245,7 @@
   V(use_strict, "use strict")                                            \
   V(dot_symbol, ".")                                                     \
   V(anonymous_function_symbol, "(anonymous function)")                   \
+  V(compare_ic_symbol, ".compare_ic")                                    \
   V(infinity_symbol, "Infinity")                                         \
   V(minus_infinity_symbol, "-Infinity")
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 32c3abf..31d7d15 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1227,10 +1227,7 @@
 
 
 bool HArrayLiteral::IsCopyOnWrite() const {
-  Handle<FixedArray> constant_elements = this->constant_elements();
-  FixedArrayBase* constant_elements_values =
-      FixedArrayBase::cast(constant_elements->get(1));
-  return constant_elements_values->map() == HEAP->fixed_cow_array_map();
+  return boilerplate_object_->elements()->map() == HEAP->fixed_cow_array_map();
 }
 
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 52fed88..d7c0eb0 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -3447,8 +3447,21 @@
 
 class HLoadContextSlot: public HUnaryOperation {
  public:
-  HLoadContextSlot(HValue* context , int slot_index)
-      : HUnaryOperation(context), slot_index_(slot_index) {
+  enum Mode {
+    // Perform a normal load of the context slot without checking its value.
+    kLoad,
+    // Load and check the value of the context slot. Deoptimize if it's the
+    // hole value. This is used to detect loads of uninitialized harmony
+    // bindings: we deoptimize into full-codegen generated code, which then
+    // throws a reference error.
+    kLoadCheck
+  };
+
+  HLoadContextSlot(HValue* context, Variable* var)
+      : HUnaryOperation(context), slot_index_(var->index()) {
+    ASSERT(var->IsContextSlot());
+    mode_ = (var->mode() == LET || var->mode() == CONST_HARMONY)
+        ? kLoadCheck : kLoad;
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnContextSlots);
@@ -3456,6 +3469,10 @@
 
   int slot_index() const { return slot_index_; }
 
+  bool RequiresHoleCheck() {
+    return mode_ == kLoadCheck;
+  }
+
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
@@ -3472,13 +3489,25 @@
 
  private:
   int slot_index_;
+  Mode mode_;
 };
 
 
 class HStoreContextSlot: public HTemplateInstruction<2> {
  public:
-  HStoreContextSlot(HValue* context, int slot_index, HValue* value)
-      : slot_index_(slot_index) {
+  enum Mode {
+    // Perform a normal store to the context slot without checking its previous
+    // value.
+    kAssign,
+    // Check the previous value of the context slot and deoptimize if it's
+    // the hole value. This is used to detect assignments to uninitialized
+    // harmony bindings: we deoptimize into full-codegen generated code,
+    // which then throws a reference error.
+    kAssignCheck
+  };
+
+  HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
+      : slot_index_(slot_index), mode_(mode) {
     SetOperandAt(0, context);
     SetOperandAt(1, value);
     SetFlag(kChangesContextSlots);
@@ -3487,11 +3516,16 @@
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
   int slot_index() const { return slot_index_; }
+  Mode mode() const { return mode_; }
 
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
   }
 
+  bool RequiresHoleCheck() {
+    return mode_ == kAssignCheck;
+  }
+
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
@@ -3502,6 +3536,7 @@
 
  private:
   int slot_index_;
+  Mode mode_;
 };
 
 
@@ -4167,18 +4202,21 @@
 class HArrayLiteral: public HMaterializedLiteral<1> {
  public:
   HArrayLiteral(HValue* context,
-                Handle<FixedArray> constant_elements,
+                Handle<JSObject> boilerplate_object,
                 int length,
                 int literal_index,
                 int depth)
       : HMaterializedLiteral<1>(literal_index, depth),
         length_(length),
-        constant_elements_(constant_elements) {
+        boilerplate_object_(boilerplate_object) {
     SetOperandAt(0, context);
   }
 
   HValue* context() { return OperandAt(0); }
-  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  ElementsKind boilerplate_elements_kind() const {
+    return boilerplate_object_->GetElementsKind();
+  }
+  Handle<JSObject> boilerplate_object() const { return boilerplate_object_; }
   int length() const { return length_; }
 
   bool IsCopyOnWrite() const;
@@ -4192,7 +4230,7 @@
 
  private:
   int length_;
-  Handle<FixedArray> constant_elements_;
+  Handle<JSObject> boilerplate_object_;
 };
 
 
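The new Mode enums in hydrogen-instructions.h encode one bit of policy: bindings introduced by let or harmony const start out as the hole, so loads and stores must check for it and deoptimize, while ordinary var bindings never need the check. A condensed sketch of the policy and the checked load (the enum values and the Deoptimize hook are stand-ins):

#include <optional>

enum VariableMode { VAR, LET, CONST_HARMONY };
enum LoadMode { kLoad, kLoadCheck };

LoadMode LoadModeFor(VariableMode mode) {
  // Harmony bindings may be read before initialization (the hole);
  // everything else is initialized to undefined up front.
  return (mode == LET || mode == CONST_HARMONY) ? kLoadCheck : kLoad;
}

// Checked load: std::nullopt plays the role of the hole value here.
int LoadContextSlot(const std::optional<int>& slot, LoadMode mode,
                    void (*deoptimize)()) {
  if (mode == kLoadCheck && !slot.has_value()) {
    deoptimize();  // fall back to full-codegen, which throws ReferenceError
  }
  return slot.value_or(0);
}
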
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 5c0703b..3663237 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -2756,10 +2756,13 @@
   }
 
   // 2. Build all the tests, with dangling true branches
+  int default_id = AstNode::kNoNumber;
   for (int i = 0; i < clause_count; ++i) {
     CaseClause* clause = clauses->at(i);
-    if (clause->is_default()) continue;
-
+    if (clause->is_default()) {
+      default_id = clause->EntryId();
+      continue;
+    }
     if (switch_type == SMI_SWITCH) {
       clause->RecordTypeFeedback(oracle());
     }
@@ -2806,7 +2809,10 @@
   HBasicBlock* last_block = current_block();
 
   if (not_string_block != NULL) {
-    last_block = CreateJoin(last_block, not_string_block, stmt->ExitId());
+    int join_id = (default_id != AstNode::kNoNumber)
+        ? default_id
+        : stmt->ExitId();
+    last_block = CreateJoin(last_block, not_string_block, join_id);
   }
 
   // 3. Loop over the clauses and the linked list of tests in lockstep,
@@ -3222,11 +3228,11 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   Variable* variable = expr->var();
-  if (variable->mode() == LET) {
-    return Bailout("reference to let variable");
-  }
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
+      if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
+        return Bailout("reference to global harmony declared variable");
+      }
       // Handle known global constants like 'undefined' specially to avoid a
       // load from a global cell for them.
       Handle<Object> constant_value =
@@ -3269,9 +3275,11 @@
     case Variable::PARAMETER:
     case Variable::LOCAL: {
       HValue* value = environment()->Lookup(variable);
-      if (variable->mode() == CONST &&
-          value == graph()->GetConstantHole()) {
-        return Bailout("reference to uninitialized const variable");
+      if (value == graph()->GetConstantHole()) {
+        ASSERT(variable->mode() == CONST ||
+               variable->mode() == CONST_HARMONY ||
+               variable->mode() == LET);
+        return Bailout("reference to uninitialized variable");
       }
       return ast_context()->ReturnValue(value);
     }
@@ -3281,8 +3289,7 @@
         return Bailout("reference to const context slot");
       }
       HValue* context = BuildContextChainWalk(variable);
-      HLoadContextSlot* instr =
-          new(zone()) HLoadContextSlot(context, variable->index());
+      HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
       return ast_context()->ReturnInstruction(instr, expr->id());
     }
 
@@ -3325,13 +3332,13 @@
                                 int* total_size) {
   if (max_depth <= 0) return false;
 
-  FixedArrayBase* elements = boilerplate->elements();
+  Handle<FixedArrayBase> elements(boilerplate->elements());
   if (elements->length() > 0 &&
       elements->map() != HEAP->fixed_cow_array_map()) {
     return false;
   }
 
-  FixedArray* properties = boilerplate->properties();
+  Handle<FixedArray> properties(boilerplate->properties());
   if (properties->length() > 0) {
     return false;
   } else {
@@ -3457,11 +3464,25 @@
   int length = subexprs->length();
   HValue* context = environment()->LookupContext();
 
-  HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
-                                                     expr->constant_elements(),
-                                                     length,
-                                                     expr->literal_index(),
-                                                     expr->depth());
+  Handle<FixedArray> literals(environment()->closure()->literals());
+  Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
+
+  // For now, a missing boilerplate causes a deopt.
+  if (raw_boilerplate->IsUndefined()) {
+    AddInstruction(new(zone()) HSoftDeoptimize);
+    return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+  }
+
+  Handle<JSObject> boilerplate(Handle<JSObject>::cast(raw_boilerplate));
+  ElementsKind boilerplate_elements_kind = boilerplate->GetElementsKind();
+
+  HArrayLiteral* literal = new(zone()) HArrayLiteral(
+      context,
+      boilerplate,
+      length,
+      expr->literal_index(),
+      expr->depth());
+
   // The array is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -3484,42 +3505,25 @@
     HValue* key = AddInstruction(
         new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
                               Representation::Integer32()));
-    HInstruction* elements_kind =
-        AddInstruction(new(zone()) HElementsKind(literal));
-    HBasicBlock* store_fast = graph()->CreateBasicBlock();
-    // Two empty blocks to satisfy edge split form.
-    HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
-    HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
-    HBasicBlock* store_generic = graph()->CreateBasicBlock();
-    HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
-    HBasicBlock* join = graph()->CreateBasicBlock();
 
-    HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
-    smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
-    smicheck->SetSuccessorAt(1, check_smi_only_elements);
-    current_block()->Finish(smicheck);
-    store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
-
-    set_current_block(check_smi_only_elements);
-    HCompareConstantEqAndBranch* smi_elements_check =
-        new(zone()) HCompareConstantEqAndBranch(elements_kind,
-                                                FAST_ELEMENTS,
-                                                Token::EQ_STRICT);
-    smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
-    smi_elements_check->SetSuccessorAt(1, store_generic);
-    current_block()->Finish(smi_elements_check);
-    store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
-
-    set_current_block(store_fast);
-    AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
-    store_fast->Goto(join);
-
-    set_current_block(store_generic);
-    AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
-    store_generic->Goto(join);
-
-    join->SetJoinId(expr->id());
-    set_current_block(join);
+    switch (boilerplate_elements_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_ELEMENTS:
+        AddInstruction(new(zone()) HStoreKeyedFastElement(
+            elements,
+            key,
+            value,
+            boilerplate_elements_kind));
+        break;
+      case FAST_DOUBLE_ELEMENTS:
+        AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                                key,
+                                                                value));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
 
     AddSimulate(expr->GetIdForElement(i));
   }
@@ -3838,8 +3842,11 @@
         }
 
         HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot::Mode mode =
+            (var->mode() == LET || var->mode() == CONST_HARMONY)
+            ? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
         HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), Top());
+            new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
         AddInstruction(instr);
         if (instr->HasObservableSideEffects()) {
           AddSimulate(expr->AssignmentId());
@@ -3959,8 +3966,10 @@
       // variables (e.g. initialization inside a loop).
       HValue* old_value = environment()->Lookup(var);
       AddInstruction(new HUseConst(old_value));
-    } else if (var->mode() == LET) {
-      return Bailout("unsupported assignment to let");
+    } else if (var->mode() == CONST_HARMONY) {
+      if (expr->op() != Token::INIT_CONST_HARMONY) {
+        return Bailout("non-initializer assignment to const");
+      }
     }
 
     if (proxy->IsArguments()) return Bailout("assignment to arguments");
@@ -3977,6 +3986,14 @@
 
       case Variable::PARAMETER:
       case Variable::LOCAL: {
+        // Perform an initialization check for let declared variables
+        // or parameters.
+        if (var->mode() == LET && expr->op() == Token::ASSIGN) {
+          HValue* env_value = environment()->Lookup(var);
+          if (env_value == graph()->GetConstantHole()) {
+            return Bailout("assignment to let variable before initialization");
+          }
+        }
         // We do not allow the arguments object to occur in a context where it
         // may escape, but assignments to stack-allocated locals are
         // permitted.
@@ -4004,8 +4021,18 @@
 
         CHECK_ALIVE(VisitForValue(expr->value()));
         HValue* context = BuildContextChainWalk(var);
-        HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), Top());
+        HStoreContextSlot::Mode mode;
+        if (expr->op() == Token::ASSIGN) {
+          mode = (var->mode() == LET || var->mode() == CONST_HARMONY)
+              ? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
+        } else {
+          ASSERT(expr->op() == Token::INIT_VAR ||
+                 expr->op() == Token::INIT_LET ||
+                 expr->op() == Token::INIT_CONST_HARMONY);
+          mode = HStoreContextSlot::kAssign;
+        }
+        HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
+            context, var->index(), mode, Top());
         AddInstruction(instr);
         if (instr->HasObservableSideEffects()) {
           AddSimulate(expr->AssignmentId());
@@ -5614,8 +5641,11 @@
         }
 
         HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot::Mode mode =
+            (var->mode() == LET || var->mode() == CONST_HARMONY)
+            ? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
         HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), after);
+            new(zone()) HStoreContextSlot(context, var->index(), mode, after);
         AddInstruction(instr);
         if (instr->HasObservableSideEffects()) {
           AddSimulate(expr->AssignmentId());
@@ -6116,14 +6146,27 @@
     switch (op) {
       case Token::EQ:
       case Token::EQ_STRICT: {
-        AddInstruction(new(zone()) HCheckNonSmi(left));
-        AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
-        AddInstruction(new(zone()) HCheckNonSmi(right));
-        AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
-        HCompareObjectEqAndBranch* result =
-            new(zone()) HCompareObjectEqAndBranch(left, right);
-        result->set_position(expr->position());
-        return ast_context()->ReturnControl(result, expr->id());
+        // Can we get away with a map check and not an instance type check?
+        Handle<Map> map = oracle()->GetCompareMap(expr);
+        if (!map.is_null()) {
+          AddInstruction(new(zone()) HCheckNonSmi(left));
+          AddInstruction(new(zone()) HCheckMap(left, map));
+          AddInstruction(new(zone()) HCheckNonSmi(right));
+          AddInstruction(new(zone()) HCheckMap(right, map));
+          HCompareObjectEqAndBranch* result =
+              new(zone()) HCompareObjectEqAndBranch(left, right);
+          result->set_position(expr->position());
+          return ast_context()->ReturnControl(result, expr->id());
+        } else {
+          AddInstruction(new(zone()) HCheckNonSmi(left));
+          AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+          AddInstruction(new(zone()) HCheckNonSmi(right));
+          AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+          HCompareObjectEqAndBranch* result =
+              new(zone()) HCompareObjectEqAndBranch(left, right);
+          result->set_position(expr->position());
+          return ast_context()->ReturnControl(result, expr->id());
+        }
       }
       default:
         return Bailout("Unsupported non-primitive compare");
@@ -6188,28 +6231,27 @@
 void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
                                       VariableMode mode,
                                       FunctionLiteral* function) {
-  if (mode == LET || mode == CONST_HARMONY) {
-    return Bailout("unsupported harmony declaration");
-  }
   Variable* var = proxy->var();
+  bool binding_needs_init =
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (var->location()) {
     case Variable::UNALLOCATED:
       return Bailout("unsupported global declaration");
     case Variable::PARAMETER:
     case Variable::LOCAL:
     case Variable::CONTEXT:
-      if (mode == CONST || function != NULL) {
+      if (binding_needs_init || function != NULL) {
         HValue* value = NULL;
-        if (mode == CONST) {
-          value = graph()->GetConstantHole();
-        } else {
+        if (function != NULL) {
           VisitForValue(function);
           value = Pop();
+        } else {
+          value = graph()->GetConstantHole();
         }
         if (var->IsContextSlot()) {
           HValue* context = environment()->LookupContext();
-          HStoreContextSlot* store =
-              new HStoreContextSlot(context, var->index(), value);
+          HStoreContextSlot* store = new HStoreContextSlot(
+              context, var->index(), HStoreContextSlot::kAssign, value);
           AddInstruction(store);
           if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
         } else {
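
The array-literal rewrite above moves a decision from generated code to graph-building time: the boilerplate's elements kind is known when the literal is compiled, so each element store is specialized up front instead of branching per element at run time, which is what the deleted block did. In sketch form, with placeholder kinds and store helpers:

#include <cassert>

enum ElementsKind {
  FAST_SMI_ONLY_ELEMENTS,
  FAST_ELEMENTS,
  FAST_DOUBLE_ELEMENTS
};

struct Store { /* stand-in for an emitted HInstruction */ };

Store StoreKeyedFast(int key, int value, ElementsKind kind) { return Store(); }
Store StoreKeyedFastDouble(int key, double value) { return Store(); }

// The elements kind is a property of the boilerplate, so the choice is made
// once while building the graph rather than per element in generated code.
Store EmitElementStore(ElementsKind kind, int key, double value) {
  switch (kind) {
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_ELEMENTS:
      return StoreKeyedFast(key, static_cast<int>(value), kind);
    case FAST_DOUBLE_ELEMENTS:
      return StoreKeyedFastDouble(key, value);
  }
  assert(false);  // UNREACHABLE in the real code
  return Store();
}
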
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index fb625fb..7a5a191 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -388,8 +388,91 @@
 
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
-  while ((pc_offset() & (m - 1)) != 0) {
-    nop();
+  int mask = m - 1;
+  int addr = pc_offset();
+  Nop((m - (addr & mask)) & mask);
+}
+
+
+bool Assembler::IsNop(Address addr) {
+  Address a = addr;
+  while (*a == 0x66) a++;
+  if (*a == 0x90) return true;
+  if (a[0] == 0xf && a[1] == 0x1f) return true;
+  return false;
+}
+
+
+void Assembler::Nop(int bytes) {
+  EnsureSpace ensure_space(this);
+
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    // Older CPUs that do not support SSE2 may not support multibyte NOP
+    // instructions.
+    for (; bytes > 0; bytes--) {
+      EMIT(0x90);
+    }
+    return;
+  }
+
+  // Multi-byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
+  while (bytes > 0) {
+    switch (bytes) {
+      case 2:
+        EMIT(0x66);
+      case 1:
+        EMIT(0x90);
+        return;
+      case 3:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0);
+        return;
+      case 4:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x40);
+        EMIT(0);
+        return;
+      case 6:
+        EMIT(0x66);
+      case 5:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x44);
+        EMIT(0);
+        EMIT(0);
+        return;
+      case 7:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x80);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        return;
+      default:
+      case 11:
+        EMIT(0x66);
+        bytes--;
+      case 10:
+        EMIT(0x66);
+        bytes--;
+      case 9:
+        EMIT(0x66);
+        bytes--;
+      case 8:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x84);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        bytes -= 8;
+    }
   }
 }
 
@@ -463,13 +546,6 @@
 }
 
 
-void Assembler::push(Handle<Object> handle) {
-  EnsureSpace ensure_space(this);
-  EMIT(0x68);
-  emit(handle);
-}
-
-
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -1640,6 +1716,27 @@
 }
 
 
+void Assembler::f2xm1() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDB);
+  EMIT(0xE3);
+}
+
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC0, i);
@@ -1953,6 +2050,16 @@
 }
 
 
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2E);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
@@ -2158,6 +2265,19 @@
 }
 
 
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+  ASSERT(CpuFeatures::IsSupported(SSE4_1));
+  ASSERT(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x17);
+  emit_sse_operand(dst, src);
+  EMIT(imm8);
+}
+
+
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
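
A freestanding rendering of the new Nop()/IsNop() pair: it emits the single-instruction encodings from the AMD guide linked in the code and chains them to cover any byte count. Note the assembler itself prefers the 0x66-prefixed 9/10/11-byte forms for large counts, whereas this sketch simply repeats the 8-byte form:

#include <cstdint>
#include <vector>

// Append 'bytes' worth of NOPs using single-instruction encodings of
// one to eight bytes.
void EmitNops(std::vector<uint8_t>* code, int bytes) {
  static const uint8_t k1[] = {0x90};
  static const uint8_t k2[] = {0x66, 0x90};
  static const uint8_t k3[] = {0x0F, 0x1F, 0x00};
  static const uint8_t k4[] = {0x0F, 0x1F, 0x40, 0x00};
  static const uint8_t k5[] = {0x0F, 0x1F, 0x44, 0x00, 0x00};
  static const uint8_t k6[] = {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00};
  static const uint8_t k7[] = {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00};
  static const uint8_t k8[] =
      {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00};
  static const uint8_t* const kForms[] = {k1, k2, k3, k4, k5, k6, k7, k8};
  while (bytes > 0) {
    int n = bytes > 8 ? 8 : bytes;
    code->insert(code->end(), kForms[n - 1], kForms[n - 1] + n);
    bytes -= n;
  }
}

// Recognize a NOP at 'p': any run of 0x66 prefixes ending in 0x90, or the
// 0x0F 0x1F long-form family.
bool IsNop(const uint8_t* p) {
  while (*p == 0x66) p++;
  if (*p == 0x90) return true;
  return p[0] == 0x0F && p[1] == 0x1F;
}
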
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index d798f81..9ed46fc 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -659,6 +659,7 @@
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
 
@@ -673,7 +674,6 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
-  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -926,6 +926,9 @@
   void fsin();
   void fptan();
   void fyl2x();
+  void f2xm1();
+  void fscale();
+  void fninit();
 
   void fadd(int i);
   void fsub(int i);
@@ -983,6 +986,7 @@
   void andpd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
+  void ucomisd(XMMRegister dst, const Operand& src);
 
   enum RoundingMode {
     kRoundToNearest = 0x0,
@@ -1017,6 +1021,7 @@
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
   void movss(XMMRegister dst, XMMRegister src);
+  void extractps(Register dst, XMMRegister src, byte imm8);
 
   void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
@@ -1080,7 +1085,7 @@
   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
 
-  static bool IsNop(Address addr) { return *addr == 0x90; }
+  static bool IsNop(Address addr);
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index e12e79a..28a9b0f 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1238,37 +1238,42 @@
                   false,
                   &prepare_generic_code_call);
   __ IncrementCounter(counters->array_function_native(), 1);
-  __ mov(eax, ebx);
-  __ pop(ebx);
-  if (construct_call) {
-    __ pop(edi);
-  }
-  __ push(eax);
-  // eax: JSArray
+  __ push(ebx);
+  __ mov(ebx, Operand(esp, kPointerSize));
   // ebx: argc
   // edx: elements_array_end (untagged)
   // esp[0]: JSArray
-  // esp[4]: return address
-  // esp[8]: last argument
+  // esp[4]: argc
+  // esp[8]: constructor (only if construct_call)
+  // esp[12]: return address
+  // esp[16]: last argument
 
   // Location of the last argument
-  __ lea(edi, Operand(esp, 2 * kPointerSize));
+  int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
+  __ lea(edi, Operand(esp, last_arg_offset));
 
   // Location of the first array element (Parameter fill_with_holes to
-  // AllocateJSArrayis false, so the FixedArray is returned in ecx).
+  // AllocateJSArray is false, so the FixedArray is returned in ecx).
   __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
 
+  Label has_non_smi_element;
+
   // ebx: argc
   // edx: location of the first array element
   // edi: location of the last argument
   // esp[0]: JSArray
-  // esp[4]: return address
-  // esp[8]: last argument
+  // esp[4]: argc
+  // esp[8]: constructor (only if construct_call)
+  // esp[12]: return address
+  // esp[16]: last argument
   Label loop, entry;
   __ mov(ecx, ebx);
   __ jmp(&entry);
   __ bind(&loop);
   __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(eax, &has_non_smi_element);
+  }
   __ mov(Operand(edx, 0), eax);
   __ add(edx, Immediate(kPointerSize));
   __ bind(&entry);
@@ -1278,13 +1283,20 @@
   // Remove caller arguments from the stack and return.
   // ebx: argc
   // esp[0]: JSArray
-  // esp[4]: return address
-  // esp[8]: last argument
+  // esp[4]: argc
+  // esp[8]: constructor (only if construct_call)
+  // esp[12]: return address
+  // esp[16]: last argument
+  __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
   __ pop(eax);
-  __ pop(ecx);
-  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
-  __ push(ecx);
-  __ ret(0);
+  __ pop(ebx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size,
+                      last_arg_offset - kPointerSize));
+  __ jmp(ecx);
+
+  __ bind(&has_non_smi_element);
+  // Throw away the array that's only been partially constructed.
+  __ pop(eax);
 
   // Restore argc and constructor before running the generic code.
   __ bind(&prepare_generic_code_call);
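
The builtins change guards the fast fill loop for smi-only arrays: on ia32 a value is a smi iff its low tag bit is clear, so each argument costs one check, and the first heap object aborts the specialized path. Equivalent logic in C++ (tag constant as in V8's 32-bit layout):

#include <cstddef>
#include <cstdint>

const uintptr_t kSmiTagMask = 1;  // low bit clear => smi on 32-bit V8

inline bool IsSmi(uintptr_t value) { return (value & kSmiTagMask) == 0; }

// Copy args into a smi-only backing store; return false (bail to the
// generic array code) on the first non-smi element.
bool FillSmiOnlyElements(const uintptr_t* args, size_t argc,
                         uintptr_t* elements) {
  for (size_t i = 0; i < argc; ++i) {
    if (!IsSmi(args[i])) return false;  // has_non_smi_element
    elements[i] = args[i];
  }
  return true;
}
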
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 68eebd3..eabf201 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2938,157 +2938,263 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  // Registers are used as follows:
-  // edx = base
-  // eax = exponent
-  // ecx = temporary, result
-
   CpuFeatures::Scope use_sse2(SSE2);
-  Label allocate_return, call_runtime;
-
-  // Load input parameters.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-
-  // Save 1 in xmm3 - we need this several times later on.
-  __ mov(ecx, Immediate(1));
-  __ cvtsi2sd(xmm3, ecx);
-
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(eax, &exponent_nonsmi);
-  __ JumpIfNotSmi(edx, &base_nonsmi);
-
-  // Optimized version when both exponent and base are smis.
-  Label powi;
-  __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, edx);
-  __ jmp(&powi);
-  // exponent is smi and base is a heapnumber.
-  __ bind(&base_nonsmi);
   Factory* factory = masm->isolate()->factory();
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         factory->heap_number_map());
-  __ j(not_equal, &call_runtime);
+  const Register exponent = eax;
+  const Register base = edx;
+  const Register scratch = ecx;
+  const XMMRegister double_result = xmm3;
+  const XMMRegister double_base = xmm2;
+  const XMMRegister double_exponent = xmm1;
+  const XMMRegister double_scratch = xmm4;
 
-  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  Label call_runtime, done, exponent_not_smi, int_exponent;
 
-  // Optimized version of pow if exponent is a smi.
-  // xmm0 contains the base.
-  __ bind(&powi);
-  __ SmiUntag(eax);
+  // Save 1 in double_result - we need this several times later on.
+  __ mov(scratch, Immediate(1));
+  __ cvtsi2sd(double_result, scratch);
 
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ mov(edx, eax);
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack.
+    __ mov(base, Operand(esp, 2 * kPointerSize));
+    __ mov(exponent, Operand(esp, 1 * kPointerSize));
+
+    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+    __ cmp(FieldOperand(base, HeapObject::kMapOffset),
+           factory->heap_number_map());
+    __ j(not_equal, &call_runtime);
+
+    __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent, Label::kNear);
+
+    __ bind(&base_is_smi);
+    __ SmiUntag(base);
+    __ cvtsi2sd(double_base, base);
+
+    __ bind(&unpack_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiUntag(exponent);
+    __ jmp(&int_exponent);
+
+    __ bind(&exponent_not_smi);
+    __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
+           factory->heap_number_map());
+    __ j(not_equal, &call_runtime);
+    __ movdbl(double_exponent,
+              FieldOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiUntag(exponent);
+    __ jmp(&int_exponent);
+
+    __ bind(&exponent_not_smi);
+    __ movdbl(double_exponent,
+              FieldOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type_ != INTEGER) {
+    Label fast_power;
+    // Detect integer exponents stored as double.
+    __ cvttsd2si(exponent, Operand(double_exponent));
+    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+    __ cmp(exponent, Immediate(0x80000000u));
+    __ j(equal, &call_runtime);
+    __ cvtsi2sd(double_scratch, exponent);
+    // Already ruled out NaNs for exponent.
+    __ ucomisd(double_exponent, double_scratch);
+    __ j(equal, &int_exponent);
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label continue_sqrt, continue_rsqrt, not_plus_half;
+      // Test for 0.5.
+      // Load double_scratch with 0.5.
+      __ mov(scratch, Immediate(0x3F000000u));
+      __ movd(double_scratch, scratch);
+      __ cvtss2sd(double_scratch, double_scratch);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &not_plus_half, Label::kNear);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, single-precision -Infinity has the highest
+      // 9 bits set and the lowest 23 bits cleared.
+      __ mov(scratch, 0xFF800000u);
+      __ movd(double_scratch, scratch);
+      __ cvtss2sd(double_scratch, double_scratch);
+      __ ucomisd(double_base, double_scratch);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_sqrt, Label::kNear);
+      __ j(carry, &continue_sqrt, Label::kNear);
+
+      // Set result to Infinity in the special case.
+      __ xorps(double_result, double_result);
+      __ subsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&continue_sqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_scratch, double_scratch);
+      __ addsd(double_scratch, double_base);  // Convert -0 to +0.
+      __ sqrtsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      // Test for -0.5.
+      __ bind(&not_plus_half);
+      // Load double_scratch with -0.5 by subtracting 1 (in double_result).
+      __ subsd(double_scratch, double_result);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &fast_power, Label::kNear);
+
+      // Calculates reciprocal of square root of base.  Check for the special
+      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, single-precision -Infinity has the highest
+      // 9 bits set and the lowest 23 bits cleared.
+      __ mov(scratch, 0xFF800000u);
+      __ movd(double_scratch, scratch);
+      __ cvtss2sd(double_scratch, double_scratch);
+      __ ucomisd(double_base, double_scratch);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_rsqrt, Label::kNear);
+      __ j(carry, &continue_rsqrt, Label::kNear);
+
+      // Set result to 0 in the special case.
+      __ xorps(double_result, double_result);
+      __ jmp(&done);
+
+      __ bind(&continue_rsqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_exponent, double_exponent);
+      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
+      __ sqrtsd(double_exponent, double_exponent);
+      __ divsd(double_result, double_exponent);
+      __ jmp(&done);
+    }
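
The two -Infinity checks in the ON_STACK block above lean on two facts:
0xFF800000 is single-precision -Infinity, and ECMA-262's pow differs from
IEEE sqrt at that input.  A minimal host-side C++ sketch of both
(illustrative only, not stub code):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  // 0xFF800000: sign bit, all-ones 8-bit exponent (9 high bits set),
  // cleared 23-bit mantissa -- single-precision -Infinity.
  uint32_t bits = 0xFF800000u;
  float minus_inf;
  std::memcpy(&minus_inf, &bits, sizeof(minus_inf));
  assert(std::isinf(minus_inf) && minus_inf < 0);

  // ECMA-262 15.8.2.13: pow(-Inf, 0.5) is +Inf and pow(-Inf, -0.5) is +0,
  // although sqrt(-Inf) is NaN -- hence the explicit checks in the stub.
  double base = minus_inf;
  assert(std::isinf(std::pow(base, 0.5)) && std::pow(base, 0.5) > 0);
  assert(std::pow(base, -0.5) == 0.0);
  assert(std::isnan(std::sqrt(base)));
  return 0;
}
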
+
+    // Using FPU instructions to calculate power.
+    Label fast_power_failed;
+    __ bind(&fast_power);
+    __ fnclex();  // Clear flags to catch exceptions later.
+    // Transfer (B)ase and (E)xponent onto the FPU register stack.
+    __ sub(esp, Immediate(kDoubleSize));
+    __ movdbl(Operand(esp, 0), double_exponent);
+    __ fld_d(Operand(esp, 0));  // E
+    __ movdbl(Operand(esp, 0), double_base);
+    __ fld_d(Operand(esp, 0));  // B, E
+
+    // Exponent is in st(1) and base is in st(0)
+    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+    // FYL2X calculates st(1) * log2(st(0))
+    __ fyl2x();    // X
+    __ fld(0);     // X, X
+    __ frndint();  // rnd(X), X
+    __ fsub(1);    // rnd(X), X-rnd(X)
+    __ fxch(1);    // X - rnd(X), rnd(X)
+    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
+    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
+    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
+    // FSCALE calculates st(0) * 2^st(1)
+    __ fscale();   // 2^X, rnd(X)
+    __ fstp(1);
+    // Bail out to runtime in case of exceptions in the status word.
+    __ fnstsw_ax();
+    __ test_b(eax, 0x5F);  // We check for all but precision exception.
+    __ j(not_zero, &fast_power_failed, Label::kNear);
+    __ fstp_d(Operand(esp, 0));
+    __ movdbl(double_result, Operand(esp, 0));
+    __ add(esp, Immediate(kDoubleSize));
+    __ jmp(&done);
+
+    __ bind(&fast_power_failed);
+    __ fninit();
+    __ add(esp, Immediate(kDoubleSize));
+    __ jmp(&call_runtime);
+  }
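
The FPU sequence above computes B^E via X = E*log2(B), splitting X into an
integer and a fractional part because F2XM1 only accepts operands in the
open interval (-1, 1).  A hedged C++ rendering of the same identity
(frndint is modelled with nearbyint, which also honours the current
rounding mode):

#include <cmath>

double FastPowSketch(double b, double e) {
  double x = e * std::log2(b);                 // fyl2x
  double r = std::nearbyint(x);                // frndint
  double f = x - r;                            // fsub/fxch: |f| <= 0.5
  double p = (std::exp2(f) - 1.0) + 1.0;       // f2xm1, then fld1/faddp
  return std::scalbn(p, static_cast<int>(r));  // fscale: p * 2^r
}

The stub additionally bails out to the runtime if the FPU status word
reports anything but a precision exception.
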
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+  const XMMRegister double_scratch2 = double_exponent;
+  __ mov(scratch, exponent);  // Back up exponent.
+  __ movsd(double_scratch, double_base);  // Back up base.
+  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
 
   // Get absolute value of exponent.
-  Label no_neg;
-  __ cmp(eax, 0);
-  __ j(greater_equal, &no_neg, Label::kNear);
-  __ neg(eax);
+  Label no_neg, while_true, no_multiply;
+  __ test(scratch, scratch);
+  __ j(positive, &no_neg, Label::kNear);
+  __ neg(scratch);
   __ bind(&no_neg);
 
-  // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
   __ bind(&while_true);
-  __ shr(eax, 1);
+  __ shr(scratch, 1);
   __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(xmm1, xmm0);
+  __ mulsd(double_result, double_scratch);
   __ bind(&no_multiply);
-  __ mulsd(xmm0, xmm0);
+
+  __ mulsd(double_scratch, double_scratch);
   __ j(not_zero, &while_true);
 
-  // base has the original value of the exponent - if the exponent  is
-  // negative return 1/result.
-  __ test(edx, edx);
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ mov(ecx, Immediate(0x7FB00000));
-  __ movd(xmm0, ecx);
-  __ cvtss2sd(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  __ j(equal, &call_runtime);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
+  // scratch has the original value of the exponent - if the exponent is
+  // negative, return 1/result.
+  __ test(exponent, exponent);
+  __ j(positive, &done);
+  __ divsd(double_scratch2, double_result);
+  __ movsd(double_result, double_scratch2);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ xorps(double_scratch2, double_scratch2);
+  __ ucomisd(double_scratch2, double_result);  // Result cannot be NaN.
+  // double_exponent is aliased as double_scratch2; it has already been
+  // overwritten, and when the exponent is a smi it never held the exponent
+  // value in the first place.  Restore it before bailing out.
+  __ j(not_equal, &done);
+  __ cvtsi2sd(double_exponent, exponent);
 
-  // exponent (or both) is a heapnumber - no matter what we should now work
-  // on doubles.
-  __ bind(&exponent_nonsmi);
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         factory->heap_number_map());
-  __ j(not_equal, &call_runtime);
-  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
-  // Test if exponent is nan.
-  __ ucomisd(xmm1, xmm1);
-  __ j(parity_even, &call_runtime);
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 
-  Label base_not_smi;
-  Label handle_special_cases;
-  __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
-  __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, edx);
-  __ jmp(&handle_special_cases, Label::kNear);
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in the exponent register (eax).
+    __ bind(&done);
+    __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(2 * kPointerSize);
+  } else {
+    __ bind(&call_runtime);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(4, scratch);
+      __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
+      __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()), 4);
+    }
+    // Return value is in st(0) on ia32.
+    // Store it into the (fixed) result register.
+    __ sub(esp, Immediate(kDoubleSize));
+    __ fstp_d(Operand(esp, 0));
+    __ movdbl(double_result, Operand(esp, 0));
+    __ add(esp, Immediate(kDoubleSize));
 
-  __ bind(&base_not_smi);
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         factory->heap_number_map());
-  __ j(not_equal, &call_runtime);
-  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
-  __ and_(ecx, HeapNumber::kExponentMask);
-  __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
-  // base is NaN or +/-Infinity
-  __ j(greater_equal, &call_runtime);
-  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ mov(ecx, Immediate(0xBF000000));
-  __ movd(xmm2, ecx);
-  __ cvtss2sd(xmm2, xmm2);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half, Label::kNear);
-
-  // Calculates reciprocal of square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &call_runtime);
-  // Calculates square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-
-  __ bind(&allocate_return);
-  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
-  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
-  __ mov(eax, ecx);
-  __ ret(2 * kPointerSize);
-
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(0);
+  }
 }
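The int_exponent path above is plain binary exponentiation (square-and-
multiply on the bits of the exponent).  A hedged scalar model, with
invented names, that also shows why the stub re-checks a zero result
instead of trusting x^-y == (1/x)^y:

double IntPowSketch(double base, int exponent) {
  unsigned n = exponent < 0 ? -static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;     // double_result
  double scratch = base;   // double_scratch
  while (n != 0) {         // the shr/j(not_zero) loop
    if (n & 1) result *= scratch;  // carry set after shr: multiply
    scratch *= scratch;
    n >>= 1;
  }
  if (exponent < 0) {
    result = 1.0 / result;  // may round to zero when the exact value is
  }                         // subnormal, hence the bail-out above
  return result;
}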
 
 
@@ -4540,7 +4646,8 @@
     // megamorphic.
     __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
     __ j(equal, &initialize, Label::kNear);
-    // MegamorphicSentinel is a root so no write-barrier is needed.
+    // MegamorphicSentinel is an immortal immovable object (undefined) so no
+    // write-barrier is needed.
     __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
            Immediate(MegamorphicSentinel(isolate)));
     __ jmp(&call, Label::kNear);
@@ -4548,14 +4655,7 @@
     // An uninitialized cache is patched with the function.
     __ bind(&initialize);
     __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
-    __ mov(ecx, edi);
-    __ RecordWriteField(ebx,
-                        JSGlobalPropertyCell::kValueOffset,
-                        ecx,
-                        edx,
-                        kDontSaveFPRegs,
-                        OMIT_REMEMBERED_SET,  // Cells are rescanned.
-                        OMIT_SMI_CHECK);
+    // No need for a write barrier here - cells are rescanned.
 
     __ bind(&call);
   }
@@ -4587,6 +4687,8 @@
     // non-function case.
     __ mov(ebx, Operand(esp, 0));
     __ mov(ebx, Operand(ebx, 1));
+    // MegamorphicSentinel is an immortal immovable object (undefined) so no
+    // write barrier is needed.
     __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
            Immediate(MegamorphicSentinel(isolate)));
   }
@@ -5991,20 +6093,23 @@
   __ JumpIfNotSmi(edx, &runtime);
   __ sub(ecx, edx);
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
-  Label return_eax;
-  __ j(equal, &return_eax);
+  Label not_original_string;
+  __ j(not_equal, &not_original_string, Label::kNear);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1);
+  __ ret(3 * kPointerSize);
+  __ bind(&not_original_string);
   // Special handling of sub-strings of length 1 and 2. One character strings
   // are handled in the runtime system (looked up in the single character
   // cache). Two character strings are looked up in the symbol cache.
-  __ SmiUntag(ecx);  // Result length is no longer smi.
-  __ cmp(ecx, 2);
+  __ cmp(ecx, Immediate(Smi::FromInt(2)));
   __ j(greater, &result_longer_than_two);
   __ j(less, &runtime);
 
   // Sub string of length 2 requested.
   // eax: string
   // ebx: instance type
-  // ecx: sub string length (value is 2)
+  // ecx: sub string length (smi, value is 2)
   // edx: from index (smi)
   __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
 
@@ -6019,6 +6124,7 @@
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, ebx, ecx, eax, edx, edi,
       &make_two_character_string, &make_two_character_string);
+  __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
   __ bind(&make_two_character_string);
@@ -6026,55 +6132,61 @@
   __ mov(eax, Operand(esp, 3 * kPointerSize));
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ Set(ecx, Immediate(2));
+  __ Set(ecx, Immediate(Smi::FromInt(2)));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // Load index.
+
+  __ bind(&result_longer_than_two);
+  // eax: string
+  // ebx: instance type
+  // ecx: sub string length (smi)
+  // edx: from index (smi)
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into edi.
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ test(ebx, Immediate(kIsIndirectStringMask));
+  __ j(zero, &seq_or_external_string, Label::kNear);
+
+  Factory* factory = masm->isolate()->factory();
+  __ test(ebx, Immediate(kSlicedNotConsMask));
+  __ j(not_zero, &sliced_string, Label::kNear);
+  // Cons string.  Check whether it is flat, then fetch first part.
+  // Flat cons strings have an empty second part.
+  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+         factory->empty_string());
+  __ j(not_equal, &runtime);
+  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+  // Update instance type.
+  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked, Label::kNear);
+
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and adjust start index by offset.
+  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+  // Update instance type.
+  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked, Label::kNear);
+
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ mov(edi, eax);
+
+  __ bind(&underlying_unpacked);
 
   if (FLAG_string_slices) {
     Label copy_routine;
-    // If coming from the make_two_character_string path, the string
-    // is too short to be sliced anyways.
-    STATIC_ASSERT(2 < SlicedString::kMinLength);
-    __ jmp(&copy_routine);
-    __ bind(&result_longer_than_two);
-
-    // eax: string
-    // ebx: instance type
-    // ecx: sub string length
-    // edx: from index (smi)
-    Label allocate_slice, sliced_string, seq_or_external_string;
-    __ cmp(ecx, SlicedString::kMinLength);
-    // Short slice.  Copy instead of slicing.
-    __ j(less, &copy_routine);
-    // If the string is not indirect, it can only be sequential or external.
-    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-    STATIC_ASSERT(kIsIndirectStringMask != 0);
-    __ test(ebx, Immediate(kIsIndirectStringMask));
-    __ j(zero, &seq_or_external_string, Label::kNear);
-
-    Factory* factory = masm->isolate()->factory();
-    __ test(ebx, Immediate(kSlicedNotConsMask));
-    __ j(not_zero, &sliced_string, Label::kNear);
-    // Cons string.  Check whether it is flat, then fetch first part.
-    __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
-           factory->empty_string());
-    __ j(not_equal, &runtime);
-    __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
-    __ jmp(&allocate_slice, Label::kNear);
-
-    __ bind(&sliced_string);
-    // Sliced string.  Fetch parent and correct start index by offset.
-    __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
-    __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
-    __ jmp(&allocate_slice, Label::kNear);
-
-    __ bind(&seq_or_external_string);
-    // Sequential or external string.  Just move string to the correct register.
-    __ mov(edi, eax);
-
-    __ bind(&allocate_slice);
     // edi: underlying subject string
     // ebx: instance type of original subject string
-    // edx: offset
-    // ecx: length
+    // edx: adjusted start index (smi)
+    // ecx: length (smi)
+    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
+    // Short slice.  Copy instead of slicing.
+    __ j(less, &copy_routine);
     // Allocate new sliced string.  At this point we do not reload the instance
     // type including the string encoding because we simply rely on the info
     // provided by the original string.  It does not matter if the original
@@ -6091,27 +6203,50 @@
     __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
     __ bind(&set_slice_header);
     __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
-    __ SmiTag(ecx);
     __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
     __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
     __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
            Immediate(String::kEmptyHashField));
-    __ jmp(&return_eax);
+    __ IncrementCounter(counters->sub_string_native(), 1);
+    __ ret(3 * kPointerSize);
 
     __ bind(&copy_routine);
-  } else {
-    __ bind(&result_longer_than_two);
   }
 
-  // eax: string
-  // ebx: instance type
-  // ecx: result string length
-  // Check for flat ascii string
-  Label non_ascii_flat;
-  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+  // edi: underlying subject string
+  // ebx: instance type of original subject string
+  // edx: adjusted start index (smi)
+  // ecx: length (smi)
+  // At this point the subject string can only be a sequential or external
+  // string of either encoding.
+  Label two_byte_sequential, runtime_drop_two, sequential_string;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test_b(ebx, kExternalStringTag);
+  __ j(zero, &sequential_string);
 
-  // Allocate the result.
-  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+  // Handle external string.
+  Label ascii_external, done;
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ test_b(ebx, kShortExternalStringMask);
+  __ j(not_zero, &runtime);
+  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&sequential_string);
+  // Stash away (adjusted) index and (underlying) string.
+  __ push(edx);
+  __ push(edi);
+  __ SmiUntag(ecx);
+  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+  __ test_b(ebx, kStringEncodingMask);
+  __ j(zero, &two_byte_sequential);
+
+  // Sequential ascii string.  Allocate the result.
+  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
   // ecx: result string length
@@ -6120,11 +6255,10 @@
   __ mov(edi, eax);
   __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
-  __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
+  __ pop(esi);
+  __ pop(ebx);
   __ SmiUntag(ebx);
-  __ add(esi, ebx);
+  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
 
   // eax: result string
   // ecx: result length
@@ -6133,20 +6267,12 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
   __ mov(esi, edx);  // Restore esi.
-  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
-  __ bind(&non_ascii_flat);
-  // eax: string
-  // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
-  // ecx: result string length
-  // Check for flat two byte string
-  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
-  __ j(not_equal, &runtime);
-
-  // Allocate the result.
-  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+  __ bind(&two_byte_sequential);
+  // Sequential two-byte string.  Allocate the result.
+  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
   // ecx: result string length
@@ -6156,14 +6282,13 @@
   __ add(edi,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
-  __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
+  __ pop(esi);
+  __ pop(ebx);
   // As from is a smi, it is 2 times the value, which matches the size of a
   // two-byte character.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(esi, ebx);
+  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
 
   // eax: result string
   // ecx: result length
@@ -6172,11 +6297,13 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
   __ mov(esi, edx);  // Restore esi.
-
-  __ bind(&return_eax);
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
+  // Drop pushed values on the stack before tail call.
+  __ bind(&runtime_drop_two);
+  __ Drop(2);
+
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kSubString, 3, 1);
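
The cons/sliced/sequential-or-external dispatch above compresses into a
short scalar outline.  Field and type names below paraphrase the stub's
comments, not the real V8 headers:

#include <cstddef>

enum Kind { kSequentialOrExternal, kCons, kSliced };

struct Str {
  Kind kind;
  Str* first;    // cons: first part
  bool flat;     // cons: second part is the empty string
  Str* parent;   // sliced: underlying string
  int offset;    // sliced: start offset into the parent
};

// Returns the direct string and adjusts the start index, or NULL to signal
// a bail-out to the runtime (a cons string that is not flat).
Str* UnpackUnderlying(Str* s, int* from_index) {
  if (s->kind == kCons) return s->flat ? s->first : NULL;
  if (s->kind == kSliced) {
    *from_index += s->offset;
    return s->parent;
  }
  return s;  // sequential or external: use as-is
}
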
@@ -6568,33 +6695,45 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  // Save the registers.
-  __ pop(ecx);
-  __ push(edx);
-  __ push(eax);
-  __ push(ecx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
+  __ JumpIfSmi(ecx, &miss, Label::kNear);
 
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ cmp(ecx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+  __ cmp(ebx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+
+  __ sub(eax, edx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
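
In scalar terms, GenerateKnownObjects is a map-identity guard followed by a
pointer subtraction (the compare IC convention encodes equality as
eax - edx == 0).  A rough model with invented names:

#include <cstdint>

int CompareKnownObjectsSketch(intptr_t left, intptr_t right,
                              intptr_t left_map, intptr_t right_map,
                              intptr_t known_map, bool* miss) {
  // Smi inputs have already been filtered out (the JumpIfSmi above).
  *miss = left_map != known_map || right_map != known_map;
  if (*miss) return 0;                     // falls through to GenerateMiss
  return static_cast<int>(left - right);   // zero iff the same object
}
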
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
     ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                                masm->isolate());
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(edx);
+    __ push(edx);  // Preserve edx and eax.
+    __ push(eax);
+    __ push(edx);  // And also use them as the arguments.
     __ push(eax);
     __ push(Immediate(Smi::FromInt(op_)));
     __ CallExternalReference(miss, 3);
+    // Compute the entry point of the rewritten stub.
+    __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+    __ pop(eax);
+    __ pop(edx);
   }
 
-  // Compute the entry point of the rewritten stub.
-  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-
-  // Restore registers.
-  __ pop(ecx);
-  __ pop(eax);
-  __ pop(edx);
-  __ push(ecx);
-
   // Do a tail call to the rewritten stub.
   __ jmp(edi);
 }
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2649560..b37b54b 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -258,9 +258,7 @@
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
-    __ nop();
-  }
+  __ Nop(Assembler::kDebugBreakSlotLength);
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index eeee4f2..98c2400 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -231,8 +231,8 @@
   ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
          *(call_target_address - 2) == 0x07 &&  // offset
          *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
+  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
+  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
 
@@ -250,8 +250,8 @@
          Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
+  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
+         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
          *(call_target_address - 1) == 0xe8);   // call
   *(call_target_address - 3) = 0x73;  // jae
   *(call_target_address - 2) = 0x07;  // offset
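
The patch/revert pair above toggles the stack-check guard between two forms
of identical length, so no other bytes move.  A schematic view (not the
real Deoptimizer interface):

#include <cstdint>

void ToggleStackCheck(uint8_t* call_target_address, bool patch) {
  if (patch) {                       // make the deopt call unconditional
    call_target_address[-3] = 0x66;  // 2-byte nop, part 1
    call_target_address[-2] = 0x90;  // 2-byte nop, part 2
  } else {                           // restore the guard
    call_target_address[-3] = 0x73;  // jae
    call_target_address[-2] = 0x07;  // +7 displacement (skip the call)
  }
  // call_target_address[-1] remains 0xE8 (call rel32) in both states.
}

Using the single 0x66 0x90 two-byte nop rather than two one-byte nops keeps
the patched site one instruction instead of two.
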
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index da22390..b5ddcca 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -763,10 +763,13 @@
             case 0xEB: mnem = "fldpi"; break;
             case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF0: mnem = "f2xm1"; break;
             case 0xF1: mnem = "fyl2x"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
+            case 0xFC: mnem = "frndint"; break;
+            case 0xFD: mnem = "fscale"; break;
             case 0xFE: mnem = "fsin"; break;
             case 0xFF: mnem = "fcos"; break;
             default: UnimplementedInstruction();
@@ -788,6 +791,8 @@
         has_register = true;
       } else if (modrm_byte == 0xE2) {
         mnem = "fclex";
+      } else if (modrm_byte == 0xE3) {
+        mnem = "fninit";
       } else {
         UnimplementedInstruction();
       }
@@ -987,7 +992,7 @@
         break;
 
       case 0x0F:
-        { byte f0byte = *(data+1);
+        { byte f0byte = data[1];
           const char* f0mnem = F0Mnem(f0byte);
           if (f0byte == 0x18) {
             int mod, regop, rm;
@@ -995,6 +1000,25 @@
             const char* suffix[] = {"nta", "1", "2", "3"};
             AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
             data += PrintRightOperand(data);
+          } else if (f0byte == 0x1F && data[2] == 0) {
+            AppendToBuffer("nop");  // 3 byte nop.
+            data += 3;
+          } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+            AppendToBuffer("nop");  // 4 byte nop.
+            data += 4;
+          } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+                     data[4] == 0) {
+            AppendToBuffer("nop");  // 5 byte nop.
+            data += 5;
+          } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+                     data[4] == 0 && data[5] == 0 && data[6] == 0) {
+            AppendToBuffer("nop");  // 7 byte nop.
+            data += 7;
+          } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+                     data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+                     data[7] == 0) {
+            AppendToBuffer("nop");  // 8 byte nop.
+            data += 8;
           } else if (f0byte == 0xA2 || f0byte == 0x31) {
             AppendToBuffer("%s", f0mnem);
             data += 2;
@@ -1130,8 +1154,12 @@
         break;
 
       case 0x66:  // prefix
-        data++;
-        if (*data == 0x8B) {
+        while (*data == 0x66) data++;
+        if (*data == 0x0F && data[1] == 0x1F) {
+          AppendToBuffer("nop");  // 0x66-prefixed 0F 1F multi-byte nop.
+        } else if (*data == 0x90) {
+          AppendToBuffer("nop");  // 0x66 0x90: two-byte nop.
+        } else if (*data == 0x8B) {
           data++;
           data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
         } else if (*data == 0x89) {
@@ -1185,6 +1213,16 @@
                              NameOfXMMRegister(rm),
                              static_cast<int>(imm8));
               data += 2;
+            } else if (*data == 0x17) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              int8_t imm8 = static_cast<int8_t>(data[1]);
+              AppendToBuffer("extractps %s,%s,%d",
+                             NameOfCPURegister(regop),
+                             NameOfXMMRegister(rm),
+                             static_cast<int>(imm8));
+              data += 2;
             } else if (*data == 0x22) {
               data++;
               int mod, regop, rm;
@@ -1258,6 +1296,9 @@
                            NameOfXMMRegister(rm),
                            static_cast<int>(imm8));
             data += 2;
+          } else if (*data == 0x90) {
+            data++;
+            AppendToBuffer("nop");  // 2 byte nop.
           } else if (*data == 0xF3) {
             data++;
             int mod, regop, rm;
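
The new cases above decode the standard multi-byte nop encodings: 0x0F 0x1F
with increasingly wide memory operands, plus the 0x66-prefixed forms.  For
reference, the accepted byte sequences as data (per the usual Intel
recommendation):

#include <cstddef>
#include <cstdint>

struct NopEncoding { size_t length; uint8_t bytes[8]; };

static const NopEncoding kNops[] = {
    {2, {0x66, 0x90}},                                       // osize nop
    {3, {0x0F, 0x1F, 0x00}},                                 // nop [eax]
    {4, {0x0F, 0x1F, 0x40, 0x00}},                           // + disp8
    {5, {0x0F, 0x1F, 0x44, 0x00, 0x00}},                     // + SIB, disp8
    {7, {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00}},         // + disp32
    {8, {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}},   // + SIB, disp32
};
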
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index ef4f0c5..6e23911 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2883,7 +2883,7 @@
   VisitForStackValue(args->at(1));
 
   if (CpuFeatures::IsSupported(SSE2)) {
-    MathPowStub stub;
+    MathPowStub stub(MathPowStub::ON_STACK);
     __ CallStub(&stub);
   } else {
     __ CallRuntime(Runtime::kMath_pow, 2);
@@ -3787,7 +3787,7 @@
         if (context()->IsAccumulatorValue()) {
           __ mov(eax, isolate()->factory()->true_value());
         } else {
-          __ push(isolate()->factory()->true_value());
+          __ Push(isolate()->factory()->true_value());
         }
         __ jmp(&done, Label::kNear);
         __ bind(&materialize_false);
@@ -3795,7 +3795,7 @@
         if (context()->IsAccumulatorValue()) {
           __ mov(eax, isolate()->factory()->false_value());
         } else {
-          __ push(isolate()->factory()->false_value());
+          __ Push(isolate()->factory()->false_value());
         }
         __ bind(&done);
       }
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index e93353e..a83db12 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1625,6 +1625,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d5ef4d9..23db874 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -341,6 +341,13 @@
 }
 
 
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  Handle<Object> literal = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+  return literal;
+}
+
+
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   Handle<Object> value = chunk_->LookupLiteral(op);
   return value->Number();
@@ -518,7 +525,7 @@
   } else if (context->IsConstantOperand()) {
     Handle<Object> literal =
         chunk_->LookupLiteral(LConstantOperand::cast(context));
-    LoadHeapObject(esi, Handle<Context>::cast(literal));
+    __ LoadHeapObject(esi, Handle<Context>::cast(literal));
   } else {
     UNREACHABLE();
   }
@@ -1219,7 +1226,7 @@
   Register reg = ToRegister(instr->result());
   Handle<Object> handle = instr->value();
   if (handle->IsHeapObject()) {
-    LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+    __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
   } else {
     __ Set(reg, Immediate(handle));
   }
@@ -2030,7 +2037,7 @@
   // the stub.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
-  __ mov(InstanceofStub::right(), Immediate(instr->function()));
+  __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 13;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
   __ mov(temp, Immediate(delta));
@@ -2137,20 +2144,7 @@
 
   // Store the value.
   __ mov(FieldOperand(object, offset), value);
-
-  // Cells are always in the remembered set.
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    __ RecordWriteField(object,
-                        offset,
-                        value,
-                        address,
-                        kSaveFPRegs,
-                        OMIT_REMEMBERED_SET,
-                        check_needed);
-  }
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2171,13 +2165,22 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ mov(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(result, factory()->the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
 }
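
The new hole check deoptimizes when a context slot still contains the_hole,
i.e. a binding read before initialization; DoStoreContextSlot just below
gains the same guard.  A scalar model with invented names:

#include <cstddef>

struct Object;  // stand-in for a V8 tagged value

Object* LoadContextSlotSketch(Object** context, int slot_index,
                              Object* the_hole, bool requires_hole_check,
                              bool* deopt) {
  Object* value = context[slot_index];
  if (requires_hole_check && value == the_hole) {
    *deopt = true;  // DeoptimizeIf(equal, ...): retry in full codegen
    return NULL;
  }
  return value;
}
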
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ mov(ContextOperand(context, instr->slot_index()), value);
+  Operand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(target, factory()->the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+  __ mov(target, value);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
@@ -2229,7 +2232,24 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
+  }
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  ASSERT(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    if (object->IsSmi()) {
+      __ Push(Handle<Smi>::cast(object));
+    } else {
+      __ PushHeapObject(Handle<HeapObject>::cast(object));
+    }
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
+  } else {
+    __ push(ToOperand(operand));
   }
 }
 
@@ -2639,17 +2659,13 @@
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  if (argument->IsConstantOperand()) {
-    __ push(ToImmediate(argument));
-  } else {
-    __ push(ToOperand(argument));
-  }
+  EmitPushTaggedOperand(argument);
 }
 
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  LoadHeapObject(result, instr->hydrogen()->closure());
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2719,7 +2735,7 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  __ mov(edi, instr->function());
+  __ LoadHeapObject(edi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2893,12 +2909,12 @@
   __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_half);
-  // input = input + 0.5
-  __ addsd(input_reg, xmm_scratch);
+  // xmm_scratch = input + 0.5
+  __ addsd(xmm_scratch, input_reg);
 
   // Compute Math.floor(value + 0.5).
   // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, Operand(input_reg));
+  __ cvttsd2si(output_reg, Operand(xmm_scratch));
 
   // Overflow is signalled with minint.
   __ cmp(output_reg, 0x80000000u);
@@ -2934,72 +2950,67 @@
 }
 
 
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity.  According to IEEE-754, single-precision
+  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
+  __ mov(scratch, 0xFF800000);
+  __ movd(xmm_scratch, scratch);
+  __ cvtss2sd(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
+  // Comparing -Infinity with NaN results in "unordered", which sets the
+  // zero flag as if both were equal.  However, it also sets the carry flag.
+  __ j(not_equal, &sqrt, Label::kNear);
+  __ j(carry, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ xorps(input_reg, input_reg);
+  __ subsd(input_reg, xmm_scratch);
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
   __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
+  __ bind(&done);
 }
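
DoMathPowHalf must match pow(x, 0.5), which disagrees with sqrt(x) at
exactly the two inputs handled above: -Infinity and -0.  A hedged scalar
equivalent:

#include <cmath>
#include <limits>

double PowHalfSketch(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // pow(-Inf, .5) == +Inf
  }
  // Adding +0 turns -0 into +0 (the xorps/addsd pair above), so the result
  // is +0 where a bare sqrt would return -0.
  return std::sqrt(x + 0.0);
}
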
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(eax));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
 
-  if (exponent_type.IsDouble()) {
-    // It is safe to use ebx directly since the instruction is marked
-    // as a call.
-    __ PrepareCallCFunction(4, ebx);
-    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
-    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
-    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
-                     4);
-  } else if (exponent_type.IsInteger32()) {
-    // It is safe to use ebx directly since the instruction is marked
-    // as a call.
-    ASSERT(!ToRegister(right).is(ebx));
-    __ PrepareCallCFunction(4, ebx);
-    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
-    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
-    __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
-                     4);
-  } else {
-    ASSERT(exponent_type.IsTagged());
-    CpuFeatures::Scope scope(SSE2);
-    Register right_reg = ToRegister(right);
-
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-    __ SmiUntag(right_reg);
-    __ cvtsi2sd(result_reg, Operand(right_reg));
-    __ jmp(&call);
-
-    __ bind(&non_smi);
-    // It is safe to use ebx directly since the instruction is marked
-    // as a call.
-    ASSERT(!right_reg.is(ebx));
-    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(eax, &no_deopt);
+    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
     DeoptimizeIf(not_equal, instr->environment());
-    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
-    __ bind(&call);
-    __ PrepareCallCFunction(4, ebx);
-    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
-    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
-    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
-                     4);
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
-
-  // Return value is in st(0) on ia32.
-  // Store it into the (fixed) result register.
-  __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ fstp_d(Operand(esp, 0));
-  __ movdbl(result_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
 }
 
 
@@ -3072,9 +3083,6 @@
     case kMathSqrt:
       DoMathSqrt(instr);
       break;
-    case kMathPowHalf:
-      DoMathPowHalf(instr);
-      break;
     case kMathCos:
       DoMathCos(instr);
       break;
@@ -3159,7 +3167,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  __ mov(edi, instr->target());
+  __ LoadHeapObject(edi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3524,16 +3532,8 @@
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  if (instr->left()->IsConstantOperand()) {
-    __ push(ToImmediate(instr->left()));
-  } else {
-    __ push(ToOperand(instr->left()));
-  }
-  if (instr->right()->IsConstantOperand()) {
-    __ push(ToImmediate(instr->right()));
-  } else {
-    __ push(ToOperand(instr->right()));
-  }
+  EmitPushTaggedOperand(instr->left());
+  EmitPushTaggedOperand(instr->right());
   StringAddStub stub(NO_STRING_CHECK_IN_STUB);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -4032,7 +4032,7 @@
     __ cmp(reg, Operand::Cell(cell));
   } else {
     Operand operand = ToOperand(instr->value());
-    __ cmp(operand, instr->hydrogen()->target());
+    __ cmp(operand, target);
   }
   DeoptimizeIf(not_equal, instr->environment());
 }
@@ -4096,17 +4096,6 @@
 }
 
 
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    __ mov(result, Operand::Cell(cell));
-  } else {
-    __ mov(result, object);
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register reg = ToRegister(instr->TempAt(0));
 
@@ -4114,7 +4103,7 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(reg, current_prototype);
+  __ LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
@@ -4124,7 +4113,7 @@
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(reg, current_prototype);
+    __ LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
@@ -4136,17 +4125,32 @@
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
 
-  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  // Deopt if the array literal boilerplate ElementsKind is of a type
+  // different from the expected one.  The check isn't necessary if the
+  // boilerplate has already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
+    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+    // Load the map's "bit field 2". We only need the first byte,
+    // but the following masking takes care of that anyway.
+    __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(ebx, Map::kElementsKindMask);
+    __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
+    DeoptimizeIf(not_equal, instr->environment());
+  }
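
Note the guard compares the masked byte against the expected kind shifted
up, saving a shift on the loaded field.  Equivalent C++, with the constant
names assumed from the comments above:

bool HasExpectedElementsKind(unsigned bit_field2, unsigned expected_kind,
                             unsigned kElementsKindMask,
                             unsigned kElementsKindShift) {
  return (bit_field2 & kElementsKindMask) ==
         (expected_kind << kElementsKindShift);
}
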
 
   // Set up the parameters to the stub/runtime call.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(constant_elements));
+  // The boilerplate already exists, so the constant elements are never
+  // accessed.  Pass an empty fixed array instead.
+  __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -4162,9 +4166,9 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
@@ -4179,7 +4183,7 @@
   ASSERT(!result.is(ecx));
 
   if (FLAG_debug_code) {
-    LoadHeapObject(ecx, object);
+    __ LoadHeapObject(ecx, object);
     __ cmp(source, ecx);
     __ Assert(equal, "Unexpected object literal boilerplate");
   }
@@ -4209,10 +4213,10 @@
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);
       __ lea(ecx, Operand(result, *offset));
       __ mov(FieldOperand(result, total_offset), ecx);
-      LoadHeapObject(source, value_object);
+      __ LoadHeapObject(source, value_object);
       EmitDeepCopy(value_object, result, source, offset);
     } else if (value->IsHeapObject()) {
-      LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+      __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
       __ mov(FieldOperand(result, total_offset), ecx);
     } else {
       __ mov(FieldOperand(result, total_offset), Immediate(value));
@@ -4237,7 +4241,7 @@
 
   __ bind(&allocated);
   int offset = 0;
-  LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
   EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
   ASSERT_EQ(size, offset);
 }
@@ -4359,11 +4363,7 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(1);
-  if (input->IsConstantOperand()) {
-    __ push(ToImmediate(input));
-  } else {
-    __ push(ToOperand(input));
-  }
+  EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
@@ -4487,9 +4487,7 @@
   int patch_size = Deoptimizer::patch_size();
   if (current_pc < last_lazy_deopt_pc_ + patch_size) {
     int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    while (padding_size-- > 0) {
-      __ nop();
-    }
+    __ Nop(padding_size);
   }
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
@@ -4513,11 +4511,7 @@
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
   __ push(ToOperand(obj));
-  if (key->IsConstantOperand()) {
-    __ push(ToImmediate(key));
-  } else {
-    __ push(ToOperand(key));
-  }
+  EmitPushTaggedOperand(key);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -4614,16 +4608,8 @@
 void LCodeGen::DoIn(LIn* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  if (key->IsConstantOperand()) {
-    __ push(ToImmediate(key));
-  } else {
-    __ push(ToOperand(key));
-  }
-  if (obj->IsConstantOperand()) {
-    __ push(ToImmediate(obj));
-  } else {
-    __ push(ToOperand(obj));
-  }
+  EmitPushTaggedOperand(key);
+  EmitPushTaggedOperand(obj);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 9d1a4f7..dd335a4 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -207,8 +207,6 @@
                          LInstruction* instr,
                          CallKind call_kind);
 
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
-
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
 
@@ -227,6 +225,7 @@
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
   int ToInteger32(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                 LOperand* key,
@@ -239,7 +238,6 @@
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathRound(LUnaryMathOperation* instr);
   void DoMathSqrt(LUnaryMathOperation* instr);
-  void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
   void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
@@ -306,6 +304,10 @@
 
   void EnsureSpaceForLazyDeopt();
 
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4e5f278..f364439 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -298,6 +298,12 @@
 }
 
 
+void LMathPowHalf::PrintDataTo(StringStream* stream) {
+  stream->Add("/pow_half ");
+  InputAt(0)->PrintTo(stream);
+}
+
+
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
   InputAt(0)->PrintTo(stream);
   stream->Add("[%d]", slot_index());
@@ -1184,6 +1190,11 @@
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
+    if (op == kMathPowHalf) {
+      LOperand* temp = TempRegister();
+      LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+      return DefineSameAsFirst(result);
+    }
     LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
                                                                   input);
     switch (op) {
@@ -1195,8 +1206,6 @@
         return AssignEnvironment(DefineAsRegister(result));
       case kMathSqrt:
         return DefineSameAsFirst(result);
-      case kMathPowHalf:
-        return DefineSameAsFirst(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1437,9 +1446,9 @@
   // We need to use fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
   ASSERT(instr->left()->representation().IsDouble());
-  LOperand* left = UseFixedDouble(instr->left(), xmm1);
+  LOperand* left = UseFixedDouble(instr->left(), xmm2);
   LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), xmm2) :
+      UseFixedDouble(instr->right(), xmm1) :
       UseFixed(instr->right(), eax);
   LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
@@ -1866,7 +1875,9 @@
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1881,7 +1892,8 @@
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  return new(zone()) LStoreContextSlot(context, value, temp);
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 5170647..7e126ff 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -123,6 +123,7 @@
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
+  V(MathPowHalf)                                \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -582,6 +583,24 @@
 };
 
 
+class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index fcae7a2..2e4cfa4 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -755,7 +755,7 @@
 
   // Push the state and the code object.
   push(Immediate(state));
-  push(CodeObject());
+  Push(CodeObject());
 
   // Link the current handler as the next handler.
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -2022,7 +2022,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and set up the context.
-  mov(edi, Immediate(function));
+  LoadHeapObject(edi, function);
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2151,6 +2151,29 @@
 }
 
 
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    mov(result, Operand::Cell(cell));
+  } else {
+    mov(result, object);
+  }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    push(Operand::Cell(cell));
+  } else {
+    Push(object);
+  }
+}
+
+
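Why these helpers exist: pointers embedded directly in code are not updated
when the scavenger moves a new-space object, so such objects are referenced
indirectly through a global property cell, which the GC does visit.  The
resulting call-site pattern (mirroring changes elsewhere in this patch):

  // Only safe when *function can never move:
  //   __ mov(edi, Immediate(function));
  // Safe for new-space objects as well -- indirects through a cell:
  //   __ LoadHeapObject(edi, function);
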
 void MacroAssembler::Ret() {
   ret(0);
 }
@@ -2182,11 +2205,6 @@
 }
 
 
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
-  mov(dst, value);
-}
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 03ec28a..46f99be 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -237,6 +237,9 @@
   void StoreToSafepointRegisterSlot(Register dst, Immediate src);
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+  void PushHeapObject(Handle<HeapObject> object);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -718,10 +721,8 @@
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
-  void Move(Register target, Handle<Object> value);
-
   // Push a handle value.
-  void Push(Handle<Object> handle) { push(handle); }
+  void Push(Handle<Object> handle) { push(Immediate(handle)); }
 
   Handle<Object> CodeObject() {
     ASSERT(!code_object_.is_null());
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index aa8f47a..c27a60f 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -429,7 +429,7 @@
   // -----------------------------------
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
-  __ mov(edi, Immediate(function));
+  __ LoadHeapObject(edi, function);
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
@@ -1025,7 +1025,7 @@
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<Object> value,
+                                        Handle<JSFunction> value,
                                         Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
@@ -1036,7 +1036,7 @@
       object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ mov(eax, value);
+  __ LoadHeapObject(eax, value);
   __ ret(0);
 }
 
@@ -2522,23 +2522,9 @@
 
   // Store the value in the cell.
   __ mov(cell_operand, eax);
-  Label done;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
-
-  __ mov(ecx, eax);
-  __ lea(edx, cell_operand);
-  // Cells are always in the remembered set.
-  __ RecordWrite(ebx,  // Object.
-                 edx,  // Address.
-                 ecx,  // Value.
-                 kDontSaveFPRegs,
-                 OMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
+  // No write barrier here, because cells are always rescanned.
 
   // Return the value (register eax).
-  __ bind(&done);
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1);
   __ ret(0);
@@ -2729,7 +2715,7 @@
 
 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
                                                    Handle<JSObject> holder,
-                                                   Handle<Object> value,
+                                                   Handle<JSFunction> value,
                                                    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -2891,7 +2877,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<Object> value) {
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 498cf3a..56cea81 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
 namespace internal {
 
 
-Address IC::address() {
+Address IC::address() const {
   // Get the address of the call.
   Address result = pc() - Assembler::kCallTargetAddressOffset;
 
diff --git a/src/ic.cc b/src/ic.cc
index 2c6d55b..ad447cc 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -40,13 +40,13 @@
 namespace internal {
 
 #ifdef DEBUG
-static char TransitionMarkFromState(IC::State state) {
+char IC::TransitionMarkFromState(IC::State state) {
   switch (state) {
     case UNINITIALIZED: return '0';
     case PREMONOMORPHIC: return 'P';
     case MONOMORPHIC: return '1';
     case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
-    case MEGAMORPHIC: return 'N';
+    case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
 
     // We never see the debugger states here, because the state is
     // computed from the original code - not the patched code. Let
@@ -80,19 +80,7 @@
         raw_frame = it.frame();
       }
     }
-    if (raw_frame->is_java_script()) {
-      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-      Code* js_code = frame->unchecked_code();
-      // Find the function on the stack and both the active code for the
-      // function and the original code.
-      JSFunction* function = JSFunction::cast(frame->function());
-      function->PrintName();
-      int code_offset =
-          static_cast<int>(address() - js_code->instruction_start());
-      PrintF("+%d", code_offset);
-    } else {
-      PrintF("<unknown>");
-    }
+    JavaScriptFrame::PrintTop(stdout, false, true);
     PrintF(" (%c->%c)",
            TransitionMarkFromState(old_state),
            TransitionMarkFromState(new_state));
@@ -100,13 +88,23 @@
     PrintF("]\n");
   }
 }
-#endif  // DEBUG
 
+#define TRACE_GENERIC_IC(type, reason)                          \
+  do {                                                          \
+    if (FLAG_trace_ic) {                                        \
+      PrintF("[%s patching generic stub in ", type);            \
+      JavaScriptFrame::PrintTop(stdout, false, true);           \
+      PrintF(" (%s)]\n", reason);                               \
+    }                                                           \
+  } while (false)
+
+#else
+#define TRACE_GENERIC_IC(type, reason)
+#endif  // DEBUG
 
 #define TRACE_IC(type, name, old_state, new_target)             \
   ASSERT((TraceIC(type, name, old_state, new_target), true))
 
-
 IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
   // To improve the performance of the (much used) IC code, we unfold
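
With --trace_ic enabled, the new TRACE_GENERIC_IC macro prints one bracketed
line each time an IC is patched to its generic stub, in the form
[KeyedStoreIC patching generic stub in <frame> (max polymorph exceeded)],
where the frame text comes from JavaScriptFrame::PrintTop and the reason is
one of the string literals passed at the call sites below.
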
@@ -137,7 +135,7 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() {
+Address IC::OriginalCodeAddress() const {
   HandleScope scope;
   // Compute the JavaScript frame for the frame pointer of this IC
   // structure. We need this to be able to find the function
@@ -914,7 +912,7 @@
             name, receiver, holder, lookup->GetFieldIndex());
         break;
       case CONSTANT_FUNCTION: {
-        Handle<Object> constant(lookup->GetConstantFunction());
+        Handle<JSFunction> constant(lookup->GetConstantFunction());
         code = isolate()->stub_cache()->ComputeLoadConstant(
             name, receiver, holder, constant);
         break;
@@ -1123,6 +1121,8 @@
           stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
         }
       }
+    } else {
+      TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
     }
     if (!stub.is_null()) set_target(*stub);
   }
@@ -1163,7 +1163,7 @@
             name, receiver, holder, lookup->GetFieldIndex());
         break;
       case CONSTANT_FUNCTION: {
-        Handle<Object> constant(lookup->GetConstantFunction());
+        Handle<JSFunction> constant(lookup->GetConstantFunction());
         code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
             name, receiver, holder, constant);
         break;
@@ -1473,6 +1473,7 @@
   // via megamorphic stubs, since they don't have a map in their relocation info
   // and so the stubs can't be harvested for the object needed for a map check.
   if (target()->type() != NORMAL) {
+    TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
     return generic_stub;
   }
 
@@ -1494,12 +1495,14 @@
   if (!map_added) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
+    TRACE_GENERIC_IC("KeyedIC", "same map added twice");
     return generic_stub;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+    TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
     return generic_stub;
   }
 
@@ -1685,6 +1688,8 @@
           }
           stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
         }
+      } else {
+        TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
       }
     }
     if (!stub.is_null()) set_target(*stub);
@@ -2315,6 +2320,7 @@
     case SMIS: return "SMIS";
     case HEAP_NUMBERS: return "HEAP_NUMBERS";
     case OBJECTS: return "OBJECTS";
+    case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
     case SYMBOLS: return "SYMBOLS";
     case STRINGS: return "STRINGS";
     case GENERIC: return "GENERIC";
@@ -2329,19 +2335,38 @@
                                         bool has_inlined_smi_code,
                                         Handle<Object> x,
                                         Handle<Object> y) {
-  if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
-    return GENERIC;
+  switch (state) {
+    case UNINITIALIZED:
+      if (x->IsSmi() && y->IsSmi()) return SMIS;
+      if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+      if (!Token::IsEqualityOp(op_)) return GENERIC;
+      if (x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+      if (x->IsString() && y->IsString()) return STRINGS;
+      if (x->IsJSObject() && y->IsJSObject()) {
+        if (Handle<JSObject>::cast(x)->map() ==
+            Handle<JSObject>::cast(y)->map() &&
+            Token::IsEqualityOp(op_)) {
+          return KNOWN_OBJECTS;
+        } else {
+          return OBJECTS;
+        }
+      }
+      return GENERIC;
+    case SMIS:
+      return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
+          ? HEAP_NUMBERS
+          : GENERIC;
+    case SYMBOLS:
+      ASSERT(Token::IsEqualityOp(op_));
+      return x->IsString() && y->IsString() ? STRINGS : GENERIC;
+    case HEAP_NUMBERS:
+    case STRINGS:
+    case OBJECTS:
+    case KNOWN_OBJECTS:
+    case GENERIC:
+      return GENERIC;
   }
-  if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
-  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
-      x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
-  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
-  if (state == UNINITIALIZED &&
-      x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
-  if ((state == UNINITIALIZED || state == SYMBOLS) &&
-      x->IsString() && y->IsString()) return STRINGS;
-  if (state == UNINITIALIZED &&
-      x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+  UNREACHABLE();
   return GENERIC;
 }
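
The new KNOWN_OBJECTS state pays off when one equality compare keeps seeing
objects with the same hidden class, e.g. `a === b` over instances of a single
constructor: the patched stub then needs only two map checks and a pointer
subtraction (see ICCompareStub::GenerateKnownObjects in the MIPS hunk below).
A standalone model of the object branch, with simplified stand-in types:

    enum State { UNINITIALIZED, OBJECTS, KNOWN_OBJECTS, GENERIC };
    struct Obj { const void* map; };  // stand-in for a JSObject and its map

    State TargetObjectState(State state, const Obj& x, const Obj& y) {
      if (state == UNINITIALIZED) {
        // Same map on both sides: specialize; otherwise generic objects.
        return (x.map == y.map) ? KNOWN_OBJECTS : OBJECTS;
      }
      return GENERIC;  // any later miss degrades to the generic stub
    }
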
 
diff --git a/src/ic.h b/src/ic.h
index 81aa6b7..94e83dc 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -91,10 +91,13 @@
   // Construct the IC structure with the given number of extra
   // JavaScript frames on the stack.
   IC(FrameDepth depth, Isolate* isolate);
+  virtual ~IC() {}
 
   // Get the call-site target; used for determining the state.
-  Code* target() { return GetTargetAtAddress(address()); }
-  inline Address address();
+  Code* target() const { return GetTargetAtAddress(address()); }
+  inline Address address() const;
+
+  virtual bool IsGeneric() const { return false; }
 
   // Compute the current IC state based on the target stub, receiver and name.
   static State StateFrom(Code* target, Object* receiver, Object* name);
@@ -139,13 +142,15 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Computes the address in the original code when the code running is
   // containing break points (calls to DebugBreakXXX builtins).
-  Address OriginalCodeAddress();
+  Address OriginalCodeAddress() const;
 #endif
 
   // Set the call-site target.
   void set_target(Code* code) { SetTargetAtAddress(address(), code); }
 
 #ifdef DEBUG
+  char TransitionMarkFromState(IC::State state);
+
   void TraceIC(const char* type,
                Handle<Object> name,
                State old_state,
@@ -452,6 +457,10 @@
       bool is_js_array,
       ElementsKind elements_kind);
 
+  virtual bool IsGeneric() const {
+    return target() == *generic_stub();
+  }
+
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
 
@@ -477,7 +486,7 @@
   Handle<Code> megamorphic_stub() {
     return isolate()->builtins()->KeyedLoadIC_Generic();
   }
-  Handle<Code> generic_stub() {
+  Handle<Code> generic_stub() const {
     return isolate()->builtins()->KeyedLoadIC_Generic();
   }
   Handle<Code> pre_monomorphic_stub() {
@@ -595,6 +604,11 @@
       bool is_js_array,
       ElementsKind elements_kind);
 
+  virtual bool IsGeneric() const {
+    return target() == *generic_stub() ||
+        target() == *generic_stub_strict();
+  }
+
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
 
@@ -632,10 +646,10 @@
   Handle<Code> megamorphic_stub_strict() {
     return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
   }
-  Handle<Code> generic_stub() {
+  Handle<Code> generic_stub() const {
     return isolate()->builtins()->KeyedStoreIC_Generic();
   }
-  Handle<Code> generic_stub_strict() {
+  Handle<Code> generic_stub_strict() const {
     return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
   }
   Handle<Code> non_strict_arguments_stub() {
@@ -710,6 +724,7 @@
     SYMBOLS,
     STRINGS,
     OBJECTS,
+    KNOWN_OBJECTS,
     GENERIC
   };
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index fb3ac56..a864c34 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -619,8 +619,7 @@
   }
 
   void AddCandidate(JSFunction* function) {
-    ASSERT(function->unchecked_code() ==
-           function->unchecked_shared()->unchecked_code());
+    ASSERT(function->code() == function->shared()->code());
 
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
@@ -640,15 +639,15 @@
     while (candidate != NULL) {
       next_candidate = GetNextCandidate(candidate);
 
-      SharedFunctionInfo* shared = candidate->unchecked_shared();
+      SharedFunctionInfo* shared = candidate->shared();
 
-      Code* code = shared->unchecked_code();
+      Code* code = shared->code();
       MarkBit code_mark = Marking::MarkBitFrom(code);
       if (!code_mark.Get()) {
         shared->set_code(lazy_compile);
         candidate->set_code(lazy_compile);
       } else {
-        candidate->set_code(shared->unchecked_code());
+        candidate->set_code(shared->code());
       }
 
       // We are in the middle of a GC cycle so the write barrier in the code
@@ -674,7 +673,7 @@
       next_candidate = GetNextCandidate(candidate);
       SetNextCandidate(candidate, NULL);
 
-      Code* code = candidate->unchecked_code();
+      Code* code = candidate->code();
       MarkBit code_mark = Marking::MarkBitFrom(code);
       if (!code_mark.Get()) {
         candidate->set_code(lazy_compile);
@@ -702,7 +701,7 @@
 
   static SharedFunctionInfo** GetNextCandidateField(
       SharedFunctionInfo* candidate) {
-    Code* code = candidate->unchecked_code();
+    Code* code = candidate->code();
     return reinterpret_cast<SharedFunctionInfo**>(
         code->address() + Code::kNextCodeFlushingCandidateOffset);
   }
@@ -884,8 +883,6 @@
     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
       IC::Clear(rinfo->pc());
-      // Please note targets for cleared inline cached do not have to be
-      // marked since they are contained in HEAP->non_monomorphic_cache().
       target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     } else {
       if (FLAG_cleanup_code_caches_at_gc &&
@@ -894,9 +891,10 @@
           target->has_function_cache()) {
         CallFunctionStub::Clear(heap, rinfo->pc());
       }
-      MarkBit code_mark = Marking::MarkBitFrom(target);
-      heap->mark_compact_collector()->MarkObject(target, code_mark);
     }
+    MarkBit code_mark = Marking::MarkBitFrom(target);
+    heap->mark_compact_collector()->MarkObject(target, code_mark);
+
     heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
 
@@ -1037,12 +1035,12 @@
 
 
   inline static bool IsCompiled(JSFunction* function) {
-    return function->unchecked_code() !=
+    return function->code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
   inline static bool IsCompiled(SharedFunctionInfo* function) {
-    return function->unchecked_code() !=
+    return function->code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
@@ -1051,8 +1049,7 @@
 
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    MarkBit code_mark =
-        Marking::MarkBitFrom(function->unchecked_code());
+    MarkBit code_mark = Marking::MarkBitFrom(function->code());
     if (code_mark.Get()) {
       if (!Marking::MarkBitFrom(shared_info).Get()) {
         shared_info->set_code_age(0);
@@ -1061,7 +1058,7 @@
     }
 
     // We do not flush code for optimized functions.
-    if (function->code() != shared_info->unchecked_code()) {
+    if (function->code() != shared_info->code()) {
       return false;
     }
 
@@ -1072,7 +1069,7 @@
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
     MarkBit code_mark =
-        Marking::MarkBitFrom(shared_info->unchecked_code());
+        Marking::MarkBitFrom(shared_info->code());
     if (code_mark.Get()) {
       return false;
     }
@@ -1085,16 +1082,24 @@
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
-    if (function_data->IsFunctionTemplateInfo()) return false;
+    if (function_data->IsFunctionTemplateInfo()) {
+      return false;
+    }
 
     // Only flush code for functions.
-    if (shared_info->code()->kind() != Code::FUNCTION) return false;
+    if (shared_info->code()->kind() != Code::FUNCTION) {
+      return false;
+    }
 
     // Function must be lazy compilable.
-    if (!shared_info->allows_lazy_compilation()) return false;
+    if (!shared_info->allows_lazy_compilation()) {
+      return false;
+    }
 
     // If this is a full script wrapped in a function we do not flush the code.
-    if (shared_info->is_toplevel()) return false;
+    if (shared_info->is_toplevel()) {
+      return false;
+    }
 
     // Age this shared function info.
     if (shared_info->code_age() < kCodeAgeThreshold) {
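
The flushing predicate above is age-based: code that passes the other checks
is not flushed immediately but aged once per mark-compact cycle until it
crosses kCodeAgeThreshold. A compact model of that policy (field names and
threshold value assumed for illustration; the real ones live in V8):

    struct SharedInfoModel {
      int code_age;
      bool compiled;
    };

    const int kAgeThreshold = 5;  // assumed, not V8's real constant

    bool ShouldFlush(SharedInfoModel* s) {
      if (!s->compiled) return false;  // nothing to flush
      if (s->code_age < kAgeThreshold) {
        s->code_age++;                 // not old enough yet: age it instead
        return false;
      }
      return true;                     // flush back to the lazy-compile stub
    }
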
@@ -1267,30 +1272,12 @@
     }
 
     if (!flush_code_candidate) {
-      Code* code = jsfunction->unchecked_shared()->unchecked_code();
+      Code* code = jsfunction->shared()->code();
       MarkBit code_mark = Marking::MarkBitFrom(code);
-      heap->mark_compact_collector()->MarkObject(code, code_mark);
+      collector->MarkObject(code, code_mark);
 
-      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        // For optimized functions we should retain both non-optimized version
-        // of it's code and non-optimized version of all inlined functions.
-        // This is required to support bailing out from inlined code.
-        DeoptimizationInputData* data =
-            reinterpret_cast<DeoptimizationInputData*>(
-                jsfunction->unchecked_code()->unchecked_deoptimization_data());
-
-        FixedArray* literals = data->UncheckedLiteralArray();
-
-        for (int i = 0, count = data->InlinedFunctionCount()->value();
-             i < count;
-             i++) {
-          JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
-          Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
-          MarkBit inlined_code_mark =
-              Marking::MarkBitFrom(inlined_code);
-          heap->mark_compact_collector()->MarkObject(
-              inlined_code, inlined_code_mark);
-        }
+      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        collector->MarkInlinedFunctionsCode(jsfunction->code());
       }
     }
 
@@ -1415,11 +1402,7 @@
       : collector_(collector) {}
 
   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-      Code* code = it.frame()->unchecked_code();
-      MarkBit code_bit = Marking::MarkBitFrom(code);
-      collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
-    }
+    collector_->PrepareThreadForCodeFlushing(isolate, top);
   }
 
  private:
@@ -1441,8 +1424,8 @@
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
       MarkBit shared_mark = Marking::MarkBitFrom(shared);
-      MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
-      collector_->MarkObject(shared->unchecked_code(), code_mark);
+      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+      collector_->MarkObject(shared->code(), code_mark);
       collector_->MarkObject(shared, shared_mark);
     }
   }
@@ -1452,6 +1435,44 @@
 };
 
 
+void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
+  // For optimized functions we should retain both the non-optimized version
+  // of its code and the non-optimized versions of all inlined functions.
+  // This is required to support bailing out from inlined code.
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+
+  FixedArray* literals = data->LiteralArray();
+
+  for (int i = 0, count = data->InlinedFunctionCount()->value();
+       i < count;
+       i++) {
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    Code* inlined_code = inlined->shared()->code();
+    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
+    MarkObject(inlined_code, inlined_code_mark);
+  }
+}
+
+
+void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
+                                                        ThreadLocalTop* top) {
+  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    // Note: for a frame that has a pending lazy deoptimization,
+    // StackFrame::unchecked_code will return a non-optimized code object
+    // for the outermost function, while StackFrame::LookupCode will return
+    // the actual optimized code object.
+    StackFrame* frame = it.frame();
+    Code* code = frame->unchecked_code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkObject(code, code_mark);
+    if (frame->is_optimized()) {
+      MarkInlinedFunctionsCode(frame->LookupCode());
+    }
+  }
+}
+
+
 void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());
 
@@ -1479,11 +1500,8 @@
 
   // Make sure we are not referencing the code from the stack.
   ASSERT(this == heap()->mark_compact_collector());
-  for (StackFrameIterator it; !it.done(); it.Advance()) {
-    Code* code = it.frame()->unchecked_code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
-    MarkObject(code, code_mark);
-  }
+  PrepareThreadForCodeFlushing(heap()->isolate(),
+                               heap()->isolate()->thread_local_top());
 
   // Iterate the archived stacks in all threads to check if
   // the code is referenced.
@@ -2081,6 +2099,24 @@
 
   PrepareForCodeFlushing();
 
+  if (was_marked_incrementally_) {
+    // There is no write barrier on cells so we have to scan them now at the end
+    // of the incremental marking.
+    {
+      HeapObjectIterator cell_iterator(heap()->cell_space());
+      HeapObject* cell;
+      while ((cell = cell_iterator.Next()) != NULL) {
+        ASSERT(cell->IsJSGlobalPropertyCell());
+        if (IsMarked(cell)) {
+          int offset = JSGlobalPropertyCell::kValueOffset;
+          StaticMarkingVisitor::VisitPointer(
+              heap(),
+              reinterpret_cast<Object**>(cell->address() + offset));
+        }
+      }
+    }
+  }
+
   RootMarkingVisitor root_visitor(heap());
   MarkRoots(&root_visitor);
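
The rescan above restores the tri-color invariant for cells: stores into
JSGlobalPropertyCells have no write barrier, so during incremental marking a
white object can end up referenced only from an already-black cell. A minimal
model of the repair step (hypothetical types):

    #include <vector>

    struct Node { bool marked = false; Node* slot = nullptr; };

    // Re-grey values hidden behind marked cells before sweeping.
    void RescanCells(const std::vector<Node*>& cells) {
      for (Node* cell : cells) {
        if (cell->marked && cell->slot != nullptr) {
          cell->slot->marked = true;
        }
      }
    }
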
 
@@ -3673,6 +3709,7 @@
 #endif
   SweeperType how_to_sweep =
       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 254f175..e0a7d94 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -383,6 +383,10 @@
 };
 
 
+// Defined in isolate.h.
+class ThreadLocalTop;
+
+
 // -------------------------------------------------------------------------
 // Mark-Compact collector
 class MarkCompactCollector {
@@ -603,6 +607,14 @@
   friend class CodeMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
 
+  // Mark non-optimized code for functions inlined into the given optimized
+  // code. This will prevent it from being flushed.
+  void MarkInlinedFunctionsCode(Code* code);
+
+  // Mark code objects that are active on the stack to prevent them
+  // from being flushed.
+  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
+
   void PrepareForCodeFlushing();
 
   // Marking operations for objects reachable from roots.
diff --git a/src/messages.js b/src/messages.js
index 5a3f12e..5310938 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -246,6 +246,7 @@
       "unprotected_const",            ["Illegal const declaration in unprotected statement context."],
       "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
       "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
+      "harmony_const_assign",         ["Assignment to constant variable."],
     ];
     var messages = { __proto__ : null };
     for (var i = 0; i < messagesDictionary.length; i += 2) {
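
The new harmony_const_assign entry surfaces under the harmony scoping flags:
assigning to a const binding, e.g. `const x = 0; x = 1;`, then throws a
TypeError carrying this text, where classic const semantics silently ignored
the write.
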
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index e7dda3f..47f24a0 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -255,21 +255,61 @@
 }
 
 
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  // [sp]: constant elements.
-  // [sp + kPointerSize]: literal index.
-  // [sp + (2 * kPointerSize)]: literals array.
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  // a3: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
 
   // All sizes here are multiples of kPointerSize.
   int elements_size = 0;
-  if (length_ > 0) {
-    elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length_)
-        : FixedArray::SizeFor(length_);
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
   }
   int size = JSArray::kSize + elements_size;
 
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        fail,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    ASSERT((elements_size % kPointerSize) == 0);
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+  }
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: constant elements.
+  // [sp + kPointerSize]: literal index.
+  // [sp + (2 * kPointerSize)]: literals array.
+
   // Load boilerplate object into a3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -282,17 +322,42 @@
   __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
   __ Branch(&slow_case, eq, a3, Operand(t1));
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
+    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ DropAndRet(3);
+
+    __ bind(&check_fast_elements);
+    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+    __ Branch(&double_elements, ne, v0, Operand(t1));
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ DropAndRet(3);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
       message = "Expected (writable) fixed double array";
       expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -304,35 +369,7 @@
     __ pop(a3);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  // Return new object in v0.
-  __ AllocateInNewSpace(size,
-                        v0,
-                        a1,
-                        a2,
-                        &slow_case,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ lw(a1, FieldMemOperand(a3, i));
-      __ sw(a1, FieldMemOperand(v0, i));
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ Addu(a2, v0, Operand(JSArray::kSize));
-    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    ASSERT((elements_size % kPointerSize) == 0);
-    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
-  }
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
 
   // Return and remove the on-stack parameters.
   __ Addu(sp, sp, Operand(3 * kPointerSize));
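
With CLONE_ANY_ELEMENTS the stub no longer bakes one elements kind into the
compiled code; it dispatches on the boilerplate's elements map at run time,
and in the copy-on-write case clones with length 0 so the elements array is
shared. A sketch of that dispatch, using the mode names from the diff:

    enum Mode { CLONE_ELEMENTS, CLONE_DOUBLE_ELEMENTS, COPY_ON_WRITE_ELEMENTS };
    enum MapKind { kFixedCOWArrayMap, kFixedArrayMap, kFixedDoubleArrayMap };

    // Stands in for the root-index comparisons on the elements map.
    Mode PickMode(MapKind elements_map) {
      if (elements_map == kFixedCOWArrayMap) return COPY_ON_WRITE_ELEMENTS;
      if (elements_map == kFixedArrayMap) return CLONE_ELEMENTS;
      return CLONE_DOUBLE_ELEMENTS;
    }
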
@@ -343,6 +380,51 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: object literal flags.
+  // [sp + kPointerSize]: constant properties.
+  // [sp + (2 * kPointerSize)]: literal index.
+  // [sp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, t0, a3);
+  __ lw(a3, MemOperand(a3));
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case, eq, a3, Operand(t0));
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ lw(a1, FieldMemOperand(a3, i));
+    __ sw(a1, FieldMemOperand(a0, i));
+  }
+
+  // Return and remove the on-stack parameters.
+  __ Drop(4);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
+
+
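
FastCloneShallowObjectStub replaces a runtime call for small, flat object
literals: once the boilerplate is known to have only fast properties and a
statically determined instance size, the clone is a bump allocation plus a
word-by-word copy of the header and in-object properties. The same idea as
plain C++ (hypothetical heap layout, not V8's):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct BumpHeap {
      std::uint8_t* top;
      std::uint8_t* limit;
      void* Allocate(std::size_t size) {
        if (top + size > limit) return nullptr;  // slow case: runtime call
        void* result = top;
        top += size;
        return result;
      }
    };

    void* CloneShallow(BumpHeap* heap, const void* boilerplate,
                       std::size_t size) {
      void* clone = heap->Allocate(size);
      if (clone == nullptr) return nullptr;
      std::memcpy(clone, boilerplate, size);  // header + in-object fields
      return clone;
    }
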
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -3510,113 +3592,218 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
+  CpuFeatures::Scope fpu_scope(FPU);
+  const Register base = a1;
+  const Register exponent = a2;
+  const Register heapnumbermap = t1;
+  const Register heapnumber = v0;
+  const DoubleRegister double_base = f2;
+  const DoubleRegister double_exponent = f4;
+  const DoubleRegister double_result = f0;
+  const DoubleRegister double_scratch = f6;
+  const FPURegister single_scratch = f8;
+  const Register scratch = t5;
+  const Register scratch2 = t3;
 
-  if (CpuFeatures::IsSupported(FPU)) {
-    CpuFeatures::Scope scope(FPU);
-
-    Label base_not_smi;
-    Label exponent_not_smi;
-    Label convert_exponent;
-
-    const Register base = a0;
-    const Register exponent = a2;
-    const Register heapnumbermap = t1;
-    const Register heapnumber = s0;  // Callee-saved register.
-    const Register scratch = t2;
-    const Register scratch2 = t3;
-
-    // Alocate FP values in the ABI-parameter-passing regs.
-    const DoubleRegister double_base = f12;
-    const DoubleRegister double_exponent = f14;
-    const DoubleRegister double_result = f0;
-    const DoubleRegister double_scratch = f2;
-
-    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+  Label call_runtime, done, exponent_not_smi, int_exponent;
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
     __ lw(base, MemOperand(sp, 1 * kPointerSize));
     __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
 
-    // Convert base to double value and store it in f0.
-    __ JumpIfNotSmi(base, &base_not_smi);
-    // Base is a Smi. Untag and convert it.
-    __ SmiUntag(base);
-    __ mtc1(base, double_scratch);
-    __ cvt_d_w(double_base, double_scratch);
-    __ Branch(&convert_exponent);
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
 
-    __ bind(&base_not_smi);
+    __ JumpIfSmi(base, &base_is_smi);
     __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-    // Base is a heapnumber. Load it into double register.
-    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
 
-    __ bind(&convert_exponent);
+    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent);
+
+    __ bind(&base_is_smi);
+    __ SmiUntag(base);
+    __ mtc1(base, single_scratch);
+    __ cvt_d_w(double_base, single_scratch);
+    __ bind(&unpack_exponent);
+
     __ JumpIfNotSmi(exponent, &exponent_not_smi);
     __ SmiUntag(exponent);
-
-    // The base is in a double register and the exponent is
-    // an untagged smi. Allocate a heap number and call a
-    // C function for integer exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(ra);
-    __ PrepareCallCFunction(1, 1, scratch);
-    __ SetCallCDoubleArguments(double_base, exponent);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ CallCFunction(
-          ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
-      __ pop(ra);
-      __ GetCFunctionDoubleResult(double_result);
-    }
-    __ sdc1(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(v0, heapnumber);
-    __ DropAndRet(2 * kPointerSize);
+    __ jmp(&int_exponent);
 
     __ bind(&exponent_not_smi);
     __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-    // Exponent is a heapnumber. Load it into double register.
     __ ldc1(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    // Base is already in double_base.
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
+    __ jmp(&int_exponent);
 
-    // The base and the exponent are in double registers.
-    // Allocate a heap number and call a C function for
-    // double exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(ra);
-    __ PrepareCallCFunction(0, 2, scratch);
-    // ABI (o32) for func(double a, double b): a in f12, b in f14.
-    ASSERT(double_base.is(f12));
-    ASSERT(double_exponent.is(f14));
-    __ SetCallCDoubleArguments(double_base, double_exponent);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()),
-          0,
-          2);
-      __ pop(ra);
-      __ GetCFunctionDoubleResult(double_result);
-    }
-    __ sdc1(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(v0, heapnumber);
-    __ DropAndRet(2 * kPointerSize);
+    __ bind(&exponent_not_smi);
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+  if (exponent_type_ != INTEGER) {
+    Label int_exponent_convert;
+    // Detect integer exponents stored as double.
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       single_scratch,
+                       double_exponent,
+                       scratch,
+                       scratch2,
+                       kCheckForInexactConversion);
+    // scratch2 == 0 means there was no conversion error.
+    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half;
+
+      // Test for 0.5.
+      __ Move(double_scratch, 0.5);
+      __ BranchF(USE_DELAY_SLOT,
+                 &not_plus_half,
+                 NULL,
+                 ne,
+                 double_exponent,
+                 double_scratch);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ Move(double_scratch, -V8_INFINITY);
+      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+      __ neg_d(double_result, double_scratch);
+
+      // Add +0 to convert -0 to +0.
+      __ add_d(double_scratch, double_base, kDoubleRegZero);
+      __ sqrt_d(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&not_plus_half);
+      __ Move(double_scratch, -0.5);
+      __ BranchF(USE_DELAY_SLOT,
+                 &call_runtime,
+                 NULL,
+                 ne,
+                 double_exponent,
+                 double_scratch);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ Move(double_scratch, -V8_INFINITY);
+      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+      __ Move(double_result, kDoubleRegZero);
+
+      // Add +0 to convert -0 to +0.
+      __ add_d(double_scratch, double_base, kDoubleRegZero);
+      __ Move(double_result, 1);
+      __ sqrt_d(double_scratch, double_scratch);
+      __ div_d(double_result, double_result, double_scratch);
+      __ jmp(&done);
+    }
+
+    __ push(ra);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ jmp(&done);
+
+    __ bind(&int_exponent_convert);
+    __ mfc1(exponent, single_scratch);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  __ mov(scratch, exponent);  // Back up exponent.
+  __ mov_d(double_scratch, double_base);  // Back up base.
+  __ Move(double_result, 1.0);
+
+  // Get absolute value of exponent.
+  Label positive_exponent;
+  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
+  __ Subu(scratch, zero_reg, scratch);
+  __ bind(&positive_exponent);
+
+  Label while_true, no_carry, loop_end;
+  __ bind(&while_true);
+
+  __ And(scratch2, scratch, 1);
+
+  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
+  __ mul_d(double_result, double_result, double_scratch);
+  __ bind(&no_carry);
+
+  __ sra(scratch, scratch, 1);
+
+  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
+  __ mul_d(double_scratch, double_scratch, double_scratch);
+
+  __ Branch(&while_true);
+
+  __ bind(&loop_end);
+
+  __ Branch(&done, ge, exponent, Operand(zero_reg));
+  __ Move(double_scratch, 1.0);
+  __ div_d(double_result, double_scratch, double_result);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it to the exponent value before bailing out.
+  __ mtc1(exponent, single_scratch);
+  __ cvt_d_w(double_exponent, single_scratch);
+
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as heap number in exponent.
+    __ bind(&done);
+    __ AllocateHeapNumber(
+        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    ASSERT(heapnumber.is(v0));
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ DropAndRet(2);
+  } else {
+    __ push(ra);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret();
+  }
 }
 
 
@@ -4759,8 +4946,12 @@
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   // First check for flat string.  None of the following string type tests will
-  // succeed if kIsNotStringTag is set.
-  __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  // succeed if the subject is not a string or is a short external string.
+  __ And(a1,
+         a0,
+         Operand(kIsNotStringMask |
+                 kStringRepresentationMask |
+                 kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ Branch(&seq_string, eq, a1, Operand(zero_reg));
 
@@ -4774,16 +4965,17 @@
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
-  __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
+  __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
 
-  // Catch non-string subject (should already have been guarded against).
-  STATIC_ASSERT(kNotStringTag != 0);
-  __ And(at, a1, Operand(kIsNotStringMask));
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ Branch(&runtime, ne, at, Operand(zero_reg));
 
   // String is sliced.
@@ -4804,7 +4996,7 @@
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ And(at, a0, Operand(kStringRepresentationMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
 
   __ bind(&seq_string);
   // subject: Subject string
@@ -5030,6 +5222,29 @@
   __ Addu(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
+  // External string.  Short external strings have already been ruled out.
+  // a0: scratch
+  __ bind(&external_string);
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, a0, Operand(kIsIndirectStringMask));
+    __ Assert(eq,
+              "external string expected, but not found",
+              at,
+              Operand(zero_reg));
+  }
+  __ lw(subject,
+        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Subu(subject,
+          subject,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&seq_string);
+
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
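
The external-string path added above relies on a pointer adjustment: after
loading the resource data, the code backs the pointer up by the sequential
string header size so the existing sequential-string loads index both
representations identically. In isolation (header size assumed for
illustration; kHeapObjectTag is V8's pointer tag of 1):

    #include <cstdint>

    const std::intptr_t kHeapObjectTag = 1;
    const std::intptr_t kSeqHeaderSize = 12;  // assumed, not V8's real value

    // Offset-wise, the result now looks like a sequential string start.
    const std::uint8_t* FakeSequential(const std::uint8_t* resource_data) {
      return resource_data - (kSeqHeaderSize - kHeapObjectTag);
    }
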
@@ -5288,77 +5503,14 @@
   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
   __ Branch(index_out_of_range_, ls, t0, Operand(index_));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t0, result_, Operand(kStringRepresentationMask));
-  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+  __ sra(index_, index_, kSmiTagSize);
 
-  // Handle non-flat strings.
-  __ And(result_, result_, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
-  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
+  StringCharLoadGenerator::Generate(masm,
+                                    object_,
+                                    index_,
+                                    result_,
+                                    &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
-  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
-  __ Branch(&call_runtime_, ne, result_, Operand(t0));
-
-  // Get the first of the two parts.
-  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
-  __ Addu(index_, index_, result_);
-  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // Check that parent is not an external string. Go to runtime otherwise.
-  // Note that if the original string is a cons or slice with an external
-  // string as underlying string, we pass that unpacked underlying string with
-  // the adjusted index to the runtime function.
-  STATIC_ASSERT(kSeqStringTag == 0);
-
-  __ And(t0, result_, Operand(kStringRepresentationMask));
-  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ And(t0, result_, Operand(kStringEncodingMask));
-  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register. We can
-  // add without shifting since the smi tag size is the log2 of the
-  // number of bytes in a two-byte character.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
-  __ Addu(index_, object_, Operand(index_));
-  __ lhu(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
-  __ Branch(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-
-  __ srl(t0, index_, kSmiTagSize);
-  __ Addu(index_, object_, t0);
-
-  __ lbu(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
-
-  __ bind(&got_char_code);
   __ sll(result_, result_, kSmiTagSize);
   __ bind(&exit_);
 }
@@ -5407,6 +5559,7 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
+  __ sll(index_, index_, kSmiTagSize);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
 
@@ -6821,26 +6974,39 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  __ Push(a1, a0);
-  __ push(ra);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ And(a2, a1, a0);
+  __ JumpIfSmi(a2, &miss);
+  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a2, Operand(known_map_));
+  __ Branch(&miss, ne, a3, Operand(known_map_));
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                             masm->isolate());
+  __ Ret(USE_DELAY_SLOT);
+  __ subu(v0, a0, a1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(a1, a0);
+    __ push(ra);
+    __ Push(a1, a0);
     __ li(t0, Operand(Smi::FromInt(op_)));
     __ push(t0);
     __ CallExternalReference(miss, 3);
+    // Compute the entry point of the rewritten stub.
+    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ Pop(a1, a0, ra);
   }
-  // Compute the entry point of the rewritten stub.
-  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  // Restore registers.
-  __ pop(ra);
-  __ pop(a0);
-  __ pop(a1);
   __ Jump(a2);
 }
 
@@ -7463,7 +7629,8 @@
   // Update the write barrier for the array store.
   __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
                  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
 
   // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
   // FAST_ELEMENTS, and value is Smi.
@@ -7472,14 +7639,16 @@
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(t2, t1, t2);
   __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
 
   // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
   __ bind(&double_elements);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
                                  &slow_elements);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
 }
 
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index c94e0fa..0b68384 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -310,6 +310,98 @@
   __ pop(ra);
 }
 
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ And(at, result, Operand(kIsIndirectStringMask));
+  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ And(at, result, Operand(kSlicedNotConsMask));
+  __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ sra(at, result, kSmiTagSize);
+  __ Addu(index, index, at);
+  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+  __ Branch(call_runtime, ne, result, Operand(at));
+  // Get the first of the two strings and load its instance type.
+  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, result, Operand(kStringRepresentationMask));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+  // Prepare sequential strings: advance past the header to the character data.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Addu(string,
+          string,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found",
+        at, Operand(zero_reg));
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ And(at, result, Operand(kShortExternalStringMask));
+  __ Branch(call_runtime, ne, at, Operand(zero_reg));
+  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label ascii, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ And(at, result, Operand(kStringEncodingMask));
+  __ Branch(&ascii, ne, at, Operand(zero_reg));
+  // Two-byte string.
+  __ sll(at, index, 1);
+  __ Addu(at, string, at);
+  __ lhu(result, MemOperand(at));
+  __ jmp(&done);
+  __ bind(&ascii);
+  // Ascii string.
+  __ Addu(at, string, index);
+  __ lbu(result, MemOperand(at));
+  __ bind(&done);
+}
+
 #undef __
 
 } }  // namespace v8::internal
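For readers new to V8's string representations, the StringCharLoadGenerator::Generate code above dispatches over four shapes: sliced strings (a window into a parent), cons strings (a lazily concatenated pair, flat when the right part is empty), sequential strings, and external strings, each in a one-byte or two-byte encoding. A minimal C++ model of that dispatch follows; the structs are illustrative stand-ins rather than V8's object layout, and the runtime fallback is modeled as an exception:

    #include <cstdint>
    #include <stdexcept>

    enum Shape { kSequential, kExternal, kCons, kSliced };
    enum Encoding { kOneByte, kTwoByte };

    struct Str {
      Shape shape;
      Encoding encoding;
      const void* data;  // character payload (sequential or external)
      Str* first;        // cons: left part
      Str* second;       // cons: right part (empty iff the cons is flat)
      Str* parent;       // slice: underlying string
      int offset;        // slice: start index within the parent
      bool empty;
    };

    uint16_t CharAt(Str* s, int index) {
      // Unwrap at most one level of indirection, as the generated code does.
      if (s->shape == kSliced) {
        index += s->offset;  // adjust the index into the parent
        s = s->parent;
      } else if (s->shape == kCons) {
        // Only flat cons strings are handled inline; otherwise flatten in
        // the runtime (the call_runtime label above).
        if (!s->second->empty) throw std::runtime_error("call runtime");
        s = s->first;
      }
      // Short external strings also bail out to the runtime (omitted here).
      if (s->encoding == kTwoByte) {
        return static_cast<const uint16_t*>(s->data)[index];
      }
      return static_cast<const uint8_t*>(s->data)[index];
    }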
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 4549509..e704c4f 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -75,6 +75,21 @@
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 201e6b8..1e950e5 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1424,10 +1424,11 @@
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ lw(a3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->constant_properties()));
+  __ li(a1, Operand(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1436,10 +1437,15 @@
       : ObjectLiteral::kNoFlags;
   __ li(a0, Operand(Smi::FromInt(flags)));
   __ Push(a3, a2, a1, a0);
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1540,6 +1546,7 @@
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1549,7 +1556,7 @@
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
   __ li(a1, Operand(constant_elements));
   __ Push(a3, a2, a1);
-  if (constant_elements_values->map() ==
+  if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1564,10 +1571,9 @@
     ASSERT(constant_elements_kind == FAST_ELEMENTS ||
            constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
            FLAG_smi_only_arrays);
-    FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub::Mode mode = has_fast_elements
+      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
@@ -1589,65 +1595,30 @@
       __ push(v0);
       result_saved = true;
     }
+
     VisitForAccumulatorValue(subexpr);
 
-    __ lw(t6, MemOperand(sp));  // Copy of array literal.
-    __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
-    __ lw(a2, FieldMemOperand(t6, JSObject::kMapOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-
-    Label element_done;
-    Label double_elements;
-    Label smi_element;
-    Label slow_elements;
-    Label fast_elements;
-    __ CheckFastElements(a2, a3, &double_elements);
-
-    // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
-    __ JumpIfSmi(result_register(), &smi_element);
-    __ CheckFastSmiOnlyElements(a2, a3, &fast_elements);
-
-    // Storing into the array literal requires an elements transition. Call
-    // into the runtime.
-    __ bind(&slow_elements);
-    __ push(t6);  // Copy of array literal.
-    __ li(a1, Operand(Smi::FromInt(i)));
-    __ li(a2, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-    StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
-        ? kNonStrictMode : kStrictMode;
-    __ li(a3, Operand(Smi::FromInt(strict_mode_flag)));  // Strict mode.
-    __ Push(a1, result_register(), a2, a3);
-    __ CallRuntime(Runtime::kSetProperty, 5);
-    __ Branch(&element_done);
-
-      // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
-    __ bind(&double_elements);
-    __ li(a3, Operand(Smi::FromInt(i)));
-    __ StoreNumberToDoubleElements(result_register(), a3, t6, a1, t0, t1, t5,
-                                   t3, &slow_elements);
-    __ Branch(&element_done);
-
-    // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
-    __ bind(&fast_elements);
-    __ sw(result_register(), FieldMemOperand(a1, offset));
-    // Update the write barrier for the array store.
-
-    __ RecordWriteField(
-        a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
-        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-    __ Branch(&element_done);
-
-    // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-    // FAST_ELEMENTS, and value is Smi.
-    __ bind(&smi_element);
-    __ sw(result_register(), FieldMemOperand(a1, offset));
-    // Fall through
-
-    __ bind(&element_done);
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ lw(t2, MemOperand(sp));  // Copy of array literal.
+      __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
+      __ sw(result_register(), FieldMemOperand(a1, offset));
+      // Update the write barrier for the array store.
+      __ RecordWriteField(a1, offset, result_register(), a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+    } else {
+      __ lw(a1, MemOperand(sp));  // Copy of array literal.
+      __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+      __ li(a3, Operand(Smi::FromInt(i)));
+      __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
+      __ mov(a0, result_register());
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -2987,8 +2958,12 @@
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub;
-  __ CallStub(&stub);
+  if (CpuFeatures::IsSupported(FPU)) {
+    MathPowStub stub(MathPowStub::ON_STACK);
+    __ CallStub(&stub);
+  } else {
+    __ CallRuntime(Runtime::kMath_pow, 2);
+  }
   context()->Plug(v0);
 }
 
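The object-literal hunk above turns a blanket Runtime::kCreateObjectLiteralShallow call into a three-way choice: nested literals still take the full runtime path, small shallow literals with fast elements take the new FastCloneShallowObjectStub, and everything else keeps the shallow runtime call. The same predicate, restated and slightly simplified in plain C++ (the limit parameter stands in for FastCloneShallowObjectStub::kMaximumClonedProperties):

    enum CloneStrategy { kRuntimeDeep, kRuntimeShallow, kFastStub };

    CloneStrategy ChooseCloneStrategy(int depth, bool fast_elements,
                                      int properties_count, int max_cloned) {
      if (depth > 1) return kRuntimeDeep;          // nested literals
      if (!fast_elements || properties_count > max_cloned) {
        return kRuntimeShallow;                    // slow elements or too big
      }
      return kFastStub;                            // inline shallow clone
    }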
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index b057695..c240125 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1587,6 +1587,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
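The ic-mips.cc change records the concrete map on the compare stub once the IC has only ever seen JSObjects of a single map (the KNOWN_OBJECTS state); the specialized stub can then test maps and compare by identity rather than running a generic comparison. Roughly (an illustrative sketch, not the stub's actual code):

    // Both sides must still have the recorded map; otherwise the stub
    // misses back to the generic compare IC.
    bool KnownObjectsEqual(const void* x, const void* y,
                           const void* x_map, const void* y_map,
                           const void* known_map, bool* miss) {
      *miss = (x_map != known_map) || (y_map != known_map);
      return !*miss && x == y;
    }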
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index c0879bb..aba7516 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -291,7 +291,22 @@
   if (op->IsRegister()) {
     return ToRegister(op->index());
   } else if (op->IsConstantOperand()) {
-    __ li(scratch, ToOperand(op));
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsDouble()) {
+      Abort("EmitLoadRegister: Unsupported double immediate.");
+    } else {
+      ASSERT(r.IsTagged());
+      if (literal->IsSmi()) {
+        __ li(scratch, Operand(literal));
+      } else {
+        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+      }
+    }
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
     __ lw(scratch, ToMemOperand(op));
@@ -1162,8 +1177,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ li(ToRegister(instr->result()), Operand(instr->value()));
+  Handle<Object> value = instr->value();
+  if (value->IsSmi()) {
+    __ li(ToRegister(instr->result()), Operand(value));
+  } else {
+    __ LoadHeapObject(ToRegister(instr->result()),
+                      Handle<HeapObject>::cast(value));
+  }
 }
 
 
@@ -2039,7 +2059,7 @@
   // offset to the location of the map check.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(temp.is(t0));
-  __ li(InstanceofStub::right(), Operand(instr->function()));
+  __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 7;
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   Label before_push_delta;
@@ -2141,21 +2161,7 @@
 
   // Store the value.
   __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-
-  // Cells are always in the remembered set.
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    __ RecordWriteField(scratch,
-                        JSGlobalPropertyCell::kValueOffset,
-                        value,
-                        scratch2,
-                        kRAHasBeenSaved,
-                        kSaveFPRegs,
-                        OMIT_REMEMBERED_SET,
-                        check_needed);
-  }
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2175,6 +2181,10 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ lw(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+  }
 }
 
 
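This hunk, and the DoStoreContextSlot hunk right after it, add hole checks for context slots: a let/const-style binding that has not been initialized yet still holds the hole sentinel, and optimized code must deoptimize instead of observing it. (The lithium-mips.cc changes further down wire up AssignEnvironment so these instructions are allowed to deoptimize.) A minimal model of the added guard, with Deopt() and kTheHole standing in for DeoptimizeIf and the hole root value:

    inline void Deopt() {}  // stand-in: real code bails out to unoptimized code

    void* LoadContextSlot(void** context, int slot_index,
                          bool requires_hole_check, void* kTheHole) {
      void* result = context[slot_index];
      if (requires_hole_check && result == kTheHole) Deopt();
      return result;
    }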
@@ -2182,6 +2192,12 @@
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
   MemOperand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    Register scratch = scratch0();
+    __ lw(scratch, target);
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+  }
   __ sw(value, target);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     HType type = instr->hydrogen()->value()->type();
@@ -2233,7 +2249,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
   }
 }
 
@@ -2687,7 +2703,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  LoadHeapObject(result, instr->hydrogen()->closure());
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2757,7 +2773,7 @@
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(v0));
   __ mov(a0, v0);
-  __ li(a1, Operand(instr->function()));
+  __ LoadHeapObject(a1, instr->function());
   CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
 }
 
@@ -2942,11 +2958,11 @@
   __ And(scratch, result, Operand(HeapNumber::kSignMask));
 
   __ Move(double_scratch0(), 0.5);
-  __ add_d(input, input, double_scratch0());
+  __ add_d(double_scratch0(), input, double_scratch0());
 
   // Check sign of the result: if the sign changed, the input
   // value was in ]0.5, 0[ and the result should be -0.
-  __ mfc1(result, input.high());
+  __ mfc1(result, double_scratch0().high());
   __ Xor(result, result, Operand(scratch));
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // ARM uses 'mi' here, which is 'lt'
@@ -2966,7 +2982,7 @@
 
   __ EmitFPUTruncate(kRoundToMinusInf,
                      double_scratch0().low(),
-                     input,
+                     double_scratch0(),
                      result,
                      except_flag);
 
@@ -2996,69 +3012,54 @@
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+  ASSERT(!input.is(result));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ Move(temp, -V8_INFINITY);
+  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+  // Set up Infinity in the delay slot; result is overwritten if the
+  // branch is not taken.
+  __ neg_d(result, temp);
 
   // Add +0 to convert -0 to +0.
-  __ mtc1(zero_reg, double_scratch.low());
-  __ mtc1(zero_reg, double_scratch.high());
-  __ add_d(result, input, double_scratch);
+  __ add_d(result, input, kDoubleRegZero);
   __ sqrt_d(result, result);
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  Register scratch = scratch0();
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  if (exponent_type.IsDouble()) {
-    // Prepare arguments and call C function.
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left),
-                               ToDoubleRegister(right));
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 0, 2);
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(f4));
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(a2));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
+  ASSERT(ToDoubleRegister(instr->result()).is(f0));
+
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(a2, &no_deopt);
+    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
   } else if (exponent_type.IsInteger32()) {
-    ASSERT(ToRegister(right).is(a0));
-    // Prepare arguments and call C function.
-    __ PrepareCallCFunction(1, 1, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 1, 1);
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
   } else {
-    ASSERT(exponent_type.IsTagged());
-    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
-
-    Register right_reg = ToRegister(right);
-
-    // Check for smi on the right hand side.
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-
-    // Untag smi and convert it to a double.
-    __ SmiUntag(right_reg);
-    FPURegister single_scratch = double_scratch0();
-    __ mtc1(right_reg, single_scratch);
-    __ cvt_d_w(result_reg, single_scratch);
-    __ Branch(&call);
-
-    // Heap number map check.
-    __ bind(&non_smi);
-    __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
-    __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
-
-    // Prepare arguments and call C function.
-    __ bind(&call);
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 0, 2);
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
-  // Store the result in the result register.
-  __ GetCFunctionDoubleResult(result_reg);
 }
 
 
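Two things happen in the hunk above: DoPower now routes everything through MathPowStub variants (TAGGED, INTEGER, DOUBLE) instead of per-case C calls, and DoMathPowHalf special-cases an input of -Infinity before falling back to sqrt. The special case exists because ES5 15.8.2.13 defines Math.pow(-Infinity, 0.5) as +Infinity even though sqrt(-Infinity) is NaN; adding +0 first additionally normalizes -0 to +0. The same semantics in plain C++:

    #include <cmath>
    #include <limits>

    double PowHalf(double x) {
      // ES5 15.8.2.13: pow(-Inf, 0.5) is +Inf, although sqrt(-Inf) is NaN.
      if (x == -std::numeric_limits<double>::infinity()) {
        return std::numeric_limits<double>::infinity();
      }
      // Adding +0 maps -0 to +0, so the result for -0 is +0, not -0.
      return std::sqrt(x + 0.0);
    }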
@@ -3194,7 +3195,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(v0));
-  __ li(a1, Operand(instr->target()));
+  __ LoadHeapObject(a1, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3520,89 +3521,13 @@
     LStringCharCodeAt* instr_;
   };
 
-  Register temp = scratch1();
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
-
-  // Fetch the instance type of the receiver into result register.
-  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ And(temp, result, kIsIndirectStringMask);
-  __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ And(temp, result, kSlicedNotConsMask);
-  __ Branch(&cons_string, eq, temp, Operand(zero_reg));
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-  __ sra(temp, result, kSmiTagSize);
-  __ addu(index, index, temp);
-  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
-  __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
-  __ Branch(deferred->entry(), ne, result, Operand(temp));
-  // Get the first of the two strings and load its instance type.
-  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  // Note that if the original string is a cons or slice with an external
-  // string as underlying string, we pass that unpacked underlying string with
-  // the adjusted index to the runtime function.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(temp, result, Operand(kStringRepresentationMask));
-  __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ And(temp, result, Operand(kStringEncodingMask));
-  __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  __ Addu(result,
-          string,
-          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ sll(temp, index, 1);
-  __ Addu(result, result, temp);
-  __ lhu(result, MemOperand(result, 0));
-  __ Branch(&done);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ Addu(result,
-          string,
-          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ Addu(result, result, index);
-  __ lbu(result, MemOperand(result, 0));
-
-  __ bind(&done);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
 
@@ -4098,10 +4023,20 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Register reg = ToRegister(instr->InputAt(0));
-  DeoptimizeIf(ne, instr->environment(), reg,
-               Operand(instr->hydrogen()->target()));
+  Register reg = ToRegister(instr->value());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ li(at, Operand(Handle<Object>(cell)));
+    __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+    DeoptimizeIf(ne, instr->environment(), reg,
+                 Operand(at));
+  } else {
+    DeoptimizeIf(ne, instr->environment(), reg,
+                 Operand(target));
+  }
 }
 
 
@@ -4170,19 +4105,6 @@
 }
 
 
-void LCodeGen::LoadHeapObject(Register result,
-                              Handle<HeapObject> object) {
-  if (heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        factory()->NewJSGlobalPropertyCell(object);
-    __ li(result, Operand(cell));
-    __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
-  } else {
-    __ li(result, Operand(object));
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register temp1 = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));
@@ -4191,7 +4113,7 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(temp1, current_prototype);
+  __ LoadHeapObject(temp1, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
@@ -4203,7 +4125,7 @@
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(temp1, current_prototype);
+    __ LoadHeapObject(temp1, current_prototype);
   }
 
   // Check the holder map.
@@ -4216,15 +4138,32 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
 
+  // Deopt if the array literal boilerplate's ElementsKind differs from the
+  // expected one. The check isn't necessary if the boilerplate has
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
+    // Load map into a2.
+    __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    DeoptimizeIf(ne,
+                 instr->environment(),
+                 a2,
+                 Operand(boilerplate_elements_kind));
+  }
   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ li(a1, Operand(constant_elements));
+  // The boilerplate already exists; constant elements are never accessed.
+  // Pass an empty fixed array.
+  __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
   __ Push(a3, a2, a1);
 
   // Pick the right runtime function or stub to call.
@@ -4241,29 +4180,108 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(a2));
+  ASSERT(!result.is(a2));
+
+  // Increase the offset so that subsequent objects end up right after
+  // this one.
+  int current_offset = *offset;
+  int size = object->map()->instance_size();
+  *offset += size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  ASSERT(object->elements()->length() == 0 ||
+         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    __ lw(a2, FieldMemOperand(source, i));
+    __ sw(a2, FieldMemOperand(result, current_offset + i));
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ Addu(a2, result, Operand(*offset));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    } else {
+      __ li(a2, Operand(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    }
+  }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ push(a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
   ASSERT(ToRegister(instr->result()).is(v0));
+
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
   __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
   __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ li(a2, Operand(instr->hydrogen()->constant_properties()));
-  __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+  __ li(a2, Operand(constant_properties));
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  __ li(a1, Operand(Smi::FromInt(flags)));
   __ Push(t0, a3, a2, a1);
 
   // Pick the right runtime function to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
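EmitDeepCopy above copies a statically known object graph into a single freshly allocated block: each object's header and in-object fields are copied word by word, and a field referring to another literal object is patched to point at the running *offset, which is exactly where the recursive call will emit that child next. The traversal, modeled on flat byte buffers (Node is an illustrative stand-in, not a V8 type):

    #include <cstring>
    #include <utility>
    #include <vector>

    struct Node {
      int size;                                 // instance size in bytes
      const char* payload;                      // source bytes to copy
      std::vector<std::pair<int, Node*>> refs;  // (field offset, child)
    };

    void DeepCopy(const Node* obj, char* out, int* offset) {
      int current = *offset;
      *offset += obj->size;  // children land right after this object
      std::memcpy(out + current, obj->payload, obj->size);
      for (const std::pair<int, Node*>& ref : obj->refs) {
        // Patch the field to point at where the child is about to go.
        *reinterpret_cast<char**>(out + current + ref.first) = out + *offset;
        DeepCopy(ref.second, out, offset);
      }
    }

DoObjectLiteralFast then allocates total_size() bytes once (with a runtime fallback when new space is full) and lets the copy fill the block, which is what the ASSERT_EQ(size, offset) at its end checks.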
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index c67b46b..32d4fb3 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -316,6 +316,13 @@
                                        Handle<Map> type,
                                        Handle<String> name);
 
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 81a193a..f963ec9 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1152,6 +1152,13 @@
     LOperand* input = UseFixedDouble(instr->value(), f4);
     LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
     return MarkAsCall(DefineFixedDouble(result, f4), instr);
+  } else if (op == kMathPowHalf) {
+    // Input cannot be the same as the result.
+    // See lithium-codegen-mips.cc::DoMathPowHalf.
+    LOperand* input = UseFixedDouble(instr->value(), f8);
+    LOperand* temp = FixedTemp(f6);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+    return DefineFixedDouble(result, f4);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1165,8 +1172,6 @@
         return DefineAsRegister(result);
       case kMathRound:
         return AssignEnvironment(DefineAsRegister(result));
-      case kMathPowHalf:
-        return DefineAsRegister(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1401,9 +1406,9 @@
   LOperand* left = UseFixedDouble(instr->left(), f2);
   LOperand* right = exponent_type.IsDouble() ?
       UseFixedDouble(instr->right(), f4) :
-      UseFixed(instr->right(), a0);
+      UseFixed(instr->right(), a2);
   LPower* result = new LPower(left, right);
-  return MarkAsCall(DefineFixedDouble(result, f6),
+  return MarkAsCall(DefineFixedDouble(result, f0),
                     instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
@@ -1796,7 +1801,8 @@
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1810,7 +1816,8 @@
     context = UseRegister(instr->context());
     value = UseRegister(instr->value());
   }
-  return new LStoreContextSlot(context, value);
+  LInstruction* result = new LStoreContextSlot(context, value);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -2071,8 +2078,14 @@
 }
 
 
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+    HObjectLiteralGeneric* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr);
 }
 
 
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 40f3f7a..efc5e27 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -134,7 +134,8 @@
   V(NumberTagD)                                 \
   V(NumberTagI)                                 \
   V(NumberUntagD)                               \
-  V(ObjectLiteral)                              \
+  V(ObjectLiteralFast)                          \
+  V(ObjectLiteralGeneric)                       \
   V(OsrEntry)                                   \
   V(OuterContext)                               \
   V(Parameter)                                  \
@@ -1792,6 +1793,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return InputAt(0); }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1899,10 +1902,17 @@
 };
 
 
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
 };
 
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index c1161d7..cdacbf3 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -81,6 +81,19 @@
 }
 
 
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    li(result, Operand(cell));
+    lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    li(result, Operand(object));
+  }
+}
+
+
 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
   // Safepoints expect a block of kNumSafepointRegisters values on the
@@ -3555,7 +3568,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  li(a1, Operand(function));
+  LoadHeapObject(a1, function);
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
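The new MacroAssembler::LoadHeapObject, and the many call sites in this patch that switch to it from a plain li of a handle, deal with movable objects: a new-space object can be relocated by the scavenger, so its address must not be baked into generated code. Indirecting through a JSGlobalPropertyCell gives the code a stable address to load from, and the GC only has to keep the cell's value slot current. The idea in miniature (Cell is a stand-in for JSGlobalPropertyCell):

    struct Cell { void* value; };  // stable slot the GC keeps up to date

    void* LoadHeapObject(void* object, bool in_new_space,
                         Cell* (*new_cell)(void*)) {
      if (in_new_space) {
        Cell* cell = new_cell(object);  // code embeds the cell's address
        return cell->value;             // load through the stable cell
      }
      return object;  // old-space objects can be embedded directly
    }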
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 6b2a551..4e14fbf 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -262,6 +262,7 @@
                  Heap::RootListIndex index,
                  Condition cond, Register src1, const Operand& src2);
 
+  void LoadHeapObject(Register dst, Handle<HeapObject> object);
 
   // ---------------------------------------------------------------------------
   // GC Support
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 76452f0..a94e277 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -574,7 +574,7 @@
   // -----------------------------------
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
-  __ li(t1, Operand(function));
+  __ LoadHeapObject(t1, function);
   __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
@@ -1115,7 +1115,7 @@
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<Object> value,
+                                        Handle<JSFunction> value,
                                         Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
@@ -1127,7 +1127,7 @@
                       scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ li(v0, Operand(value));
+  __ LoadHeapObject(v0, value);
   __ Ret();
 }
 
@@ -2605,15 +2605,7 @@
   // Store the value in the cell.
   __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ mov(v0, a0);  // Stored value must be returned in v0.
-
-  // This trashes a0 but the value is returned in v0 anyway.
-  __ RecordWriteField(t0,
-                      JSGlobalPropertyCell::kValueOffset,
-                      a0,
-                      a2,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET);
+  // Cells are always rescanned, so no write barrier here.
 
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
@@ -2709,7 +2701,7 @@
 
 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
                                                    Handle<JSObject> holder,
-                                                   Handle<Object> value,
+                                                   Handle<JSFunction> value,
                                                    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
@@ -2847,7 +2839,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<Object> value) {
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 39d6e04..d6d6571 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1115,7 +1115,7 @@
 
 
 // Unsafe accessor omitting write barrier.
-void HeapObject::set_map_unsafe(Map* value) {
+void HeapObject::set_map_no_write_barrier(Map* value) {
   set_map_word(MapWord::FromMap(value));
 }
 
@@ -1183,6 +1183,22 @@
 ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 
 
+Object** FixedArray::GetFirstElementAddress() {
+  return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
+
+bool FixedArray::ContainsOnlySmisOrHoles() {
+  Object* the_hole = GetHeap()->the_hole_value();
+  Object** current = GetFirstElementAddress();
+  for (int i = 0; i < length(); ++i) {
+    Object* candidate = *current++;
+    if (!candidate->IsSmi() && candidate != the_hole) return false;
+  }
+  return true;
+}
+
+
 FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
   return static_cast<FixedArrayBase*>(array);
@@ -1211,38 +1227,66 @@
 }
 
 
-MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
+MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
 #if DEBUG
   ValidateSmiOnlyElements();
 #endif
-  if ((map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) {
-    Object* obj;
-    MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    set_map(Map::cast(obj));
+  if ((map()->elements_kind() != FAST_ELEMENTS)) {
+    return TransitionElementsKind(FAST_ELEMENTS);
   }
   return this;
 }
 
 
 MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
-                                                uint32_t count) {
-  if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
-    for (uint32_t i = 0; i < count; ++i) {
-      Object* current = *objects++;
-      if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
-        return EnsureCanContainNonSmiElements();
+                                                uint32_t count,
+                                                EnsureElementsMode mode) {
+  ElementsKind current_kind = map()->elements_kind();
+  ElementsKind target_kind = current_kind;
+  ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+  if (current_kind == FAST_ELEMENTS) return this;
+
+  Heap* heap = GetHeap();
+  Object* the_hole = heap->the_hole_value();
+  Object* heap_number_map = heap->heap_number_map();
+  for (uint32_t i = 0; i < count; ++i) {
+    Object* current = *objects++;
+    if (!current->IsSmi() && current != the_hole) {
+      if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
+          HeapObject::cast(current)->map() == heap_number_map) {
+        target_kind = FAST_DOUBLE_ELEMENTS;
+      } else {
+        target_kind = FAST_ELEMENTS;
+        break;
       }
     }
   }
+
+  if (target_kind != current_kind) {
+    return TransitionElementsKind(target_kind);
+  }
   return this;
 }
 
 
-MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
-  Object** objects = reinterpret_cast<Object**>(
-      FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
-  return EnsureCanContainElements(objects, elements->length());
+MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
+                                                EnsureElementsMode mode) {
+  if (elements->map() != GetHeap()->fixed_double_array_map()) {
+    ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
+           elements->map() == GetHeap()->fixed_cow_array_map());
+    if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
+      mode = DONT_ALLOW_DOUBLE_ELEMENTS;
+    }
+    Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
+    return EnsureCanContainElements(objects, elements->length(), mode);
+  }
+
+  ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+  if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+    return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+  }
+
+  return this;
 }
 
 
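EnsureCanContainElements above generalizes the old smi-only check into a small lattice walk: scan the candidate values once and choose the most general ElementsKind actually required, transitioning only if it differs from the current one. The end result of that scan, restated as a plain function (enum declarations are illustrative; the names follow the diff):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS
    };
    enum EnsureElementsMode {
      DONT_ALLOW_DOUBLE_ELEMENTS,
      ALLOW_CONVERTED_DOUBLE_ELEMENTS,
      ALLOW_COPIED_DOUBLE_ELEMENTS
    };

    ElementsKind TargetKind(ElementsKind current, bool saw_heap_number,
                            bool saw_other_heap_object,
                            EnsureElementsMode mode) {
      if (current == FAST_ELEMENTS) return FAST_ELEMENTS;  // most general
      if (saw_other_heap_object ||
          (saw_heap_number && mode != ALLOW_CONVERTED_DOUBLE_ELEMENTS)) {
        return FAST_ELEMENTS;         // needs fully tagged elements
      }
      if (saw_heap_number) {
        return FAST_DOUBLE_ELEMENTS;  // only unboxable doubles were seen
      }
      return current;                 // smis and holes fit the current kind
    }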
@@ -1311,8 +1355,6 @@
   // The write barrier is not used for global property cells.
   ASSERT(!val->IsJSGlobalPropertyCell());
   WRITE_FIELD(this, kValueOffset, val);
-  GetHeap()->incremental_marking()->RecordWrite(
-      this, HeapObject::RawField(this, kValueOffset), val);
 }
 
 
@@ -1703,6 +1745,20 @@
 }
 
 
+void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
+                                              int index,
+                                              Object* value) {
+  ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+  ASSERT(index >= 0 && index < array->length());
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(array, offset, value);
+  Heap* heap = array->GetHeap();
+  if (heap->InNewSpace(value)) {
+    heap->RecordWrite(array->address(), offset);
+  }
+}
+
+
 void FixedArray::NoWriteBarrierSet(FixedArray* array,
                                    int index,
                                    Object* value) {
@@ -1797,12 +1853,12 @@
 }
 
 
-void DescriptorArray::NoWriteBarrierSwap(FixedArray* array,
-                                         int first,
-                                         int second) {
+void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
+                                                    int first,
+                                                    int second) {
   Object* tmp = array->get(first);
-  NoWriteBarrierSet(array, first, array->get(second));
-  NoWriteBarrierSet(array, second, tmp);
+  NoIncrementalWriteBarrierSet(array, first, array->get(second));
+  NoIncrementalWriteBarrierSet(array, second, tmp);
 }
 
 
@@ -1914,20 +1970,16 @@
   // Range check.
   ASSERT(descriptor_number < number_of_descriptors());
 
-  // Make sure none of the elements in desc are in new space.
-  ASSERT(!HEAP->InNewSpace(desc->GetKey()));
-  ASSERT(!HEAP->InNewSpace(desc->GetValue()));
-
-  NoWriteBarrierSet(this,
-                    ToKeyIndex(descriptor_number),
-                    desc->GetKey());
+  NoIncrementalWriteBarrierSet(this,
+                               ToKeyIndex(descriptor_number),
+                               desc->GetKey());
   FixedArray* content_array = GetContentArray();
-  NoWriteBarrierSet(content_array,
-                    ToValueIndex(descriptor_number),
-                    desc->GetValue());
-  NoWriteBarrierSet(content_array,
-                    ToDetailsIndex(descriptor_number),
-                    desc->GetDetails().AsSmi());
+  NoIncrementalWriteBarrierSet(content_array,
+                               ToValueIndex(descriptor_number),
+                               desc->GetValue());
+  NoIncrementalWriteBarrierSet(content_array,
+                               ToDetailsIndex(descriptor_number),
+                               desc->GetDetails().AsSmi());
 }
 
 
@@ -1941,15 +1993,16 @@
 }
 
 
-void DescriptorArray::NoWriteBarrierSwapDescriptors(int first, int second) {
-  NoWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
+void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
+    int first, int second) {
+  NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
   FixedArray* content_array = GetContentArray();
-  NoWriteBarrierSwap(content_array,
-                     ToValueIndex(first),
-                     ToValueIndex(second));
-  NoWriteBarrierSwap(content_array,
-                     ToDetailsIndex(first),
-                     ToDetailsIndex(second));
+  NoIncrementalWriteBarrierSwap(content_array,
+                                ToValueIndex(first),
+                                ToValueIndex(second));
+  NoIncrementalWriteBarrierSwap(content_array,
+                                ToDetailsIndex(first),
+                                ToDetailsIndex(second));
 }
 
 
@@ -4111,7 +4164,8 @@
             (map == GetHeap()->fixed_array_map() ||
              map == GetHeap()->fixed_cow_array_map())) ||
            (kind == FAST_DOUBLE_ELEMENTS &&
-            fixed_array->IsFixedDoubleArray()) ||
+            (fixed_array->IsFixedDoubleArray() ||
+            fixed_array == GetHeap()->empty_fixed_array())) ||
            (kind == DICTIONARY_ELEMENTS &&
             fixed_array->IsFixedArray() &&
             fixed_array->IsDictionary()) ||
@@ -4570,11 +4624,18 @@
 }
 
 
-MaybeObject* JSArray::SetContent(FixedArray* storage) {
-  MaybeObject* maybe_object = EnsureCanContainElements(storage);
-  if (maybe_object->IsFailure()) return maybe_object;
-  set_length(Smi::FromInt(storage->length()));
+MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
+  MaybeObject* maybe_result = EnsureCanContainElements(
+      storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
+  if (maybe_result->IsFailure()) return maybe_result;
+  ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
+          GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
+         ((storage->map() != GetHeap()->fixed_double_array_map()) &&
+          ((GetElementsKind() == FAST_ELEMENTS) ||
+           (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+            FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
   set_elements(storage);
+  set_length(Smi::FromInt(storage->length()));
   return this;
 }
 
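The NoWriteBarrier* to NoIncrementalWriteBarrier* renames in this file track a real split: a full write barrier has a generational half (record old-to-new pointers so the scavenger can find them) and an incremental-marking half (preserve the concurrent marker's invariants). Global property cells and descriptor arrays are rescanned by the incremental marker anyway, so only the generational half is kept, as the new FixedArray::NoIncrementalWriteBarrierSet above shows. In outline (illustrative, not V8's API):

    void NoIncrementalWriteBarrierSet(void** slot, void* value,
                                      bool value_in_new_space,
                                      void (*record_write)(void**)) {
      *slot = value;
      if (value_in_new_space) record_write(slot);  // generational half only
      // The incremental-marking half is deliberately skipped: the holder
      // is rescanned by the marker, so the store cannot be missed.
    }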
diff --git a/src/objects.cc b/src/objects.cc
index 1565504..2a56797 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -961,14 +961,14 @@
   // Morph the object to an external string by adjusting the map and
   // reinitializing the fields.
   if (size >= ExternalString::kSize) {
-    this->set_map(
+    this->set_map_no_write_barrier(
         is_symbol
             ? (is_ascii ?  heap->external_symbol_with_ascii_data_map()
                         :  heap->external_symbol_map())
             : (is_ascii ?  heap->external_string_with_ascii_data_map()
                         :  heap->external_string_map()));
   } else {
-    this->set_map(
+    this->set_map_no_write_barrier(
         is_symbol
             ? (is_ascii ?  heap->short_external_symbol_with_ascii_data_map()
                         :  heap->short_external_symbol_map())
@@ -1011,11 +1011,13 @@
   // Morph the object to an external string by adjusting the map and
   // reinitializing the fields.  Use short version if space is limited.
   if (size >= ExternalString::kSize) {
-    this->set_map(is_symbol ? heap->external_ascii_symbol_map()
-                            : heap->external_ascii_string_map());
+    this->set_map_no_write_barrier(
+        is_symbol ? heap->external_ascii_symbol_map()
+                  : heap->external_ascii_string_map());
   } else {
-    this->set_map(is_symbol ? heap->short_external_ascii_symbol_map()
-                            : heap->short_external_ascii_string_map());
+    this->set_map_no_write_barrier(
+        is_symbol ? heap->short_external_ascii_symbol_map()
+                  : heap->short_external_ascii_string_map());
   }
   ExternalAsciiString* self = ExternalAsciiString::cast(this);
   self->set_resource(resource);
@@ -1640,8 +1642,6 @@
     String* name,
     JSFunction* function,
     PropertyAttributes attributes) {
-  ASSERT(!GetHeap()->InNewSpace(function));
-
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes);
   Object* new_descriptors;
@@ -1756,7 +1756,7 @@
     // Ensure the descriptor array does not get too big.
     if (map_of_this->instance_descriptors()->number_of_descriptors() <
         DescriptorArray::kMaxNumberOfDescriptors) {
-      if (value->IsJSFunction() && !heap->InNewSpace(value)) {
+      if (value->IsJSFunction()) {
         return AddConstantFunctionProperty(name,
                                            JSFunction::cast(value),
                                            attributes);
@@ -2995,7 +2995,6 @@
       ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
       JSFunction* function =
           JSFunction::cast(target_descriptors->GetValue(number));
-      ASSERT(!HEAP->InNewSpace(function));
       if (value == function) {
         set_map(target_map);
         return value;
@@ -4855,7 +4854,7 @@
           // of the next map and recording the index in the transition array in
           // the map field of the array.
           Map* next = Map::cast(contents->get(i));
-          next->set_map_unsafe(current);
+          next->set_map_no_write_barrier(current);
           *map_or_index_field = Smi::FromInt(i + 2);
           current = next;
           map_done = false;
@@ -4880,7 +4879,7 @@
       Object* perhaps_map = prototype_transitions->get(i);
       if (perhaps_map->IsMap()) {
         Map* next = Map::cast(perhaps_map);
-        next->set_map_unsafe(current);
+        next->set_map_no_write_barrier(current);
         *proto_map_or_index_field =
             Smi::FromInt(i + kProtoTransitionElementsPerEntry);
         current = next;
@@ -4896,7 +4895,7 @@
     // the map field, which is being used to track the traversal and put the
     // correct map (the meta_map) in place while we do the callback.
     Map* prev = current->map();
-    current->set_map_unsafe(meta_map);
+    current->set_map_no_write_barrier(meta_map);
     callback(current, data);
     current = prev;
   }
@@ -5395,7 +5394,9 @@
   AssertNoAllocation no_gc;
   int len = length();
   if (new_length < len) len = new_length;
-  result->set_map(map());
+  // We are taking the map from the old fixed array so the map is sure to
+  // be an immortal immutable object.
+  result->set_map_no_write_barrier(map());
   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < len; i++) {
     result->set(i, get(i), mode);
@@ -5635,7 +5636,7 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      NoWriteBarrierSwapDescriptors(parent_index, child_index);
+      NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
       // Now element at child_index could be < its children.
       parent_index = child_index;  // parent_hash remains correct.
     }
@@ -5644,7 +5645,7 @@
   // Extract elements and create sorted array.
   for (int i = len - 1; i > 0; --i) {
     // Put max element at the back of the array.
-    NoWriteBarrierSwapDescriptors(0, i);
+    NoIncrementalWriteBarrierSwapDescriptors(0, i);
     // Shift down the new top element.
     int parent_index = 0;
     const uint32_t parent_hash = GetKey(parent_index)->Hash();
@@ -5660,7 +5661,7 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      NoWriteBarrierSwapDescriptors(parent_index, child_index);
+      NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
       parent_index = child_index;
     }
   }
@@ -7639,6 +7640,22 @@
 }
 
 
+#define DECLARE_TAG(ignore1, name, ignore2) name,
+const char* const VisitorSynchronization::kTags[
+    VisitorSynchronization::kNumberOfSyncTags] = {
+  VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
+#define DECLARE_TAG(ignore1, ignore2, name) name,
+const char* const VisitorSynchronization::kTagNames[
+    VisitorSynchronization::kNumberOfSyncTags] = {
+  VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -8115,9 +8132,20 @@
 static void CopyFastElementsToFast(FixedArray* source,
                                    FixedArray* destination,
                                    WriteBarrierMode mode) {
-  uint32_t count = static_cast<uint32_t>(source->length());
-  for (uint32_t i = 0; i < count; ++i) {
-    destination->set(i, source->get(i), mode);
+  int count = source->length();
+  int copy_size = Min(count, destination->length());
+  if (mode == SKIP_WRITE_BARRIER ||
+      !Page::FromAddress(destination->address())->IsFlagSet(
+          MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)) {
+    Address to = destination->address() + FixedArray::kHeaderSize;
+    Address from = source->address() + FixedArray::kHeaderSize;
+    memcpy(reinterpret_cast<void*>(to),
+           reinterpret_cast<void*>(from),
+           kPointerSize * copy_size);
+  } else {
+    for (int i = 0; i < copy_size; ++i) {
+      destination->set(i, source->get(i), mode);
+    }
   }
 }
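
The new CopyFastElementsToFast clamps the copy to the destination length and, when the incremental-marking write barrier can be skipped, moves the payload with one memcpy instead of per-element stores. A minimal standalone sketch of that idea; the CopySlots name and the needs_write_barrier flag are illustrative stand-ins for the page-flag test in the patch:

    #include <algorithm>
    #include <cstdio>
    #include <cstring>

    // Copy tagged-pointer slots either with a raw memcpy (no barrier needed)
    // or one slot at a time through a "barriered" store.  The
    // POINTERS_FROM_HERE_ARE_INTERESTING page-flag test from the patch is
    // modeled by the needs_write_barrier parameter.
    static void CopySlots(void** dst, int dst_len,
                          void** src, int src_len,
                          bool needs_write_barrier) {
      int copy_size = std::min(src_len, dst_len);  // Clamp, as the patch does.
      if (!needs_write_barrier) {
        memcpy(dst, src, sizeof(void*) * copy_size);
      } else {
        for (int i = 0; i < copy_size; ++i) {
          // Stand-in for destination->set(i, value, mode), which would also
          // record the written slot for the garbage collector.
          dst[i] = src[i];
        }
      }
    }

    int main() {
      void* src[4] = {(void*)1, (void*)2, (void*)3, (void*)4};
      void* dst[2] = {nullptr, nullptr};
      CopySlots(dst, 2, src, 4, /*needs_write_barrier=*/false);
      printf("%p %p\n", dst[0], dst[1]);  // Only two slots are copied.
      return 0;
    }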
 
@@ -8125,11 +8153,14 @@
 static void CopySlowElementsToFast(NumberDictionary* source,
                                    FixedArray* destination,
                                    WriteBarrierMode mode) {
+  int destination_length = destination->length();
   for (int i = 0; i < source->Capacity(); ++i) {
     Object* key = source->KeyAt(i);
     if (key->IsNumber()) {
       uint32_t entry = static_cast<uint32_t>(key->Number());
-      destination->set(entry, source->ValueAt(i), mode);
+      if (entry < static_cast<uint32_t>(destination_length)) {
+        destination->set(entry, source->ValueAt(i), mode);
+      }
     }
   }
 }
@@ -8340,14 +8371,8 @@
 
 
 void JSArray::Expand(int required_size) {
-  Handle<JSArray> self(this);
-  Handle<FixedArray> old_backing(FixedArray::cast(elements()));
-  int old_size = old_backing->length();
-  int new_size = required_size > old_size ? required_size : old_size;
-  Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
-  // Can't use this any more now because we may have had a GC!
-  for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
-  GetIsolate()->factory()->SetContent(self, new_backing);
+  GetIsolate()->factory()->SetElementsCapacityAndLength(
+      Handle<JSArray>(this), required_size, required_size);
 }
 
 
@@ -8501,13 +8526,14 @@
 
 MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
                                                 uint32_t first_arg,
-                                                uint32_t arg_count) {
+                                                uint32_t arg_count,
+                                                EnsureElementsMode mode) {
   // Elements in |Arguments| are ordered backwards (because they're on the
   // stack), but the method that's called here iterates over them in forward
   // direction.
   return EnsureCanContainElements(
       args->arguments() - first_arg - (arg_count - 1),
-      arg_count);
+      arg_count, mode);
 }
 
 
@@ -9459,31 +9485,45 @@
   FixedArrayBase* elms = FixedArrayBase::cast(elements());
   uint32_t capacity = static_cast<uint32_t>(elms->length());
   uint32_t length = capacity;
+
   if (IsJSArray()) {
-    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
-  }
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS) {
-    if (to_kind == FAST_DOUBLE_ELEMENTS) {
-      MaybeObject* maybe_result =
-          SetFastDoubleElementsCapacityAndLength(capacity, length);
-      if (maybe_result->IsFailure()) return maybe_result;
-      return this;
-    } else if (to_kind == FAST_ELEMENTS) {
-      MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
-      Map* new_map;
-      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-      if (FLAG_trace_elements_transitions) {
-        PrintElementsTransition(stdout, from_kind, elms, FAST_ELEMENTS, elms);
-      }
-      set_map(new_map);
-      return this;
+    Object* raw_length = JSArray::cast(this)->length();
+    if (raw_length->IsUndefined()) {
+      // If the length is undefined, the JSArray is being initialized and has
+      // no elements; assume a length of zero.
+      length = 0;
+    } else {
+      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
     }
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+  }
+
+  if ((from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) ||
+      (length == 0)) {
+    MaybeObject* maybe_new_map = GetElementsTransitionMap(to_kind);
+    Map* new_map;
+    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+    if (FLAG_trace_elements_transitions) {
+      PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
+    }
+    set_map(new_map);
+    return this;
+  }
+
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    MaybeObject* maybe_result =
+        SetFastDoubleElementsCapacityAndLength(capacity, length);
+    if (maybe_result->IsFailure()) return maybe_result;
+    return this;
+  }
+
+  if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
     MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
         capacity, length, kDontAllowSmiOnlyElements);
     if (maybe_result->IsFailure()) return maybe_result;
     return this;
   }
+
   // This method should never be called for any other case than the ones
   // handled above.
   UNREACHABLE();
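
The rewritten transition logic first normalizes the length (an undefined length means the JSArray is still being initialized, so it counts as zero), then takes a cheap map-only transition whenever no element needs to change representation, and only reallocates the backing store for the smi-to-double and double-to-object cases. A hedged sketch of that dispatch order over a simplified ElementsKind enum; TransitionPath is a made-up name, and the real code swaps maps and reallocates arrays instead of returning strings:

    #include <cassert>
    #include <cstdio>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS
    };

    const char* TransitionPath(ElementsKind from, ElementsKind to, int length) {
      // A map-only change suffices if smis can stay as they are (they are
      // valid tagged values) or if there are no elements to convert.
      if ((from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS) ||
          length == 0) {
        return "map-only transition";
      }
      if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_DOUBLE_ELEMENTS) {
        return "reallocate as FixedDoubleArray";  // Unbox each smi.
      }
      if (from == FAST_DOUBLE_ELEMENTS && to == FAST_ELEMENTS) {
        return "reallocate as FixedArray";        // Box each double.
      }
      assert(false && "unreachable: no other transitions are supported");
      return NULL;
    }

    int main() {
      printf("%s\n", TransitionPath(FAST_SMI_ONLY_ELEMENTS,
                                    FAST_DOUBLE_ELEMENTS, 0));  // map-only
      printf("%s\n", TransitionPath(FAST_SMI_ONLY_ELEMENTS,
                                    FAST_DOUBLE_ELEMENTS, 8));  // reallocate
      return 0;
    }
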
@@ -10598,7 +10638,7 @@
     // Transform string to symbol if possible.
     Map* map = heap->SymbolMapForString(string_);
     if (map != NULL) {
-      string_->set_map(map);
+      string_->set_map_no_write_barrier(map);
       ASSERT(string_->IsSymbol());
       return string_;
     }
diff --git a/src/objects.h b/src/objects.h
index 6c88cc0..2b18e67 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1131,7 +1131,10 @@
   // information.
   inline Map* map();
   inline void set_map(Map* value);
-  inline void set_map_unsafe(Map* value);
+  // The no-write-barrier version.  This is OK if the object is white and in
+  // new space, or if the value is an immortal immutable object, such as the
+  // maps of primitive (non-JS) objects like strings and heap numbers.
+  inline void set_map_no_write_barrier(Map* value);
 
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
@@ -1319,6 +1322,13 @@
 };
 
 
+enum EnsureElementsMode {
+  DONT_ALLOW_DOUBLE_ELEMENTS,
+  ALLOW_COPIED_DOUBLE_ELEMENTS,
+  ALLOW_CONVERTED_DOUBLE_ELEMENTS
+};
+
+
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
 class JSReceiver: public HeapObject {
@@ -1612,16 +1622,19 @@
 
   inline void ValidateSmiOnlyElements();
 
-  // Makes sure that this object can contain non-smi Object as elements.
-  inline MaybeObject* EnsureCanContainNonSmiElements();
+  // Makes sure that this object can contain HeapObject as elements.
+  inline MaybeObject* EnsureCanContainHeapObjectElements();
 
   // Makes sure that this object can contain the specified elements.
   inline MaybeObject* EnsureCanContainElements(Object** elements,
-                                               uint32_t count);
-  inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
+                                               uint32_t count,
+                                               EnsureElementsMode mode);
+  inline MaybeObject* EnsureCanContainElements(FixedArrayBase* elements,
+                                               EnsureElementsMode mode);
   MaybeObject* EnsureCanContainElements(Arguments* arguments,
                                         uint32_t first_arg,
-                                        uint32_t arg_count);
+                                        uint32_t arg_count,
+                                        EnsureElementsMode mode);
 
   // Do we want to keep the elements in fast case when increasing the
   // capacity?
@@ -2121,6 +2134,9 @@
   // Gives access to raw memory which stores the array's data.
   inline Object** data_start();
 
+  inline Object** GetFirstElementAddress();
+  inline bool ContainsOnlySmisOrHoles();
+
   // Copy operations.
   MUST_USE_RESULT inline MaybeObject* Copy();
   MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2187,6 +2203,13 @@
                                        int index,
                                        Object* value);
 
+  // Set operation on FixedArray without incremental write barrier. Can
+  // only be used if the object is guaranteed to be white (whiteness witness
+  // is present).
+  static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
+                                                  int index,
+                                                  Object* value);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
 };
@@ -2465,12 +2488,12 @@
         NULL_DESCRIPTOR;
   }
   // Swap operation on FixedArray without using write barriers.
-  static inline void NoWriteBarrierSwap(FixedArray* array,
-                                        int first,
-                                        int second);
+  static inline void NoIncrementalWriteBarrierSwap(
+      FixedArray* array, int first, int second);
 
   // Swap descriptor first and second.
-  inline void NoWriteBarrierSwapDescriptors(int first, int second);
+  inline void NoIncrementalWriteBarrierSwapDescriptors(
+      int first, int second);
 
   FixedArray* GetContentArray() {
     return FixedArray::cast(get(kContentArrayIndex));
@@ -3738,11 +3761,6 @@
   DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
   DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
 
-  // Unchecked accessor to be used during GC.
-  FixedArray* UncheckedLiteralArray() {
-    return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
-  }
-
 #undef DEFINE_ELEMENT_ACCESSORS
 
   // Accessors for elements of the ith deoptimization entry.
@@ -7381,7 +7399,7 @@
   MUST_USE_RESULT MaybeObject* Initialize(int capacity);
 
   // Set the content of the array to the content of storage.
-  inline MaybeObject* SetContent(FixedArray* storage);
+  inline MaybeObject* SetContent(FixedArrayBase* storage);
 
   // Casting.
   static inline JSArray* cast(Object* obj);
@@ -7862,6 +7880,34 @@
 #undef DECL_BOOLEAN_ACCESSORS
 #undef DECL_ACCESSORS
 
+#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V)                            \
+  V(kSymbolTable, "symbol_table", "(Symbols)")                          \
+  V(kExternalStringsTable, "external_strings_table", "(External strings)") \
+  V(kStrongRootList, "strong_root_list", "(Strong roots)")              \
+  V(kSymbol, "symbol", "(Symbol)")                                      \
+  V(kBootstrapper, "bootstrapper", "(Bootstrapper)")                    \
+  V(kTop, "top", "(Isolate)")                                           \
+  V(kRelocatable, "relocatable", "(Relocatable)")                       \
+  V(kDebug, "debug", "(Debugger)")                                      \
+  V(kCompilationCache, "compilationcache", "(Compilation cache)")       \
+  V(kHandleScope, "handlescope", "(Handle scope)")                      \
+  V(kBuiltins, "builtins", "(Builtins)")                                \
+  V(kGlobalHandles, "globalhandles", "(Global handles)")                \
+  V(kThreadManager, "threadmanager", "(Thread manager)")                \
+  V(kExtensions, "Extensions", "(Extensions)")
+
+class VisitorSynchronization : public AllStatic {
+ public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+  enum SyncTag {
+    VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
+    kNumberOfSyncTags
+  };
+#undef DECLARE_ENUM
+
+  static const char* const kTags[kNumberOfSyncTags];
+  static const char* const kTagNames[kNumberOfSyncTags];
+};
 
 // Abstract base class for visiting, and optionally modifying, the
 // pointers contained in Objects. Used in GC and serialization/deserialization.
@@ -7917,13 +7963,10 @@
   // Visits a handle that has an embedder-assigned class ID.
   virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
 
-#ifdef DEBUG
   // Intended for serialization/deserialization checking: insert, or
   // check for the presence of, a tag at this position in the stream.
-  virtual void Synchronize(const char* tag) {}
-#else
-  inline void Synchronize(const char* tag) {}
-#endif
+  // Also used for marking up GC roots in heap snapshots.
+  virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
 };
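
The tag list added here is a classic X-macro: a single VISITOR_SYNCHRONIZATION_TAGS_LIST expands three times, once per column, to generate the SyncTag enum, the short-tag table kTags, and the display-name table kTagNames, so the three can never drift apart. A self-contained sketch of the pattern with a shortened list:

    #include <cstdio>

    // One authoritative list; each V(...) row carries all three columns.
    #define TAGS_LIST(V)                                  \
      V(kSymbolTable, "symbol_table", "(Symbols)")        \
      V(kBootstrapper, "bootstrapper", "(Bootstrapper)")  \
      V(kBuiltins, "builtins", "(Builtins)")

    // Expansion 1: enum values from column 1.
    #define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
    enum SyncTag { TAGS_LIST(DECLARE_ENUM) kNumberOfSyncTags };
    #undef DECLARE_ENUM

    // Expansion 2: short tags from column 2.
    #define DECLARE_TAG(ignore1, name, ignore2) name,
    const char* const kTags[kNumberOfSyncTags] = { TAGS_LIST(DECLARE_TAG) };
    #undef DECLARE_TAG

    // Expansion 3: display names from column 3.
    #define DECLARE_NAME(ignore1, ignore2, name) name,
    const char* const kTagNames[kNumberOfSyncTags] = { TAGS_LIST(DECLARE_NAME) };
    #undef DECLARE_NAME

    int main() {
      for (int i = 0; i < kNumberOfSyncTags; ++i)
        printf("%d %s %s\n", i, kTags[i], kTagNames[i]);
      return 0;
    }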
 
 
diff --git a/src/parser.cc b/src/parser.cc
index d834acb..c1681cf 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -2158,6 +2158,20 @@
   // reported (underlining).
   Expect(Token::RETURN, CHECK_OK);
 
+  Token::Value tok = peek();
+  Statement* result;
+  if (scanner().HasAnyLineTerminatorBeforeNext() ||
+      tok == Token::SEMICOLON ||
+      tok == Token::RBRACE ||
+      tok == Token::EOS) {
+    ExpectSemicolon(CHECK_OK);
+    result = new(zone()) ReturnStatement(GetLiteralUndefined());
+  } else {
+    Expression* expr = ParseExpression(true, CHECK_OK);
+    ExpectSemicolon(CHECK_OK);
+    result = new(zone()) ReturnStatement(expr);
+  }
+
   // An ECMAScript program is considered syntactically incorrect if it
   // contains a return statement that is not within the body of a
   // function. See ECMA-262, section 12.9, page 67.
@@ -2170,19 +2184,7 @@
     Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
     return new(zone()) ExpressionStatement(throw_error);
   }
-
-  Token::Value tok = peek();
-  if (scanner().HasAnyLineTerminatorBeforeNext() ||
-      tok == Token::SEMICOLON ||
-      tok == Token::RBRACE ||
-      tok == Token::EOS) {
-    ExpectSemicolon(CHECK_OK);
-    return new(zone()) ReturnStatement(GetLiteralUndefined());
-  }
-
-  Expression* expr = ParseExpression(true, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return new(zone()) ReturnStatement(expr);
+  return result;
 }
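
The return statement is now parsed in full (including its optional expression) before the illegal-return check runs, which keeps the parser and the preparser consuming exactly the same tokens. Whether return takes an expression follows ECMAScript's restricted-production rule: a line terminator, ';', '}', or end of input directly after the keyword means the statement returns undefined. A rough standalone sketch of that decision; the token enum and ReturnHasNoValue are illustrative:

    #include <cstdio>

    enum Token { IDENT, SEMICOLON, RBRACE, EOS };

    // Mirrors the condition in ParseReturnStatement: does "return" end here
    // (yielding undefined), or does an expression follow?
    bool ReturnHasNoValue(Token next, bool line_terminator_before_next) {
      return line_terminator_before_next ||
             next == SEMICOLON || next == RBRACE || next == EOS;
    }

    int main() {
      // "return\n42": automatic semicolon insertion, value is undefined.
      printf("%d\n", ReturnHasNoValue(IDENT, true));   // 1
      // "return 42;": the expression belongs to the return.
      printf("%d\n", ReturnHasNoValue(IDENT, false));  // 0
      return 0;
    }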
 
 
@@ -2693,6 +2695,7 @@
     // Assignment to eval or arguments is disallowed in strict mode.
     CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
   }
+  MarkAsLValue(expression);
 
   Token::Value op = Next();  // Get assignment operator.
   int pos = scanner().location().beg_pos;
@@ -2926,6 +2929,7 @@
       // Prefix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
+    MarkAsLValue(expression);
 
     int position = scanner().location().beg_pos;
     return new(zone()) CountOperation(isolate(),
@@ -2961,6 +2965,7 @@
       // Postfix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
+    MarkAsLValue(expression);
 
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
@@ -3375,6 +3380,7 @@
       isolate()->factory()->NewFixedArray(values->length(), TENURED);
   Handle<FixedDoubleArray> double_literals;
   ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+  bool has_only_undefined_values = true;
 
   // Fill in the literals.
   bool is_simple = true;
@@ -3398,6 +3404,7 @@
       // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary.  Always remember
       // the tagged value, no matter what the ElementsKind is in case we
       // ultimately end up in FAST_ELEMENTS.
+      has_only_undefined_values = false;
       object_literals->set(i, *boilerplate_value);
       if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
         // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
@@ -3436,6 +3443,13 @@
     }
   }
 
+  // Very small array literals that don't have a concrete hint about their type
+  // from a constant value should default to the slow case to avoid lots of
+  // elements transitions on really small objects.
+  if (has_only_undefined_values && values->length() <= 2) {
+    elements_kind = FAST_ELEMENTS;
+  }
+
   // Simple and shallow arrays can be lazily copied, we transform the
   // elements array to a copy-on-write array.
   if (is_simple && depth == 1 && values->length() > 0 &&
@@ -4479,6 +4493,15 @@
 }
 
 
+void Parser::MarkAsLValue(Expression* expression) {
+  VariableProxy* proxy = expression != NULL
+      ? expression->AsVariableProxy()
+      : NULL;
+
+  if (proxy != NULL) proxy->MarkAsLValue();
+}
+
+
 // Checks LHS expression for assignment and prefix/postfix increment/decrement
 // in strict mode.
 void Parser::CheckStrictModeLValue(Expression* expression,
diff --git a/src/parser.h b/src/parser.h
index 75f8e10..146d7bb 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -661,6 +661,11 @@
                                                bool* is_set,
                                                bool* ok);
 
+  // Determine if the expression is a variable proxy and mark it as being used
+  // in an assignment or with an increment/decrement operator. This is
+  // currently used for statically checking assignments to harmony const
+  // bindings.
+  void MarkAsLValue(Expression* expression);
+
   // Strict mode validation of LValue expressions
   void CheckStrictModeLValue(Expression* expression,
                              const char* error,
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index cccf0ac..a59d0bb 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -70,6 +70,11 @@
 }
 
 
+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
 #ifndef __CYGWIN__
 // Get rid of writable permission on code allocations.
 void OS::ProtectCode(void* address, const size_t size) {
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 8771c43..822f360 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -889,6 +889,11 @@
 }
 
 
+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
 void OS::ProtectCode(void* address, const size_t size) {
   DWORD old_protect;
   VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
diff --git a/src/platform.h b/src/platform.h
index f84b6b1..127f788 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -172,6 +172,10 @@
                         bool is_executable);
   static void Free(void* address, const size_t size);
 
+  // This is the granularity at which the ProtectCode(...) call can set page
+  // permissions.
+  static intptr_t CommitPageSize();
+
   // Mark code segments non-writable.
   static void ProtectCode(void* address, const size_t size);
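
Both the POSIX and Win32 ports hard-code CommitPageSize() to 4096 bytes, the commit granularity assumed elsewhere in this patch (for instance when growing the old store buffer). For illustration only, and not what the patch does, the portable runtime queries would look roughly like this:

    #include <cstdio>
    #if defined(_WIN32)
    #include <windows.h>
    #else
    #include <unistd.h>
    #endif

    // Query the OS page size at runtime.  The patch instead returns the
    // constant 4096, which matches the common case on x86 and ARM.
    static long QueryPageSize() {
    #if defined(_WIN32)
      SYSTEM_INFO info;
      GetSystemInfo(&info);
      return static_cast<long>(info.dwPageSize);
    #else
      return sysconf(_SC_PAGESIZE);
    #endif
    }

    int main() {
      printf("page size: %ld\n", QueryPageSize());
      return 0;
    }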
 
diff --git a/src/preparser.cc b/src/preparser.cc
index 49cadb6..b36f4fa 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -627,6 +627,7 @@
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(i::Token::RPAREN, ok);
+  if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
   return Statement::Default();
 }
 
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 88d6e87..7a70b01 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -95,6 +95,26 @@
 }
 
 
+uint64_t HeapObjectsMap::GetNthGcSubrootId(int delta) {
+  return kGcRootsFirstSubrootId + delta * kObjectIdStep;
+}
+
+
+HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
+  return reinterpret_cast<HeapObject*>(
+      reinterpret_cast<char*>(kFirstGcSubrootObject) +
+      delta * HeapObjectsMap::kObjectIdStep);
+}
+
+
+int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
+  return static_cast<int>(
+      (reinterpret_cast<char*>(subroot) -
+       reinterpret_cast<char*>(kFirstGcSubrootObject)) /
+      HeapObjectsMap::kObjectIdStep);
+}
+
+
 uint64_t HeapEntry::id() {
   union {
     Id stored_id;
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 5626aca..a46122b 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -938,7 +938,7 @@
 
 
 void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
-  ASSERT(type == kElement || type == kHidden);
+  ASSERT(type == kElement || type == kHidden || type == kWeak);
   child_index_ = child_index;
   type_ = type;
   index_ = index;
@@ -1053,8 +1053,11 @@
 }
 
 
-void HeapEntry::Print(int max_depth, int indent) {
-  OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
+void HeapEntry::Print(
+    const char* prefix, const char* edge_name, int max_depth, int indent) {
+  OS::Print("%6d %7d @%6llu %*c %s%s: ",
+            self_size(), RetainedSize(false), id(),
+            indent, ' ', prefix, edge_name);
   if (type() != kString) {
     OS::Print("%s %.40s\n", TypeAsString(), name_);
   } else {
@@ -1073,29 +1076,40 @@
   Vector<HeapGraphEdge> ch = children();
   for (int i = 0; i < ch.length(); ++i) {
     HeapGraphEdge& edge = ch[i];
+    const char* edge_prefix = "";
+    ScopedVector<char> index(64);
+    const char* edge_name = index.start();
     switch (edge.type()) {
       case HeapGraphEdge::kContextVariable:
-        OS::Print("  %*c #%s: ", indent, ' ', edge.name());
+        edge_prefix = "#";
+        edge_name = edge.name();
         break;
       case HeapGraphEdge::kElement:
-        OS::Print("  %*c %d: ", indent, ' ', edge.index());
+        OS::SNPrintF(index, "%d", edge.index());
         break;
       case HeapGraphEdge::kInternal:
-        OS::Print("  %*c $%s: ", indent, ' ', edge.name());
+        edge_prefix = "$";
+        edge_name = edge.name();
         break;
       case HeapGraphEdge::kProperty:
-        OS::Print("  %*c %s: ", indent, ' ', edge.name());
+        edge_name = edge.name();
         break;
       case HeapGraphEdge::kHidden:
-        OS::Print("  %*c $%d: ", indent, ' ', edge.index());
+        edge_prefix = "$";
+        OS::SNPrintF(index, "%d", edge.index());
         break;
       case HeapGraphEdge::kShortcut:
-        OS::Print("  %*c ^%s: ", indent, ' ', edge.name());
+        edge_prefix = "^";
+        edge_name = edge.name();
+        break;
+      case HeapGraphEdge::kWeak:
+        edge_prefix = "w";
+        OS::SNPrintF(index, "%d", edge.index());
         break;
       default:
-        OS::Print("!!! unknown edge type: %d ", edge.type());
+        OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
     }
-    edge.to()->Print(max_depth, indent + 2);
+    edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
   }
 }
 
@@ -1215,6 +1229,9 @@
   STATIC_ASSERT(
       sizeof(HeapEntry) ==
       SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize);  // NOLINT
+  for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+    gc_subroot_entries_[i] = NULL;
+  }
 }
 
 HeapSnapshot::~HeapSnapshot() {
@@ -1270,6 +1287,21 @@
 }
 
 
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
+                                           int children_count,
+                                           int retainers_count) {
+  ASSERT(gc_subroot_entries_[tag] == NULL);
+  ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+  return (gc_subroot_entries_[tag] = AddEntry(
+      HeapEntry::kObject,
+      VisitorSynchronization::kTagNames[tag],
+      HeapObjectsMap::GetNthGcSubrootId(tag),
+      0,
+      children_count,
+      retainers_count));
+}
+
+
 HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
                                                  int retainers_count) {
   ASSERT(natives_root_entry_ == NULL);
@@ -1355,17 +1387,22 @@
 
 
 void HeapSnapshot::Print(int max_depth) {
-  root()->Print(max_depth, 0);
+  root()->Print("", "", max_depth, 0);
 }
 
 
 // We split IDs on evens for embedder objects (see
 // HeapObjectsMap::GenerateId) and odds for native objects.
 const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
-const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
-const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
-// Increase kFirstAvailableObjectId if new 'special' objects appear.
-const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
+const uint64_t HeapObjectsMap::kGcRootsObjectId =
+    HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kNativesRootObjectId =
+    HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kGcRootsFirstSubrootId =
+    HeapObjectsMap::kNativesRootObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kFirstAvailableObjectId =
+    HeapObjectsMap::kGcRootsFirstSubrootId +
+    VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
 
 HeapObjectsMap::HeapObjectsMap()
     : initial_fill_mode_(true),
@@ -1391,7 +1428,7 @@
     if (existing != 0) return existing;
   }
   uint64_t id = next_id_;
-  next_id_ += 2;
+  next_id_ += kObjectIdStep;
   AddEntry(addr, id);
   return id;
 }
@@ -1684,6 +1721,12 @@
 HeapObject *const V8HeapExplorer::kGcRootsObject =
     reinterpret_cast<HeapObject*>(
         static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
+HeapObject *const V8HeapExplorer::kFirstGcSubrootObject =
+    reinterpret_cast<HeapObject*>(
+        static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
+HeapObject *const V8HeapExplorer::kLastGcSubrootObject =
+    reinterpret_cast<HeapObject*>(
+        static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
 
 
 V8HeapExplorer::V8HeapExplorer(
@@ -1716,6 +1759,11 @@
     return snapshot_->AddRootEntry(children_count);
   } else if (object == kGcRootsObject) {
     return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+  } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
+    return snapshot_->AddGcSubrootEntry(
+        GetGcSubrootOrder(object),
+        children_count,
+        retainers_count);
   } else if (object->IsJSGlobalObject()) {
     const char* tag = objects_tags_.GetTag(object);
     const char* name = collection_->names()->GetName(
@@ -1779,6 +1827,18 @@
                         : "",
                     children_count,
                     retainers_count);
+  } else if (object->IsGlobalContext()) {
+    return AddEntry(object,
+                    HeapEntry::kHidden,
+                    "system / GlobalContext",
+                    children_count,
+                    retainers_count);
+  } else if (object->IsContext()) {
+    return AddEntry(object,
+                    HeapEntry::kHidden,
+                    "system / Context",
+                    children_count,
+                    retainers_count);
   } else if (object->IsFixedArray() ||
              object->IsFixedDoubleArray() ||
              object->IsByteArray() ||
@@ -1818,9 +1878,38 @@
 }
 
 
+class GcSubrootsEnumerator : public ObjectVisitor {
+ public:
+  GcSubrootsEnumerator(
+      SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+      : filler_(filler),
+        explorer_(explorer),
+        previous_object_count_(0),
+        object_count_(0) {
+  }
+  void VisitPointers(Object** start, Object** end) {
+    object_count_ += end - start;
+  }
+  void Synchronize(VisitorSynchronization::SyncTag tag) {
+    // Skip empty subroots.
+    if (previous_object_count_ != object_count_) {
+      previous_object_count_ = object_count_;
+      filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
+    }
+  }
+ private:
+  SnapshotFillerInterface* filler_;
+  V8HeapExplorer* explorer_;
+  intptr_t previous_object_count_;
+  intptr_t object_count_;
+};
+
+
 void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
   filler->AddEntry(kInternalRootObject, this);
   filler->AddEntry(kGcRootsObject, this);
+  GcSubrootsEnumerator enumerator(filler, this);
+  heap_->IterateRoots(&enumerator, VISIT_ALL);
 }
 
 
@@ -1939,6 +2028,11 @@
                            "literals_or_bindings",
                            js_fun->literals_or_bindings(),
                            JSFunction::kLiteralsOffset);
+      for (int i = JSFunction::kNonWeakFieldsEndOffset;
+           i < JSFunction::kSize;
+           i += kPointerSize) {
+        SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+      }
     }
     TagObject(js_obj->properties(), "(object properties)");
     SetInternalReference(obj, entry,
@@ -1965,8 +2059,14 @@
               "(context func. result caches)");
     TagObject(context->normalized_map_cache(), "(context norm. map cache)");
     TagObject(context->runtime_context(), "(runtime context)");
-    TagObject(context->map_cache(), "(context map cache)");
     TagObject(context->data(), "(context data)");
+    for (int i = Context::FIRST_WEAK_SLOT;
+         i < Context::GLOBAL_CONTEXT_SLOTS;
+         ++i) {
+      SetWeakReference(obj, entry,
+                       i, context->get(i),
+                       FixedArray::OffsetOfElementAt(i));
+    }
   } else if (obj->IsMap()) {
     Map* map = Map::cast(obj);
     SetInternalReference(obj, entry,
@@ -2009,6 +2109,9 @@
     SetInternalReference(obj, entry,
                          "script", shared->script(),
                          SharedFunctionInfo::kScriptOffset);
+    SetWeakReference(obj, entry,
+                     1, shared->initial_map(),
+                     SharedFunctionInfo::kInitialMapOffset);
   } else if (obj->IsScript()) {
     Script* script = Script::cast(obj);
     SetInternalReference(obj, entry,
@@ -2235,15 +2338,66 @@
 
 
 class RootsReferencesExtractor : public ObjectVisitor {
- public:
-  explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
-      : explorer_(explorer) {
-  }
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
-  }
  private:
-  V8HeapExplorer* explorer_;
+  struct IndexTag {
+    IndexTag(int index, VisitorSynchronization::SyncTag tag)
+        : index(index), tag(tag) { }
+    int index;
+    VisitorSynchronization::SyncTag tag;
+  };
+
+ public:
+  RootsReferencesExtractor()
+      : collecting_all_references_(false),
+        previous_reference_count_(0) {
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    if (collecting_all_references_) {
+      for (Object** p = start; p < end; p++) all_references_.Add(*p);
+    } else {
+      for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+    }
+  }
+
+  void SetCollectingAllReferences() { collecting_all_references_ = true; }
+
+  void FillReferences(V8HeapExplorer* explorer) {
+    ASSERT(strong_references_.length() <= all_references_.length());
+    for (int i = 0; i < reference_tags_.length(); ++i) {
+      explorer->SetGcRootsReference(reference_tags_[i].tag);
+    }
+    int strong_index = 0, all_index = 0, tags_index = 0;
+    while (all_index < all_references_.length()) {
+      if (strong_index < strong_references_.length() &&
+          strong_references_[strong_index] == all_references_[all_index]) {
+        explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+                                        false,
+                                        all_references_[all_index++]);
+        ++strong_index;
+      } else {
+        explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+                                        true,
+                                        all_references_[all_index++]);
+      }
+      if (reference_tags_[tags_index].index == all_index) ++tags_index;
+    }
+  }
+
+  void Synchronize(VisitorSynchronization::SyncTag tag) {
+    if (collecting_all_references_ &&
+        previous_reference_count_ != all_references_.length()) {
+      previous_reference_count_ = all_references_.length();
+      reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+    }
+  }
+
+ private:
+  bool collecting_all_references_;
+  List<Object*> strong_references_;
+  List<Object*> all_references_;
+  int previous_reference_count_;
+  List<IndexTag> reference_tags_;
 };
 
 
@@ -2268,8 +2422,11 @@
     return false;
   }
   SetRootGcRootsReference();
-  RootsReferencesExtractor extractor(this);
+  RootsReferencesExtractor extractor;
+  heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+  extractor.SetCollectingAllReferences();
   heap_->IterateRoots(&extractor, VISIT_ALL);
+  extractor.FillReferences(this);
   filler_ = NULL;
   return progress_->ProgressReport(false);
 }
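
RootsReferencesExtractor now visits the roots twice, first with VISIT_ONLY_STRONG and then with VISIT_ALL. Since the strong pass yields a subsequence of the full pass, one merge walk classifies every reference as strong (present in both lists) or weak (present only in the full list), which is what FillReferences does; the per-tag bookkeeping is omitted in the sketch below, where plain ints stand in for root pointers:

    #include <cstdio>
    #include <vector>

    // Classify each entry of `all` as strong or weak, given that `strong`
    // occurs within `all` in the same order (a subsequence).
    void Classify(const std::vector<int>& strong, const std::vector<int>& all) {
      size_t strong_index = 0;
      for (size_t all_index = 0; all_index < all.size(); ++all_index) {
        bool is_strong = strong_index < strong.size() &&
                         strong[strong_index] == all[all_index];
        if (is_strong) ++strong_index;
        printf("%d -> %s\n", all[all_index], is_strong ? "strong" : "weak");
      }
    }

    int main() {
      // Pass 1 (VISIT_ONLY_STRONG) saw {10, 30}; pass 2 (VISIT_ALL) saw 20 too.
      Classify({10, 30}, {10, 20, 30});  // 10 strong, 20 weak, 30 strong.
      return 0;
    }
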
@@ -2359,6 +2516,24 @@
 }
 
 
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+                                      HeapEntry* parent_entry,
+                                      int index,
+                                      Object* child_obj,
+                                      int field_offset) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+                                 parent_obj,
+                                 parent_entry,
+                                 index,
+                                 child_obj,
+                                 child_entry);
+    IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+  }
+}
+
+
 void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
                                           HeapEntry* parent_entry,
                                           String* reference_name,
@@ -2421,12 +2596,21 @@
 }
 
 
-void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
+  filler_->SetIndexedAutoIndexReference(
+      HeapGraphEdge::kElement,
+      kGcRootsObject, snapshot_->gc_roots(),
+      GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
+}
+
+
+void V8HeapExplorer::SetGcSubrootReference(
+    VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedAutoIndexReference(
-        HeapGraphEdge::kElement,
-        kGcRootsObject, snapshot_->gc_roots(),
+        is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+        GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
         child_obj, child_entry);
   }
 }
@@ -3235,7 +3419,8 @@
   writer_->AddNumber(edge->type());
   writer_->AddCharacter(',');
   if (edge->type() == HeapGraphEdge::kElement
-      || edge->type() == HeapGraphEdge::kHidden) {
+      || edge->type() == HeapGraphEdge::kHidden
+      || edge->type() == HeapGraphEdge::kWeak) {
     writer_->AddNumber(edge->index());
   } else {
     writer_->AddNumber(GetStringId(edge->name()));
@@ -3315,7 +3500,8 @@
                     "," JSON_S("property")
                     "," JSON_S("internal")
                     "," JSON_S("hidden")
-                    "," JSON_S("shortcut"))
+                    "," JSON_S("shortcut")
+                    "," JSON_S("weak"))
                 "," JSON_S("string_or_number")
                 "," JSON_S("node"))))));
 #undef JSON_S
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 44be3db..b47ce82 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -455,7 +455,8 @@
     kProperty = v8::HeapGraphEdge::kProperty,
     kInternal = v8::HeapGraphEdge::kInternal,
     kHidden = v8::HeapGraphEdge::kHidden,
-    kShortcut = v8::HeapGraphEdge::kShortcut
+    kShortcut = v8::HeapGraphEdge::kShortcut,
+    kWeak = v8::HeapGraphEdge::kWeak
   };
 
   HeapGraphEdge() { }
@@ -465,7 +466,7 @@
 
   Type type() { return static_cast<Type>(type_); }
   int index() {
-    ASSERT(type_ == kElement || type_ == kHidden);
+    ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
     return index_;
   }
   const char* name() {
@@ -588,7 +589,8 @@
   int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
   int RetainedSize(bool exact);
 
-  void Print(int max_depth, int indent);
+  void Print(
+      const char* prefix, const char* edge_name, int max_depth, int indent);
 
   Handle<HeapObject> GetHeapObject();
 
@@ -661,6 +663,7 @@
   HeapEntry* root() { return root_entry_; }
   HeapEntry* gc_roots() { return gc_roots_entry_; }
   HeapEntry* natives_root() { return natives_root_entry_; }
+  HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
   List<HeapEntry*>* entries() { return &entries_; }
   int raw_entries_size() { return raw_entries_size_; }
 
@@ -674,6 +677,9 @@
                       int retainers_count);
   HeapEntry* AddRootEntry(int children_count);
   HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
+  HeapEntry* AddGcSubrootEntry(int tag,
+                               int children_count,
+                               int retainers_count);
   HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
   void ClearPaint();
   HeapEntry* GetEntryById(uint64_t id);
@@ -695,6 +701,7 @@
   HeapEntry* root_entry_;
   HeapEntry* gc_roots_entry_;
   HeapEntry* natives_root_entry_;
+  HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
   char* raw_entries_;
   List<HeapEntry*> entries_;
   bool entries_sorted_;
@@ -716,10 +723,13 @@
   void MoveObject(Address from, Address to);
 
   static uint64_t GenerateId(v8::RetainedObjectInfo* info);
+  static inline uint64_t GetNthGcSubrootId(int delta);
 
+  static const int kObjectIdStep = 2;
   static const uint64_t kInternalRootObjectId;
   static const uint64_t kGcRootsObjectId;
   static const uint64_t kNativesRootObjectId;
+  static const uint64_t kGcRootsFirstSubrootId;
   static const uint64_t kFirstAvailableObjectId;
 
  private:
@@ -969,6 +979,11 @@
                           HeapEntry* parent,
                           int index,
                           Object* child);
+  void SetWeakReference(HeapObject* parent_obj,
+                        HeapEntry* parent_entry,
+                        int index,
+                        Object* child_obj,
+                        int field_offset);
   void SetPropertyReference(HeapObject* parent_obj,
                             HeapEntry* parent,
                             String* reference_name,
@@ -981,11 +996,16 @@
                                     Object* child);
   void SetRootShortcutReference(Object* child);
   void SetRootGcRootsReference();
-  void SetGcRootsReference(Object* child);
+  void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
+  void SetGcSubrootReference(
+      VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
   void TagObject(Object* obj, const char* tag);
 
   HeapEntry* GetEntry(Object* obj);
 
+  static inline HeapObject* GetNthGcSubrootObject(int delta);
+  static inline int GetGcSubrootOrder(HeapObject* subroot);
+
   Heap* heap_;
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -994,8 +1014,11 @@
   HeapObjectsSet objects_tags_;
 
   static HeapObject* const kGcRootsObject;
+  static HeapObject* const kFirstGcSubrootObject;
+  static HeapObject* const kLastGcSubrootObject;
 
   friend class IndexedReferencesExtractor;
+  friend class GcSubrootsEnumerator;
   friend class RootsReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
diff --git a/src/runtime.cc b/src/runtime.cc
index a2e569b..f3adc51 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -625,6 +625,7 @@
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index), isolate);
   if (*boilerplate == isolate->heap()->undefined_value()) {
+    ASSERT(*elements != isolate->heap()->empty_fixed_array());
     boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
@@ -4651,6 +4652,7 @@
   if (value->IsNumber()) {
     ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
     TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+    TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
     ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
     FixedDoubleArray* double_array =
         FixedDoubleArray::cast(object->elements());
@@ -4660,6 +4662,7 @@
     ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
            elements_kind == FAST_DOUBLE_ELEMENTS);
     TransitionElementsKind(object, FAST_ELEMENTS);
+    TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
     FixedArray* object_array =
         FixedArray::cast(object->elements());
     object_array->set(store_index, *value);
@@ -6293,7 +6296,7 @@
   int part_count = indices.length();
 
   Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
-  MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
+  MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
   if (maybe_result->IsFailure()) return maybe_result;
   result->set_length(Smi::FromInt(part_count));
 
@@ -6669,7 +6672,7 @@
   // This assumption is used by the slice encoding in one or two smis.
   ASSERT(Smi::kMaxValue >= String::kMaxLength);
 
-  MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
+  MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
   if (maybe_result->IsFailure()) return maybe_result;
 
   int special_length = special->length();
@@ -7395,7 +7398,8 @@
   return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
 }
 
-
+// Slow version of Math.pow.  We check for fast paths for special cases.
+// Used if SSE2/VFP3 is not available.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -7411,22 +7415,36 @@
   }
 
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-  return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
+  int y_int = static_cast<int>(y);
+  double result;
+  if (y == y_int) {
+    result = power_double_int(x, y_int);  // Returns 1 if exponent is 0.
+  } else if (y == 0.5) {
+    result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0);  // Convert -0 to +0.
+  } else if (y == -0.5) {
+    result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0);  // Convert -0 to +0.
+  } else {
+    result = power_double_double(x, y);
+  }
+  if (isnan(result)) return isolate->heap()->nan_value();
+  return isolate->heap()->AllocateHeapNumber(result);
 }
 
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5.  Used as slow case from fullcodegen.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
+  isolate->counters()->math_pow()->Increment();
+
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (y == 0) {
     return Smi::FromInt(1);
-  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-    return isolate->heap()->nan_value();
   } else {
-    return isolate->heap()->AllocateHeapNumber(pow(x, y));
+    double result = power_double_double(x, y);
+    if (isnan(result)) return isolate->heap()->nan_value();
+    return isolate->heap()->AllocateHeapNumber(result);
   }
 }
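
The slow-path runtime now checks for integral exponents and for y == ±0.5 before falling back to the generic double-double power. The `x + 0.0` trick normalizes -0 to +0 so that sqrt does not return -0, and infinite bases are special-cased because sqrt(±inf) must come out as +inf (or 0 for the reciprocal). A standalone sketch of that ladder; std::pow approximates V8's power_double_int and power_double_double helpers:

    #include <cmath>
    #include <cstdio>

    double Pow(double x, double y) {
      int y_int = static_cast<int>(y);
      if (y == y_int) return std::pow(x, y_int);  // pow(x, 0) == 1.
      if (y == 0.5)                               // Square root.
        return std::isinf(x) ? INFINITY : std::sqrt(x + 0.0);   // -0 -> +0
      if (y == -0.5)                              // Reciprocal square root.
        return std::isinf(x) ? 0 : 1.0 / std::sqrt(x + 0.0);    // -0 -> +0
      return std::pow(x, y);
    }

    int main() {
      printf("%g %g %g\n", Pow(2, 10), Pow(-0.0, 0.5), Pow(4, -0.5));
      // Prints: 1024 0 0.5
      return 0;
    }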
 
@@ -7991,7 +8009,7 @@
 
     AssertNoAllocation no_gc;
     FixedArray* array = reinterpret_cast<FixedArray*>(obj);
-    array->set_map(isolate->heap()->fixed_array_map());
+    array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
     array->set_length(length);
 
     WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -8111,7 +8129,8 @@
   for (int j = 0; j < argc; j++, i++) {
     new_bindings->set(i, *arguments[j + 1]);
   }
-  new_bindings->set_map(isolate->heap()->fixed_cow_array_map());
+  new_bindings->set_map_no_write_barrier(
+      isolate->heap()->fixed_cow_array_map());
   bound_function->set_function_bindings(*new_bindings);
 
   // Update length.
@@ -9299,7 +9318,7 @@
   CONVERT_ARG_CHECKED(JSArray, output, 1);
 
   MaybeObject* maybe_result_array =
-      output->EnsureCanContainNonSmiElements();
+      output->EnsureCanContainHeapObjectElements();
   if (maybe_result_array->IsFailure()) return maybe_result_array;
   RUNTIME_ASSERT(output->HasFastElements());
 
diff --git a/src/scopes.cc b/src/scopes.cc
index e05ca17..4a6d0a7 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -31,6 +31,7 @@
 
 #include "bootstrapper.h"
 #include "compiler.h"
+#include "messages.h"
 #include "scopeinfo.h"
 
 #include "allocation-inl.h"
@@ -284,8 +285,25 @@
   }
 #endif
 
+  if (FLAG_harmony_scoping) {
+    VariableProxy* proxy = scope->CheckAssignmentToConst();
+    if (proxy != NULL) {
+      // Found an assignment to const. Throw a syntax error.
+      MessageLocation location(info->script(),
+                               proxy->position(),
+                               proxy->position());
+      Isolate* isolate = info->isolate();
+      Factory* factory = isolate->factory();
+      Handle<JSArray> array = factory->NewJSArray(0);
+      Handle<Object> result =
+          factory->NewSyntaxError("harmony_const_assign", array);
+      isolate->Throw(*result, &location);
+      return false;
+    }
+  }
+
   info->SetScope(scope);
-  return true;  // Can not fail.
+  return true;
 }
 
 
@@ -554,6 +572,29 @@
 }
 
 
+VariableProxy* Scope::CheckAssignmentToConst() {
+  // Check this scope.
+  if (is_extended_mode()) {
+    for (int i = 0; i < unresolved_.length(); i++) {
+      ASSERT(unresolved_[i]->var() != NULL);
+      if (unresolved_[i]->var()->is_const_mode() &&
+          unresolved_[i]->IsLValue()) {
+        return unresolved_[i];
+      }
+    }
+  }
+
+  // Check inner scopes.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
+    if (proxy != NULL) return proxy;
+  }
+
+  // No assignments to const found.
+  return NULL;
+}
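
CheckAssignmentToConst walks the scope tree depth-first and returns the first variable proxy that both resolves to a const-mode variable and was flagged by the parser's new MarkAsLValue calls. A skeletal sketch of the traversal over a toy scope structure; the Proxy and Scope types here are simplified stand-ins:

    #include <cstdio>
    #include <vector>

    struct Proxy { bool is_const_mode; bool is_lvalue; const char* name; };

    struct Scope {
      bool is_extended_mode;  // Harmony scoping is enabled for this scope.
      std::vector<Proxy*> unresolved;
      std::vector<Scope*> inner_scopes;

      // Returns the first proxy that writes to a const binding, or NULL.
      Proxy* CheckAssignmentToConst() {
        if (is_extended_mode) {
          for (size_t i = 0; i < unresolved.size(); ++i) {
            Proxy* p = unresolved[i];
            if (p->is_const_mode && p->is_lvalue) return p;
          }
        }
        for (size_t i = 0; i < inner_scopes.size(); ++i) {
          Proxy* p = inner_scopes[i]->CheckAssignmentToConst();
          if (p != NULL) return p;
        }
        return NULL;
      }
    };

    int main() {
      Proxy bad = {true, true, "x"};     // A write to a const binding.
      Scope inner = {true, {&bad}, {}};
      Scope outer = {true, {}, {&inner}};
      Proxy* p = outer.CheckAssignmentToConst();
      printf("%s\n", p != NULL ? p->name : "(none)");  // Prints: x
      return 0;
    }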
+
+
 void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
                                          ZoneList<Variable*>* context_locals) {
   ASSERT(stack_locals != NULL);
diff --git a/src/scopes.h b/src/scopes.h
index 523a251..af0449e 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -187,6 +187,11 @@
   // scope over a let binding of the same name.
   Declaration* CheckConflictingVarDeclarations();
 
+  // For harmony block scoping mode: Check if the scope has variable proxies
+  // that are used as lvalues and point to const variables. Assumes that scopes
+  // have been analyzed and variables been resolved.
+  VariableProxy* CheckAssignmentToConst();
+
   // ---------------------------------------------------------------------------
   // Scope-specific info.
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 1be81dd..a5712a0 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1656,14 +1656,14 @@
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map_unsafe(heap->raw_unchecked_free_space_map());
+    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
     this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
-    set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 7c8b5f2..c0315f2 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -41,6 +41,7 @@
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
+      old_reserved_limit_(NULL),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
       during_gc_(false),
@@ -59,10 +60,25 @@
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
-  limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+  limit_ = start_ + (kStoreBufferSize / kPointerSize);
 
-  old_top_ = old_start_ = new Address[kOldStoreBufferLength];
-  old_limit_ = old_start_ + kOldStoreBufferLength;
+  old_virtual_memory_ =
+      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+  old_top_ = old_start_ =
+      reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // We don't know the exact alignment requirements of the OS, but the start
+  // is certainly at least 4K-aligned, i.e. its low 12 bits are zero.
+  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
+  ASSERT(initial_length > 0);
+  ASSERT(initial_length <= kOldStoreBufferLength);
+  old_limit_ = old_start_ + initial_length;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+  CHECK(old_virtual_memory_->Commit(
+            reinterpret_cast<void*>(old_start_),
+            (old_limit_ - old_start_) * kPointerSize,
+            false));
 
   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -76,9 +92,9 @@
   ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);
 
-  virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                          kStoreBufferSize,
-                          false);  // Not executable.
+  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                                kStoreBufferSize,
+                                false));  // Not executable.
   heap_->public_set_store_buffer_top(start_);
 
   hash_map_1_ = new uintptr_t[kHashMapLength];
@@ -90,10 +106,10 @@
 
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
+  delete old_virtual_memory_;
   delete[] hash_map_1_;
   delete[] hash_map_2_;
-  delete[] old_start_;
-  old_start_ = old_top_ = old_limit_ = NULL;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }
@@ -150,7 +166,18 @@
 }
 
 
-void StoreBuffer::HandleFullness() {
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+  while (old_limit_ - old_top_ < space_needed &&
+         old_limit_ < old_reserved_limit_) {
+    size_t grow = old_limit_ - old_start_;  // Double size.
+    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+                                      grow * kPointerSize,
+                                      false));
+    old_limit_ += grow;
+  }
+
+  if (old_limit_ - old_top_ >= space_needed) return;
+
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
   Compact();
@@ -645,9 +672,7 @@
   // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
   heap_->public_set_store_buffer_top(start_);
-  if (top - start_ > old_limit_ - old_top_) {
-    HandleFullness();
-  }
+  EnsureSpace(top - start_);
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates.  In the interest of speed this is a lossy operation.  Some
@@ -688,9 +713,7 @@
 
 
 void StoreBuffer::CheckForFullBuffer() {
-  if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
-    HandleFullness();
-  }
+  EnsureSpace(kStoreBufferSize * 2);
 }
 
 } }  // namespace v8::internal
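
The old store buffer now reserves its maximum size as virtual memory up front and commits pages lazily: EnsureSpace doubles the committed region until either enough space exists or the reserved limit is reached, and only then falls back to compaction and filtering. A rough POSIX-only sketch of reserve-then-commit doubling using mmap and mprotect (the patch goes through V8's VirtualMemory::Commit instead):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstdio>

    int main() {
      const size_t kPage = 4096;            // OS::CommitPageSize() in the patch.
      const size_t kReserved = 64 * kPage;  // Maximum size, reserved only.

      // Reserve address space without committing it (PROT_NONE).
      void* reservation = mmap(NULL, kReserved, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(reservation != MAP_FAILED);
      char* start = static_cast<char*>(reservation);

      // Commit the initial page.
      size_t committed = kPage;
      assert(mprotect(start, committed, PROT_READ | PROT_WRITE) == 0);

      // EnsureSpace-style growth: commit as much again as is already
      // committed, until the demand is met or the reservation is exhausted.
      size_t needed = 10 * kPage;
      while (committed < needed && committed < kReserved) {
        size_t grow = committed;  // Double size.
        assert(mprotect(start + committed, grow, PROT_READ | PROT_WRITE) == 0);
        committed += grow;
      }
      printf("committed %zu of %zu bytes\n", committed, kReserved);  // 16 pages.

      munmap(start, kReserved);
      return 0;
    }
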
diff --git a/src/store-buffer.h b/src/store-buffer.h
index e5e50ae..ab25938 100644
--- a/src/store-buffer.h
+++ b/src/store-buffer.h
@@ -109,7 +109,7 @@
   // been promoted.  Rebuilds the store buffer completely if it overflowed.
   void SortUniq();
 
-  void HandleFullness();
+  void EnsureSpace(intptr_t space_needed);
   void Verify();
 
   bool PrepareForIteration();
@@ -134,6 +134,8 @@
   Address* old_start_;
   Address* old_limit_;
   Address* old_top_;
+  Address* old_reserved_limit_;
+  VirtualMemory* old_virtual_memory_;
 
   bool old_buffer_is_sorted_;
   bool old_buffer_is_filtered_;
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 8b6e28f..0d0105c 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -184,7 +184,7 @@
 Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
                                             Handle<JSObject> receiver,
                                             Handle<JSObject> holder,
-                                            Handle<Object> value) {
+                                            Handle<JSFunction> value) {
   ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
@@ -266,7 +266,7 @@
 Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
                                                  Handle<JSObject> receiver,
                                                  Handle<JSObject> holder,
-                                                 Handle<Object> value) {
+                                                 Handle<JSFunction> value) {
   ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index cc42e05..f55a36d 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -92,7 +92,7 @@
   Handle<Code> ComputeLoadConstant(Handle<String> name,
                                    Handle<JSObject> receiver,
                                    Handle<JSObject> holder,
-                                   Handle<Object> value);
+                                   Handle<JSFunction> value);
 
   Handle<Code> ComputeLoadInterceptor(Handle<String> name,
                                       Handle<JSObject> receiver,
@@ -121,7 +121,7 @@
   Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
                                         Handle<JSObject> receiver,
                                         Handle<JSObject> holder,
-                                        Handle<Object> value);
+                                        Handle<JSFunction> value);
 
   Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
                                            Handle<JSObject> receiver,
@@ -518,7 +518,7 @@
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
-                            Handle<Object> value,
+                            Handle<JSFunction> value,
                             Handle<String> name,
                             Label* miss);
 
@@ -568,7 +568,7 @@
 
   Handle<Code> CompileLoadConstant(Handle<JSObject> object,
                                    Handle<JSObject> holder,
-                                   Handle<Object> value,
+                                   Handle<JSFunction> value,
                                    Handle<String> name);
 
   Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
@@ -603,7 +603,7 @@
   Handle<Code> CompileLoadConstant(Handle<String> name,
                                    Handle<JSObject> object,
                                    Handle<JSObject> holder,
-                                   Handle<Object> value);
+                                   Handle<JSFunction> value);
 
   Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
                                       Handle<JSObject> holder,
diff --git a/src/type-info.cc b/src/type-info.cc
index c781c61..e722d14 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -259,6 +259,7 @@
     case CompareIC::STRINGS:
       return TypeInfo::String();
     case CompareIC::OBJECTS:
+    case CompareIC::KNOWN_OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
     case CompareIC::GENERIC:
@@ -278,6 +279,19 @@
 }
 
 
+Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
+  if (!object->IsCode()) return Handle<Map>::null();
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return Handle<Map>::null();
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  if (state != CompareIC::KNOWN_OBJECTS) {
+    return Handle<Map>::null();
+  }
+  return Handle<Map>(code->FindFirstMap());
+}
+
+
 TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
   Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
@@ -367,6 +381,7 @@
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
     case CompareIC::OBJECTS:
+    case CompareIC::KNOWN_OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
     case CompareIC::GENERIC:
diff --git a/src/type-info.h b/src/type-info.h
index 7c9c05e..eba0987 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -273,6 +273,7 @@
   TypeInfo BinaryType(BinaryOperation* expr);
   TypeInfo CompareType(CompareOperation* expr);
   bool IsSymbolCompare(CompareOperation* expr);
+  Handle<Map> GetCompareMap(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);
   TypeInfo IncrementType(CountOperation* expr);
 
diff --git a/src/v8natives.js b/src/v8natives.js
index 11b1a7e..1d54e28 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -660,6 +660,21 @@
 }
 
 
+// ES5 section 8.12.7.
+function Delete(obj, p, should_throw) {
+  var desc = GetOwnProperty(obj, p);
+  if (IS_UNDEFINED(desc)) return true;
+  if (desc.isConfigurable()) {
+    %DeleteProperty(obj, p, 0);
+    return true;
+  } else if (should_throw) {
+    throw MakeTypeError("define_disallowed", [p]);
+  } else {
+    return false;
+  }
+}
+
+
 // Harmony proxies.
 function DefineProxyProperty(obj, p, attributes, should_throw) {
   var handler = %GetHandler(obj);
@@ -677,12 +692,7 @@
 
 
 // ES5 8.12.9.
-function DefineOwnProperty(obj, p, desc, should_throw) {
-  if (%IsJSProxy(obj)) {
-    var attributes = FromGenericPropertyDescriptor(desc);
-    return DefineProxyProperty(obj, p, attributes, should_throw);
-  }
-
+function DefineObjectProperty(obj, p, desc, should_throw) {
   var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
   // A false value here means that access checks failed.
   if (current_or_access === false) return void 0;
@@ -846,6 +856,90 @@
 }
 
 
+// ES5 section 15.4.5.1.
+function DefineArrayProperty(obj, p, desc, should_throw) {
+  // Note that the length of an array is not actually stored as a regular
+  // property; hence this function assigns obj.length directly (i.e. through
+  // generated code) instead of using DefineObjectProperty() to modify it.
+
+  // Step 3 - Special handling for length property.
+  if (p == "length") {
+    var length = obj.length;
+    if (!desc.hasValue()) {
+      return DefineObjectProperty(obj, "length", desc, should_throw);
+    }
+    var new_length = ToUint32(desc.getValue());
+    if (new_length != ToNumber(desc.getValue())) {
+      throw new $RangeError('defineProperty() array length out of range');
+    }
+    var length_desc = GetOwnProperty(obj, "length");
+    if (new_length != length && !length_desc.isWritable()) {
+      if (should_throw) {
+        throw MakeTypeError("redefine_disallowed", [p]);
+      } else {
+        return false;
+      }
+    }
+    var threw = false;
+    while (new_length < length--) {
+      if (!Delete(obj, ToString(length), false)) {
+        new_length = length + 1;
+        threw = true;
+        break;
+      }
+    }
+    // Strip the value from the descriptor so that the call to
+    // DefineObjectProperty() below cannot clobber the magic "length" value.
+    obj.length = new_length;
+    desc.value_ = void 0;
+    desc.hasValue_ = false;
+    if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+      if (should_throw) {
+        throw MakeTypeError("redefine_disallowed", [p]);
+      } else {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // Step 4 - Special handling for array index.
+  var index = ToUint32(p);
+  if (index == ToNumber(p) && index != 4294967295) {
+    var length = obj.length;
+    var length_desc = GetOwnProperty(obj, "length");
+    if ((index >= length && !length_desc.isWritable()) ||
+        !DefineObjectProperty(obj, p, desc, true)) {
+      if (should_throw) {
+        throw MakeTypeError("define_disallowed", [p]);
+      } else {
+        return false;
+      }
+    }
+    if (index >= length) {
+      obj.length = index + 1;
+    }
+    return true;
+  }
+
+  // Step 5 - Fallback to default implementation.
+  return DefineObjectProperty(obj, p, desc, should_throw);
+}
+
+
+// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+  if (%IsJSProxy(obj)) {
+    var attributes = FromGenericPropertyDescriptor(desc);
+    return DefineProxyProperty(obj, p, attributes, should_throw);
+  } else if (IS_ARRAY(obj)) {
+    return DefineArrayProperty(obj, p, desc, should_throw);
+  } else {
+    return DefineObjectProperty(obj, p, desc, should_throw);
+  }
+}
+
+
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
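
Step 3 of DefineArrayProperty() truncates the array by deleting elements from
the old length downward through the new Delete() helper, stopping at the first
non-configurable element and reporting the index above it as the resulting
length. A host-side model of that loop over a plain index-to-descriptor map (a
sketch with assumed names, not V8 internals):

    #include <cstdint>
    #include <map>

    struct Descriptor { bool configurable; };

    // Truncate to *new_length; on hitting a non-configurable element, report
    // the length actually reached and fail, mirroring the while-loop above.
    bool TruncateElements(std::map<uint32_t, Descriptor>* elements,
                          uint32_t old_length, uint32_t* new_length) {
      for (uint32_t i = old_length; i > *new_length; ) {
        --i;
        std::map<uint32_t, Descriptor>::iterator it = elements->find(i);
        if (it == elements->end()) continue;  // Hole: nothing to delete.
        if (!it->second.configurable) {       // First blocker ends the loop.
          *new_length = i + 1;
          return false;
        }
        elements->erase(it);
      }
      return true;
    }

The stripped descriptor is then re-applied via DefineObjectProperty() after the
magic length property has been written directly.
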
diff --git a/src/v8threads.h b/src/v8threads.h
index 4002bb3..a2aee4e 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -72,7 +72,7 @@
 };
 
 
-// Defined in top.h
+// Defined in isolate.h.
 class ThreadLocalTop;
 
 
diff --git a/src/version.cc b/src/version.cc
index 3352735..8385bd7 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     7
-#define BUILD_NUMBER      12
+#define MINOR_VERSION     8
+#define BUILD_NUMBER      0
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d578bf9..ca3bece 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -426,13 +426,7 @@
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
   int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
-  while (delta >= 9) {
-    nop(9);
-    delta -= 9;
-  }
-  if (delta > 0) {
-    nop(delta);
-  }
+  Nop(delta);
 }
 
 
@@ -441,6 +435,15 @@
 }
 
 
+bool Assembler::IsNop(Address addr) {
+  Address a = addr;
+  while (*a == 0x66) a++;
+  if (*a == 0x90) return true;
+  if (a[0] == 0x0f && a[1] == 0x1f) return true;
+  return false;
+}
+
+
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
   ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
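
For reference, the encodings the rewritten predicate accepts, matching the
forms Assembler::Nop() below can emit:

    90                              1-byte nop
    66 90                           2-byte nop (operand-size prefix)
    0F 1F /0 [sib] [disp]           3- to 8-byte nops
    66 [66 [66]] 0F 1F 84 ...       9- to 11-byte nops
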
@@ -1763,7 +1766,7 @@
 }
 
 
-void Assembler::nop(int n) {
+void Assembler::Nop(int n) {
+  // The recommended multi-byte sequences of NOP instructions from the Intel 64
   // and IA-32 Architectures Software Developer's Manual.
   //
@@ -1778,73 +1781,64 @@
   // 9 bytes  66 NOP DWORD ptr [EAX + EAX*1 +         66 0F 1F 84 00 00 00 00
   //          00000000H]                              00H
 
-  ASSERT(1 <= n);
-  ASSERT(n <= 9);
   EnsureSpace ensure_space(this);
-  switch (n) {
-  case 1:
-    emit(0x90);
-    return;
-  case 2:
-    emit(0x66);
-    emit(0x90);
-    return;
-  case 3:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x00);
-    return;
-  case 4:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x40);
-    emit(0x00);
-    return;
-  case 5:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x44);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 6:
-    emit(0x66);
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x44);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 7:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x80);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 8:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x84);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 9:
-    emit(0x66);
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x84);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    return;
+  while (n > 0) {
+    switch (n) {
+      case 2:
+        emit(0x66);
+      case 1:
+        emit(0x90);
+        return;
+      case 3:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x00);
+        return;
+      case 4:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x40);
+        emit(0x00);
+        return;
+      case 6:
+        emit(0x66);
+      case 5:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x44);
+        emit(0x00);
+        emit(0x00);
+        return;
+      case 7:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x80);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        return;
+      default:
+      case 11:
+        emit(0x66);
+        n--;
+      case 10:
+        emit(0x66);
+        n--;
+      case 9:
+        emit(0x66);
+        n--;
+      case 8:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x84);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        n -= 8;
+    }
   }
 }
 
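
The rewritten Nop() deliberately falls through between cases: sizes 1-7 have
dedicated encodings, while anything longer is emitted as an 8-byte nop carrying
up to three 0x66 prefixes (9-11 bytes per instruction), looping until the
request is exhausted. A host-side model of the per-instruction sizes it chooses
(an illustration, not V8 code):

    #include <vector>

    // Byte count of each nop instruction emitted for a padding request of n.
    std::vector<int> NopInstructionSizes(int n) {
      std::vector<int> sizes;
      while (n > 7) {                  // Long form: 8..11 bytes apiece.
        int chunk = n < 11 ? n : 11;
        sizes.push_back(chunk);
        n -= chunk;
      }
      if (n > 0) sizes.push_back(n);   // 1..7 bytes: dedicated encodings.
      return sizes;
    }

For example, Nop(13) emits an 11-byte nop (three prefixes plus the 8-byte form)
followed by a 2-byte nop: thirteen bytes in two instructions.
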
@@ -2313,6 +2307,27 @@
 }
 
 
+void Assembler::f2xm1() {
+  EnsureSpace ensure_space(this);
+  emit(0xD9);
+  emit(0xF0);
+}
+
+
+void Assembler::fscale() {
+  EnsureSpace ensure_space(this);
+  emit(0xD9);
+  emit(0xFD);
+}
+
+
+void Assembler::fninit() {
+  EnsureSpace ensure_space(this);
+  emit(0xDB);
+  emit(0xE3);
+}
+
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC0, i);
@@ -2572,7 +2587,8 @@
 
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(is_uint2(imm8));
+  ASSERT(CpuFeatures::IsSupported(SSE4_1));
+  ASSERT(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   emit(0x66);
   emit_optional_rex_32(dst, src);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 1db5273..745850d 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -636,6 +636,7 @@
   // possible to align the pc offset to a multiple
   // of m, where m must be a power of 2.
   void Align(int m);
+  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
 
@@ -1154,7 +1155,6 @@
   void hlt();
   void int3();
   void nop();
-  void nop(int n);
   void rdtsc();
   void ret(int imm16);
   void setcc(Condition cc, Register reg);
@@ -1277,6 +1277,9 @@
   void fcos();
   void fptan();
   void fyl2x();
+  void f2xm1();
+  void fscale();
+  void fninit();
 
   void frndint();
 
@@ -1398,7 +1401,7 @@
     return static_cast<int>(reloc_info_writer.pos() - pc_);
   }
 
-  static bool IsNop(Address addr) { return *addr == 0x90; }
+  static bool IsNop(Address addr);
 
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512*MB;
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index e423ae3..1719496 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1305,6 +1305,9 @@
   __ jmp(&entry);
   __ bind(&loop);
   __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(kScratchRegister, call_generic_code);
+  }
   __ movq(Operand(rdx, 0), kScratchRegister);
   __ addq(rdx, Immediate(kPointerSize));
   __ bind(&entry);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 96f70bf..6f3e065 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1991,152 +1991,259 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  // Registers are used as follows:
-  // rdx = base
-  // rax = exponent
-  // rcx = temporary, result
+  // Choose register conforming to calling convention (when bailing out).
+#ifdef _WIN64
+  const Register exponent = rdx;
+#else
+  const Register exponent = rdi;
+#endif
+  const Register base = rax;
+  const Register scratch = rcx;
+  const XMMRegister double_result = xmm3;
+  const XMMRegister double_base = xmm2;
+  const XMMRegister double_exponent = xmm1;
+  const XMMRegister double_scratch = xmm4;
 
-  Label allocate_return, call_runtime;
+  Label call_runtime, done, exponent_not_smi, int_exponent;
 
-  // Load input parameters.
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+  // Save 1 in double_result - we need this several times later on.
+  __ movq(scratch, Immediate(1));
+  __ cvtlsi2sd(double_result, scratch);
 
-  // Save 1 in xmm3 - we need this several times later on.
-  __ Set(rcx, 1);
-  __ cvtlsi2sd(xmm3, rcx);
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack.
+    __ movq(base, Operand(rsp, 2 * kPointerSize));
+    __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &call_runtime);
 
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(rax, &exponent_nonsmi);
-  __ JumpIfNotSmi(rdx, &base_nonsmi);
+    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent, Label::kNear);
 
-  // Optimized version when both exponent and base are smis.
-  Label powi;
-  __ SmiToInteger32(rdx, rdx);
-  __ cvtlsi2sd(xmm0, rdx);
-  __ jmp(&powi);
-  // Exponent is a smi and base is a heapnumber.
-  __ bind(&base_nonsmi);
-  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_runtime);
+    __ bind(&base_is_smi);
+    __ SmiToInteger32(base, base);
+    __ cvtlsi2sd(double_base, base);
+    __ bind(&unpack_exponent);
 
-  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiToInteger32(exponent, exponent);
+    __ jmp(&int_exponent);
 
-  // Optimized version of pow if exponent is a smi.
-  // xmm0 contains the base.
-  __ bind(&powi);
-  __ SmiToInteger32(rax, rax);
+    __ bind(&exponent_not_smi);
+    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &call_runtime);
+    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiToInteger32(exponent, exponent);
+    __ jmp(&int_exponent);
 
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ movq(rdx, rax);
+    __ bind(&exponent_not_smi);
+    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type_ != INTEGER) {
+    Label fast_power;
+    // Detect integer exponents stored as double.
+    __ cvttsd2si(exponent, double_exponent);
+    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+    __ cmpl(exponent, Immediate(0x80000000u));
+    __ j(equal, &call_runtime);
+    __ cvtlsi2sd(double_scratch, exponent);
+    // Already ruled out NaNs for exponent.
+    __ ucomisd(double_exponent, double_scratch);
+    __ j(equal, &int_exponent);
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect the square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead, so the optimized-code
+      // paths skip this check; non-constant +/-0.5 hardly ever occurs there.
+      Label continue_sqrt, continue_rsqrt, not_plus_half;
+      // Test for 0.5.
+      // Load double_scratch with 0.5.
+      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+      __ movq(double_scratch, scratch);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &not_plus_half, Label::kNear);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, double-precision -Infinity has the highest
+      // 12 bits set and the lowest 52 bits cleared.
+      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+      __ movq(double_scratch, scratch);
+      __ ucomisd(double_scratch, double_base);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_sqrt, Label::kNear);
+      __ j(carry, &continue_sqrt, Label::kNear);
+
+      // Set result to Infinity in the special case.
+      __ xorps(double_result, double_result);
+      __ subsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&continue_sqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_scratch, double_scratch);
+      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
+      __ sqrtsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      // Test for -0.5.
+      __ bind(&not_plus_half);
+      // Load double_scratch with -0.5 by subtracting 1.
+      __ subsd(double_scratch, double_result);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &fast_power, Label::kNear);
+
+      // Calculates reciprocal of square root of base.  Check for the special
+      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, double-precision -Infinity has the highest
+      // 12 bits set and the lowest 52 bits cleared.
+      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+      __ movq(double_scratch, scratch);
+      __ ucomisd(double_scratch, double_base);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_rsqrt, Label::kNear);
+      __ j(carry, &continue_rsqrt, Label::kNear);
+
+      // Set result to 0 in the special case.
+      __ xorps(double_result, double_result);
+      __ jmp(&done);
+
+      __ bind(&continue_rsqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_exponent, double_exponent);
+      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
+      __ sqrtsd(double_exponent, double_exponent);
+      __ divsd(double_result, double_exponent);
+      __ jmp(&done);
+    }
+
+    // Using FPU instructions to calculate power.
+    Label fast_power_failed;
+    __ bind(&fast_power);
+    __ fnclex();  // Clear flags to catch exceptions later.
+    // Transfer (B)ase and (E)xponent onto the FPU register stack.
+    __ subq(rsp, Immediate(kDoubleSize));
+    __ movsd(Operand(rsp, 0), double_exponent);
+    __ fld_d(Operand(rsp, 0));  // E
+    __ movsd(Operand(rsp, 0), double_base);
+    __ fld_d(Operand(rsp, 0));  // B, E
+
+    // Exponent is in st(1) and base is in st(0)
+    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+    // FYL2X calculates st(1) * log2(st(0))
+    __ fyl2x();    // X
+    __ fld(0);     // X, X
+    __ frndint();  // rnd(X), X
+    __ fsub(1);    // rnd(X), X-rnd(X)
+    __ fxch(1);    // X - rnd(X), rnd(X)
+    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
+    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
+    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
+    // FSCALE calculates st(0) * 2^st(1)
+    __ fscale();   // 2^X, rnd(X)
+    __ fstp(1);
+    // Bail out to runtime in case of exceptions in the status word.
+    __ fnstsw_ax();
+    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
+    __ j(not_zero, &fast_power_failed, Label::kNear);
+    __ fstp_d(Operand(rsp, 0));
+    __ movsd(double_result, Operand(rsp, 0));
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ jmp(&done);
+
+    __ bind(&fast_power_failed);
+    __ fninit();
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ jmp(&call_runtime);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+  const XMMRegister double_scratch2 = double_exponent;
+  // Back up exponent as we need to check if exponent is negative later.
+  __ movq(scratch, exponent);  // Back up exponent.
+  __ movsd(double_scratch, double_base);  // Back up base.
+  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
 
   // Get absolute value of exponent.
-  Label no_neg;
-  __ cmpl(rax, Immediate(0));
-  __ j(greater_equal, &no_neg, Label::kNear);
-  __ negl(rax);
+  Label no_neg, while_true, no_multiply;
+  __ testl(scratch, scratch);
+  __ j(positive, &no_neg, Label::kNear);
+  __ negl(scratch);
   __ bind(&no_neg);
 
-  // Load xmm1 with 1.
-  __ movaps(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
   __ bind(&while_true);
-  __ shrl(rax, Immediate(1));
+  __ shrl(scratch, Immediate(1));
   __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(xmm1, xmm0);
+  __ mulsd(double_result, double_scratch);
   __ bind(&no_multiply);
-  __ mulsd(xmm0, xmm0);
+
+  __ mulsd(double_scratch, double_scratch);
   __ j(not_zero, &while_true);
 
-  // Base has the original value of the exponent - if the exponent  is
-  // negative return 1/result.
-  __ testl(rdx, rdx);
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ divsd(xmm3, xmm1);
-  __ movaps(xmm1, xmm3);
-  __ xorps(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  __ j(equal, &call_runtime);
+  // If the exponent is negative, return 1/result.
+  __ testl(exponent, exponent);
+  __ j(greater, &done);
+  __ divsd(double_scratch2, double_result);
+  __ movsd(double_result, double_scratch2);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ xorps(double_scratch2, double_scratch2);
+  __ ucomisd(double_scratch2, double_result);
+  // double_exponent, aliased as double_scratch2, has already been overwritten
+  // and may never have held the exponent value at all if the input was a smi.
+  // Reset it to the exponent value before bailing out.
+  __ j(not_equal, &done);
+  __ cvtlsi2sd(double_exponent, exponent);
 
-  __ jmp(&allocate_return);
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 
-  // Exponent (or both) is a heapnumber - no matter what we should now work
-  // on doubles.
-  __ bind(&exponent_nonsmi);
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_runtime);
-  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
-  // Test if exponent is nan.
-  __ ucomisd(xmm1, xmm1);
-  __ j(parity_even, &call_runtime);
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in rax.
+    __ bind(&done);
+    __ AllocateHeapNumber(rax, rcx, &call_runtime);
+    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(2 * kPointerSize);
+  } else {
+    __ bind(&call_runtime);
+    // Move base to the correct argument register.  Exponent is already in xmm1.
+    __ movsd(xmm0, double_base);
+    ASSERT(double_exponent.is(xmm1));
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(2);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()), 2);
+    }
+    // Return value is in xmm0.
+    __ movsd(double_result, xmm0);
+    // Restore context register.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 
-  Label base_not_smi, handle_special_cases;
-  __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
-  __ SmiToInteger32(rdx, rdx);
-  __ cvtlsi2sd(xmm0, rdx);
-  __ jmp(&handle_special_cases, Label::kNear);
-
-  __ bind(&base_not_smi);
-  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_runtime);
-  __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
-  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
-  __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
-  // base is NaN or +/-Infinity
-  __ j(greater_equal, &call_runtime);
-  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
-  __ movq(xmm2, rcx);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half, Label::kNear);
-
-  // Calculates reciprocal of square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movaps(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &call_runtime);
-  // Calculates square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
-  __ sqrtsd(xmm1, xmm1);
-
-  __ bind(&allocate_return);
-  __ AllocateHeapNumber(rcx, rax, &call_runtime);
-  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
-  __ movq(rax, rcx);
-  __ ret(2 * kPointerSize);
-
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(0);
+  }
 }
 
 
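
The FPU fast path above rests on the identity B^E = 2^(E*log2(B)); since F2XM1
only accepts arguments in (-1, 1), the product X = E*log2(B) is split into
rnd(X) + (X - rnd(X)) and reassembled with FSCALE. A host-side model under
those assumptions (plain <cmath>, positive finite base; not stub code):

    #include <cmath>

    double FastPowModel(double base, double exponent) {
      double x = exponent * std::log2(base);       // fyl2x
      double r = std::nearbyint(x);                // frndint (round to nearest)
      double m = std::exp2(x - r);                 // f2xm1 + fld1 + faddp
      return std::ldexp(m, static_cast<int>(r));   // fscale: m * 2^r
    }
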
@@ -5501,33 +5608,46 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  // Save the registers.
-  __ pop(rcx);
-  __ push(rdx);
-  __ push(rax);
-  __ push(rcx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+  __ j(either_smi, &miss, Label::kNear);
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss =
-      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ Cmp(rcx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+  __ Cmp(rbx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+
+  __ subq(rax, rdx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ push(rdx);
     __ push(rax);
+    __ push(rdx);
+    __ push(rax);
     __ Push(Smi::FromInt(op_));
     __ CallExternalReference(miss, 3);
+
+    // Compute the entry point of the rewritten stub.
+    __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+    __ pop(rax);
+    __ pop(rdx);
   }
 
-  // Compute the entry point of the rewritten stub.
-  __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-
-  // Restore registers.
-  __ pop(rcx);
-  __ pop(rax);
-  __ pop(rdx);
-  __ push(rcx);
-
   // Do a tail call to the rewritten stub.
   __ jmp(rdi);
 }
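
Note why GenerateMiss() now pushes rdx and rax twice inside the internal frame:
the inner pair, together with the smi-tagged op, forms the three arguments
consumed by kCompareIC_Miss, while the outer pair survives the call and is
popped back into the registers before the tail jump. Stack contents just before
CallExternalReference (growing downward):

    rdx        saved copy, restored by pop after the call
    rax        saved copy, restored by pop after the call
    rdx        argument 1
    rax        argument 2
    Smi(op_)   argument 3
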
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 339b961..f84772e 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -264,9 +264,7 @@
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
-    __ nop();
-  }
+  __ Nop(Assembler::kDebugBreakSlotLength);
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 1fd78fc..d684ad7 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -138,8 +138,8 @@
   ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
          *(call_target_address - 2) == 0x07 &&  // offset
          *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
+  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
+  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
 
@@ -157,8 +157,8 @@
          Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
+  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
+         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
          *(call_target_address - 1) == 0xe8);   // call
   *(call_target_address - 3) = 0x73;  // jae
   *(call_target_address - 2) = 0x07;  // offset
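
The stack-check patch site is now filled with one two-byte nop (66 90) instead
of two one-byte nops, so the padding decodes as a single instruction. Per the
asserts above, the bytes are:

    original:  73 07 e8 <imm32>   ; jae +7  ; call <stack-check stub>
    patched:   66 90 e8 <imm32>   ; nop     ; call <replacement entry>
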
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 1b8871f..5cbdad7 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -109,6 +109,7 @@
   { 0xC3, UNSET_OP_ORDER, "ret" },
   { 0xC9, UNSET_OP_ORDER, "leave" },
   { 0xF4, UNSET_OP_ORDER, "hlt" },
+  { 0xFC, UNSET_OP_ORDER, "cld" },
   { 0xCC, UNSET_OP_ORDER, "int3" },
   { 0x60, UNSET_OP_ORDER, "pushad" },
   { 0x61, UNSET_OP_ORDER, "popad" },
@@ -910,15 +911,19 @@
           switch (modrm_byte) {
             case 0xE0: mnem = "fchs"; break;
             case 0xE1: mnem = "fabs"; break;
+            case 0xE3: mnem = "fninit"; break;
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
             case 0xEB: mnem = "fldpi"; break;
             case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF0: mnem = "f2xm1"; break;
             case 0xF1: mnem = "fyl2x"; break;
+            case 0xF2: mnem = "fptan"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
+            case 0xFD: mnem = "fscale"; break;
             case 0xFE: mnem = "fsin"; break;
             case 0xFF: mnem = "fcos"; break;
             default: UnimplementedInstruction();
@@ -1034,7 +1039,18 @@
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x28) {
+      if (opcode == 0x1f) {
+        current++;
+        if (rm == 4) {  // SIB byte present.
+          current++;
+        }
+        if (mod == 1) {  // Byte displacement.
+          current += 1;
+        } else if (mod == 2) {  // 32-bit displacement.
+          current += 4;
+        }  // else no immediate displacement.
+        AppendToBuffer("nop");
+      } else if (opcode == 0x28) {
         AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
         current += PrintRightXMMOperand(current);
       } else if (opcode == 0x29) {
@@ -1178,7 +1194,7 @@
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
     current++;
-    if (regop == 4) {  // SIB byte present.
+    if (rm == 4) {  // SIB byte present.
       current++;
     }
     if (mod == 1) {  // Byte displacement.
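
Both disassembler hunks hinge on the same ModRM rule: a SIB byte follows only
when the r/m field is 0b100 (and mod is not 0b11); the reg field plays no part
in addressing, which is exactly what the regop-to-rm fix corrects. Restated as
a predicate:

    // x86-64 encoding rule assumed by the decoder above.
    static bool HasSibByte(int mod, int rm) { return mod != 3 && rm == 4; }
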
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 963912f..24df20b 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -2820,7 +2820,7 @@
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub;
+  MathPowStub stub(MathPowStub::ON_STACK);
   __ CallStub(&stub);
   context()->Plug(rax);
 }
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 3a57753..b3a9422 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1641,6 +1641,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index cbbe65f..293a1db 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -49,7 +49,9 @@
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() { }
 
-  virtual void BeforeCall(int call_size) const { }
+  virtual void BeforeCall(int call_size) const {
+    codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
+  }
 
   virtual void AfterCall() const {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
@@ -241,7 +243,7 @@
       instr->CompileToNative(this);
     }
   }
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   return !is_aborted();
 }
 
@@ -439,6 +441,7 @@
                                LInstruction* instr,
                                SafepointMode safepoint_mode,
                                int argc) {
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -1151,8 +1154,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ Move(ToRegister(instr->result()), instr->value());
+  Handle<Object> value = instr->value();
+  if (value->IsSmi()) {
+    __ Move(ToRegister(instr->result()), value);
+  } else {
+    __ LoadHeapObject(ToRegister(instr->result()),
+                      Handle<HeapObject>::cast(value));
+  }
 }
 
 
@@ -1929,7 +1937,7 @@
     InstanceofStub stub(flags);
 
     __ push(ToRegister(instr->InputAt(0)));
-    __ Push(instr->function());
+    __ PushHeapObject(instr->function());
 
     static const int kAdditionalDelta = 10;
     int delta =
@@ -1999,13 +2007,7 @@
 
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
-  if (result.is(rax)) {
-    __ load_rax(instr->hydrogen()->cell().location(),
-                RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(result, Operand(result, 0));
-  }
+  __ LoadGlobalCell(result, instr->hydrogen()->cell());
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
@@ -2045,25 +2047,7 @@
 
   // Store the value.
   __ movq(Operand(address, 0), value);
-
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    Label smi_store;
-    HType type = instr->hydrogen()->value()->type();
-    if (!type.IsHeapNumber() && !type.IsString() && !type.IsNonPrimitive()) {
-      __ JumpIfSmi(value, &smi_store, Label::kNear);
-    }
-
-    int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
-    __ lea(object, Operand(address, -offset));
-    // Cells are always in the remembered set.
-    __ RecordWrite(object,
-                   address,
-                   value,
-                   kSaveFPRegs,
-                   OMIT_REMEMBERED_SET,
-                   OMIT_SMI_CHECK);
-    __ bind(&smi_store);
-  }
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2083,13 +2067,22 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ movq(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ movq(ContextOperand(context, instr->slot_index()), value);
+  Operand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
+  __ movq(target, value);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
@@ -2141,7 +2134,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
   }
 }
 
@@ -2563,7 +2556,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  LoadHeapObject(result, instr->hydrogen()->closure());
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2634,7 +2627,7 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  __ Move(rdi, instr->function());
+  __ LoadHeapObject(rdi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2808,10 +2801,10 @@
   // This addition might give a result that isn't correct for
   // rounding, due to loss of precision, but only for a number that's
   // so big that the conversion below will overflow anyway.
-  __ addsd(input_reg, xmm_scratch);
+  __ addsd(xmm_scratch, input_reg);
   // Compute Math.floor(input).
   // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, input_reg);
+  __ cvttsd2si(output_reg, xmm_scratch);
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x80000000));
   DeoptimizeIf(equal, instr->environment());
@@ -2849,65 +2842,68 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity.  According to IEEE-754, double-precision
+  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+  __ movq(xmm_scratch, kScratchRegister);
+  __ ucomisd(xmm_scratch, input_reg);
+  // Comparing -Infinity with NaN results in "unordered", which sets the
+  // zero flag as if both were equal.  However, it also sets the carry flag.
+  __ j(not_equal, &sqrt, Label::kNear);
+  __ j(carry, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ xorps(input_reg, input_reg);
+  __ subsd(input_reg, xmm_scratch);
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
   __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
+  __ bind(&done);
 }
 
 
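
The not_equal/carry branch pair above (and the identical one in MathPowStub)
depends on how ucomisd maps its outcome onto the flags:

    a > b          ZF=0  PF=0  CF=0
    a < b          ZF=0  PF=0  CF=1
    a == b         ZF=1  PF=0  CF=0
    unordered/NaN  ZF=1  PF=1  CF=1

Only a genuine equality with -Infinity leaves ZF set and CF clear, so taking
either branch rejects NaN inputs without a separate parity check.
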
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  XMMRegister left_reg = ToDoubleRegister(left);
-  ASSERT(!left_reg.is(xmm1));
-  LOperand* right = instr->InputAt(1);
-  XMMRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  if (exponent_type.IsDouble()) {
-    __ PrepareCallCFunction(2);
-    // Move arguments to correct registers
-    __ movaps(xmm0, left_reg);
-    ASSERT(ToDoubleRegister(right).is(xmm1));
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 2);
-  } else if (exponent_type.IsInteger32()) {
-    __ PrepareCallCFunction(2);
-    // Move arguments to correct registers: xmm0 and edi (not rdi).
-    // On Windows, the registers are xmm0 and edx.
-    __ movaps(xmm0, left_reg);
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+
+  // Choose register conforming to calling convention (when bailing out).
 #ifdef _WIN64
-    ASSERT(ToRegister(right).is(rdx));
+  Register exponent = rdx;
 #else
-    ASSERT(ToRegister(right).is(rdi));
+  Register exponent = rdi;
 #endif
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 2);
-  } else {
-    ASSERT(exponent_type.IsTagged());
-    Register right_reg = ToRegister(right);
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(exponent));
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
 
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-    __ SmiToInteger32(right_reg, right_reg);
-    __ cvtlsi2sd(xmm1, right_reg);
-    __ jmp(&call);
-
-    __ bind(&non_smi);
-    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(exponent, &no_deopt);
+    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
     DeoptimizeIf(not_equal, instr->environment());
-    __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
-    __ bind(&call);
-    __ PrepareCallCFunction(2);
-    // Move arguments to correct registers xmm0 and xmm1.
-    __ movaps(xmm0, left_reg);
-    // Right argument is already in xmm1.
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 2);
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
-  // Return value is in xmm0.
-  __ movaps(result_reg, xmm0);
-  // Restore context register.
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3042,7 +3038,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  __ Move(rdi, instr->target());
+  __ LoadHeapObject(rdi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3725,9 +3721,16 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Register reg = ToRegister(instr->InputAt(0));
-  __ Cmp(reg, instr->hydrogen()->target());
+  Register reg = ToRegister(instr->value());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ cmpq(reg, Operand(kScratchRegister, 0));
+  } else {
+    __ Cmp(reg, target);
+  }
   DeoptimizeIf(not_equal, instr->environment());
 }
 
@@ -3793,18 +3796,6 @@
 }
 
 
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
-  if (heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        factory()->NewJSGlobalPropertyCell(object);
-    __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(result, Operand(result, 0));
-  } else {
-    __ Move(result, object);
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register reg = ToRegister(instr->TempAt(0));
 
@@ -3812,7 +3803,7 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(reg, current_prototype);
+  __ LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
@@ -3822,7 +3813,7 @@
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(reg, current_prototype);
+    __ LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
@@ -3833,16 +3824,32 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
+
+  // Deopt if the array literal boilerplate's ElementsKind differs from the
+  // expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
+    __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(rbx, Immediate(Map::kElementsKindMask));
+    __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
+                           Map::kElementsKindShift));
+    DeoptimizeIf(not_equal, instr->environment());
+  }
 
   // Setup the parameters to the stub/runtime call.
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
-  __ Push(instr->hydrogen()->constant_elements());
+  // Boilerplate already exists; its constant elements are never accessed.
+  // Pass an empty fixed array.
+  __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -3858,9 +3865,9 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
@@ -3899,10 +3906,10 @@
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);
       __ lea(rcx, Operand(result, *offset));
       __ movq(FieldOperand(result, total_offset), rcx);
-      LoadHeapObject(source, value_object);
+      __ LoadHeapObject(source, value_object);
       EmitDeepCopy(value_object, result, source, offset);
     } else if (value->IsHeapObject()) {
-      LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+      __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
       __ movq(FieldOperand(result, total_offset), rcx);
     } else {
       __ movq(rcx, value, RelocInfo::NONE);
@@ -3927,7 +3934,7 @@
 
   __ bind(&allocated);
   int offset = 0;
-  LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
   EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
   ASSERT_EQ(size, offset);
 }
@@ -4053,7 +4060,12 @@
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   ASSERT(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(operand)));
+    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    if (object->IsSmi()) {
+      __ Push(Handle<Smi>::cast(object));
+    } else {
+      __ PushHeapObject(Handle<HeapObject>::cast(object));
+    }
   } else if (operand->IsRegister()) {
     __ push(ToRegister(operand));
   } else {
@@ -4176,25 +4188,20 @@
 }
 
 
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    while (padding_size > 0) {
-      int nop_size = padding_size > 9 ? 9 : padding_size;
-      __ nop(nop_size);
-      padding_size -= nop_size;
-    }
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+    __ Nop(padding_size);
   }
-  last_lazy_deopt_pc_ = masm()->pc_offset();
 }
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  last_lazy_deopt_pc_ = masm()->pc_offset();
   ASSERT(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
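
Parameterizing EnsureSpaceForLazyDeopt() lets call sites subtract the size of
the call they are about to emit (see SafepointGenerator::BeforeCall earlier in
this patch), preserving the invariant that consecutive patch points are at
least patch_size() bytes apart. The padding arithmetic, modeled standalone with
assumed names:

    int PaddingNeeded(int current_pc, int last_lazy_deopt_pc,
                      int space_needed) {
      int gap = current_pc - last_lazy_deopt_pc;
      return gap < space_needed ? space_needed - gap : 0;
    }
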
@@ -4272,7 +4279,8 @@
     __ j(above_equal, &done, Label::kNear);
     StackCheckStub stub;
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    last_lazy_deopt_pc_ = masm()->pc_offset();
     __ bind(&done);
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -4283,7 +4291,8 @@
         new DeferredStackCheck(this, instr);
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
     __ j(below, deferred_stack_check->entry());
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    last_lazy_deopt_pc_ = masm()->pc_offset();
     __ bind(instr->done_label());
     deferred_stack_check->SetExit(instr->done_label());
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 868f75e..832942f 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -198,7 +198,6 @@
                          LInstruction* instr,
                          CallKind call_kind);
 
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode,
@@ -305,7 +304,7 @@
     Address address;
   };
 
-  void EnsureSpaceForLazyDeopt();
+  void EnsureSpaceForLazyDeopt(int space_needed);
 
   LChunk* const chunk_;
   MacroAssembler* const masm_;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index b486fae..7fa8a39 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1397,7 +1397,7 @@
       UseFixed(instr->right(), rdi);
 #endif
   LPower* result = new LPower(left, right);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1786,7 +1786,8 @@
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1803,7 +1804,8 @@
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  return new LStoreContextSlot(context, value, temp);
+  LInstruction* result = new LStoreContextSlot(context, value, temp);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index c21223b..280d235 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1781,6 +1781,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return InputAt(0); }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index caca628..10e423b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2238,6 +2238,43 @@
 }
 
 
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    movq(result, Operand(result, 0));
+  } else {
+    Move(result, object);
+  }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    movq(kScratchRegister, Operand(kScratchRegister, 0));
+    push(kScratchRegister);
+  } else {
+    Push(object);
+  }
+}
+
+
+void MacroAssembler::LoadGlobalCell(Register dst,
+                                    Handle<JSGlobalPropertyCell> cell) {
+  if (dst.is(rax)) {
+    load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+  } else {
+    movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    movq(dst, Operand(dst, 0));
+  }
+}
+
+
 void MacroAssembler::Push(Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
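
New-space objects move on every scavenge, so their addresses must never be
baked into generated code. LoadHeapObject() and PushHeapObject() therefore
embed an old-space global property cell and read the object through it, at the
cost of one extra load; old-space objects keep the direct Move/Push. The
indirect form is effectively:

    movq reg, <cell address>   ; old-space cell, stable, safe to embed
    movq reg, [reg]            ; current address of the movable object
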
@@ -3049,7 +3086,7 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  Move(rdi, function);
+  LoadHeapObject(rdi, function);
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // We call indirectly through the code field in the function to
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index cf03e59..8046e5c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -784,6 +784,14 @@
   void Cmp(const Operand& dst, Smi* src);
   void Push(Handle<Object> source);
 
+  // Load a heap object and handle the case of new-space objects by
+  // indirecting via a global cell.
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+  void PushHeapObject(Handle<HeapObject> object);
+
+  // Load a global cell into a register.
+  void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the rsp register.
   void Drop(int stack_elements);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 5a81c89..a28dbbf 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -421,7 +421,7 @@
   // -----------------------------------
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
-  __ Move(rdi, function);
+  __ LoadHeapObject(rdi, function);
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
@@ -1015,7 +1015,7 @@
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<Object> value,
+                                        Handle<JSFunction> value,
                                         Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
@@ -1026,7 +1026,7 @@
       object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ Move(rax, value);
+  __ LoadHeapObject(rax, value);
   __ ret(0);
 }
 
@@ -2370,23 +2370,9 @@
 
   // Store the value in the cell.
   __ movq(cell_operand, rax);
-  Label done;
-  __ JumpIfSmi(rax, &done);
-
-  __ movq(rcx, rax);
-  __ lea(rdx, cell_operand);
-  // Cells are always in the remembered set.
-  __ RecordWrite(rbx,  // Object.
-                 rdx,  // Address.
-                 rcx,  // Value.
-                 kDontSaveFPRegs,
-                 OMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-
+  // Cells are always rescanned, so no write barrier here.
 
   // Return the value (register rax).
-  __ bind(&done);
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1);
   __ ret(0);
@@ -2578,7 +2564,7 @@
 
 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
                                                    Handle<JSObject> holder,
-                                                   Handle<Object> value,
+                                                   Handle<JSFunction> value,
                                                    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
@@ -2732,7 +2718,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<Object> value) {
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver