Version 3.26.1 (based on bleeding_edge revision r20368)

Fix Type::Intersect to skip uninhabited bitsets (Chromium issue 357330).

Fix PrepareKeyedOperand on arm (Chromium issue 358057).

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@20371 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 708e04e..26f70c2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2014-04-01: Version 3.26.1
+
+        Fix Type::Intersect to skip uninhabited bitsets (Chromium issue 357330).
+
+        Fix PrepareKeyedOperand on arm (Chromium issue 358057).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-03-31: Version 3.26.0
 
         Deprecate Start/StopCpuProfiling methods (issue 3213).
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 4f7a960..3755a53 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -103,6 +103,7 @@
 def GetPreferredTryMasters(project, change):
   return {
     'tryserver.v8': {
+      'v8_linux_rel': set(['defaulttests']),
       'v8_mac_rel': set(['defaulttests']),
       'v8_win_rel': set(['defaulttests']),
     },
diff --git a/include/v8.h b/include/v8.h
index 5f5e08d..50480b8 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -524,6 +524,13 @@
     return !operator==(that);
   }
 
+  /**
+   * Install a finalization callback on this object.
+   * NOTE: There is no guarantee as to *when* or even *if* the callback is
+   * invoked. The invocation is performed solely on a best-effort basis.
+   * As always, GC-based finalization should *not* be relied upon for any
+   * critical form of resource management!
+   */
   template<typename P>
   V8_INLINE void SetWeak(
       P* parameter,
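
For illustration, a minimal embedder-side sketch of the API documented above.
Only Persistent::SetWeak and WeakCallbackData are V8 API; the Wrapper struct,
OnWeak callback, and resource field are hypothetical, and, per the note, the
callback may never be invoked.

    #include <v8.h>

    struct Wrapper {
      v8::Persistent<v8::Object> handle;
      int* native_resource;  // normally freed eagerly; the callback is a backstop
    };

    static void OnWeak(const v8::WeakCallbackData<v8::Object, Wrapper>& data) {
      Wrapper* wrapper = data.GetParameter();
      delete wrapper->native_resource;  // best effort only; never rely on this
      wrapper->handle.Reset();          // dispose the weak handle
      delete wrapper;
    }

    // Usage: wrapper->handle.SetWeak(wrapper, &OnWeak);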
diff --git a/src/api.cc b/src/api.cc
index 56fc792..9e24fee 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3736,8 +3736,7 @@
           object,
           GetElementsKindFromExternalArrayType(array_type));
 
-  object->set_map(*external_array_map);
-  object->set_elements(*array);
+  i::JSObject::SetMapAndElements(object, external_array_map, array);
 }
 
 }  // namespace
@@ -6101,7 +6100,7 @@
           static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
   i::Handle<i::Map> map =
       i::JSObject::GetElementsTransitionMap(obj, elements_kind);
-  obj->set_map_and_elements(*map, *elements);
+  i::JSObject::SetMapAndElements(obj, map, elements);
   return obj;
 }
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 2ca8016..3539bbc 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -118,10 +118,14 @@
  Isolate* isolate = masm_->isolate();
   Label ok;
   ASSERT(scratch.is(sp) == (pointers == 0));
+  Heap::RootListIndex index;
   if (pointers != 0) {
     __ sub(scratch, sp, Operand(pointers * kPointerSize));
+    index = Heap::kRealStackLimitRootIndex;
+  } else {
+    index = Heap::kStackLimitRootIndex;
   }
-  __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+  __ LoadRoot(stack_limit_scratch, index);
   __ cmp(scratch, Operand(stack_limit_scratch));
   __ b(hs, &ok);
   PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index c280999..ba44b50 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2539,7 +2539,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 9cf94ee..b02574b 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -3309,7 +3309,8 @@
       __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
     } else {
       ASSERT_EQ(-1, shift_size);
-      __ add(scratch0(), scratch0(), Operand(key, LSR, 1));
+      // The key can be negative, so we use ASR here.
+      __ add(scratch0(), scratch0(), Operand(key, ASR, 1));
     }
     return MemOperand(base, scratch0());
   }
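
The one-character change above matters because the key register holds a value
that may be negative and must be halved; a logical shift right would turn a
negative key into a huge positive offset. A standalone illustration (plain
C++, not V8 code; right-shifting a negative int is implementation-defined in
C++, but arithmetic on every target V8 supports):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t key = -8;  // e.g. a negated, smi-scaled index
      uint32_t lsr = static_cast<uint32_t>(key) >> 1;  // LSR: 0x7ffffffc
      int32_t  asr = key >> 1;                         // ASR: -4
      std::printf("LSR gives %u, ASR gives %d\n", lsr, asr);
      return 0;
    }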
@@ -5716,13 +5717,61 @@
 }
 
 
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ Push(object);
+  __ Push(index);
+  __ mov(cp, Operand::Zero());
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
 
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
   Label out_of_object, done;
+
+  __ tst(index, Operand(Smi::FromInt(1)));
+  __ b(ne, deferred->entry());
+  __ mov(index, Operand(index, ASR, 1));
+
   __ cmp(index, Operand::Zero());
   __ b(lt, &out_of_object);
 
@@ -5738,6 +5787,7 @@
   __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
   __ ldr(result, FieldMemOperand(scratch,
                                  FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 21da500..38843f1 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -141,6 +141,10 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 1aae2f2..50f86cc 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -285,9 +285,9 @@
   static const unsigned kAllocatableLowRangeBegin = 0;
   static const unsigned kAllocatableLowRangeEnd = 14;
   static const unsigned kAllocatableHighRangeBegin = 16;
-  static const unsigned kAllocatableHighRangeEnd = 29;
+  static const unsigned kAllocatableHighRangeEnd = 28;
 
-  static const RegList kAllocatableFPRegisters = 0x3fff7fff;
+  static const RegList kAllocatableFPRegisters = 0x1fff7fff;
 
   // Gap between low and high ranges.
   static const int kAllocatableRangeGapSize =
@@ -316,12 +316,12 @@
     ASSERT((kAllocatableLowRangeBegin == 0) &&
            (kAllocatableLowRangeEnd == 14) &&
            (kAllocatableHighRangeBegin == 16) &&
-           (kAllocatableHighRangeEnd == 29));
+           (kAllocatableHighRangeEnd == 28));
     const char* const names[] = {
       "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
       "d8", "d9", "d10", "d11", "d12", "d13", "d14",
       "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
-      "d24", "d25", "d26", "d27", "d28", "d29"
+      "d24", "d25", "d26", "d27", "d28"
     };
     return names[index];
   }
@@ -420,9 +420,11 @@
 // Keeps the 0 double value.
 ALIAS_REGISTER(FPRegister, fp_zero, d15);
 // Crankshaft double scratch register.
-ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30);
-// MacroAssembler double scratch register.
-ALIAS_REGISTER(FPRegister, fp_scratch, d31);
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
+// MacroAssembler double scratch registers.
+ALIAS_REGISTER(FPRegister, fp_scratch, d30);
+ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
+ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
 
 #undef ALIAS_REGISTER
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 7e8267b..d112271 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -1393,9 +1393,8 @@
 
     // Return.
     __ Bind(&done);
-    __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
-    __ Str(result_double,
-        FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
+    __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
+                          result_double);
     ASSERT(result_tagged.is(x0));
     __ IncrementCounter(
         masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index 831d449..0944c4b 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -339,8 +339,8 @@
 
     // Non-hole double, copy value into a heap number.
     Register heap_num = x5;
-    __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
-    __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
+    __ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
+                          x13, heap_num_map);
     __ Mov(x13, dst_elements);
     __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
     __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index d8f1402..46dabae 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -117,10 +117,14 @@
   Label ok;
   ASSERT(jssp.Is(__ StackPointer()));
   ASSERT(scratch.Is(jssp) == (pointers == 0));
+  Heap::RootListIndex index;
   if (pointers != 0) {
     __ Sub(scratch, jssp, pointers * kPointerSize);
+    index = Heap::kRealStackLimitRootIndex;
+  } else {
+    index = Heap::kStackLimitRootIndex;
   }
-  __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+  __ CompareRoot(scratch, index);
   __ B(hs, &ok);
   PredictableCodeSizeScope predictable(masm_,
                                        Assembler::kCallSizeWithRelocation);
@@ -4720,22 +4724,23 @@
   Register result_value = x2;
   Register boolean_done = x3;
   Register empty_fixed_array = x4;
+  Register untagged_result = x5;
   __ Mov(map_reg, Operand(map));
   __ Pop(result_value);
   __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
   __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
   ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
-  // TODO(jbramley): Use Stp if possible.
-  __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
-  __ Str(empty_fixed_array,
-         FieldMemOperand(result, JSObject::kPropertiesOffset));
-  __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
-  __ Str(result_value,
-         FieldMemOperand(result,
-                         JSGeneratorObject::kResultValuePropertyOffset));
-  __ Str(boolean_done,
-         FieldMemOperand(result,
-                         JSGeneratorObject::kResultDonePropertyOffset));
+  STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
+                JSObject::kElementsOffset);
+  STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
+                JSGeneratorObject::kResultDonePropertyOffset);
+  __ ObjectUntag(untagged_result, result);
+  __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
+  __ Stp(empty_fixed_array, empty_fixed_array,
+         MemOperand(untagged_result, JSObject::kPropertiesOffset));
+  __ Stp(result_value, boolean_done,
+         MemOperand(untagged_result,
+                    JSGeneratorObject::kResultValuePropertyOffset));
 
   // Only the value field needs a write barrier, as the other values are in the
   // root set.
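
The STATIC_ASSERTs above are what license the Stp pairing: a store-pair writes
two words at consecutive offsets, so each paired field must sit exactly one
pointer after its partner. A standalone analogue; the struct layout is an
assumption for illustration, not JSGeneratorObject's real one:

    #include <cstddef>

    struct IterResult {
      void* map;
      void* properties;
      void* elements;  // paired with properties in one store-pair
      void* value;
      void* done;      // paired with value in one store-pair
    };

    static_assert(offsetof(IterResult, elements) ==
                      offsetof(IterResult, properties) + sizeof(void*),
                  "store-pair needs adjacent fields");
    static_assert(offsetof(IterResult, done) ==
                      offsetof(IterResult, value) + sizeof(void*),
                  "store-pair needs adjacent fields");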
diff --git a/src/arm64/lithium-arm64.cc b/src/arm64/lithium-arm64.cc
index 6dccc5f..4bed135 100644
--- a/src/arm64/lithium-arm64.cc
+++ b/src/arm64/lithium-arm64.cc
@@ -2562,7 +2562,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegisterAtStart(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }
 
 
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index abae911..bea79ba 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -5879,14 +5879,61 @@
 }
 
 
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ Push(object);
+  __ Push(index);
+  __ Mov(cp, 0);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   __ AssertSmi(index);
 
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
   Label out_of_object, done;
+
+  __ TestAndBranchIfAnySet(
+      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
+  __ Mov(index, Operand(index, ASR, 1));
+
   __ Cmp(index, Smi::FromInt(0));
   __ B(lt, &out_of_object);
 
@@ -5902,6 +5949,7 @@
   __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   __ Ldr(result, FieldMemOperand(result,
                                  FixedArray::kHeaderSize - kPointerSize));
+  __ Bind(deferred->exit());
   __ Bind(&done);
 }
 
diff --git a/src/arm64/lithium-codegen-arm64.h b/src/arm64/lithium-codegen-arm64.h
index b1d8b70..e06f05c 100644
--- a/src/arm64/lithium-codegen-arm64.h
+++ b/src/arm64/lithium-codegen-arm64.h
@@ -149,6 +149,10 @@
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
 
   Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
 
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index d660d36..4fd6d76 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -1409,6 +1409,30 @@
 }
 
 
+void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  if (emit_debug_code()) {
+    Label ok;
+    Tbz(obj, 0, &ok);
+    Abort(kObjectTagged);
+    Bind(&ok);
+  }
+  Orr(tagged_obj, obj, kHeapObjectTag);
+}
+
+
+void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  if (emit_debug_code()) {
+    Label ok;
+    Tbnz(obj, 0, &ok);
+    Abort(kObjectNotTagged);
+    Bind(&ok);
+  }
+  Bic(untagged_obj, obj, kHeapObjectTag);
+}
+
+
 void MacroAssembler::IsObjectNameType(Register object,
                                       Register type,
                                       Label* fail) {
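
Both helpers reduce to a single bit operation on the low tag bit
(kHeapObjectTag is 1); the emit_debug_code() paths only assert the bit's prior
state. The same arithmetic as a plain C++ sketch:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kHeapObjectTag = 1;

    uintptr_t ObjectTag(uintptr_t obj) {
      assert((obj & kHeapObjectTag) == 0);  // must not already be tagged
      return obj | kHeapObjectTag;          // the Orr in the generated code
    }

    uintptr_t ObjectUntag(uintptr_t obj) {
      assert((obj & kHeapObjectTag) != 0);  // must be tagged
      return obj & ~kHeapObjectTag;         // the Bic in the generated code
    }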
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 08ddb87..12c7dd8 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -53,7 +53,7 @@
 #endif
       has_frame_(false),
       use_real_aborts_(true),
-      sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
+      sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch1, fp_scratch2) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -1196,7 +1196,7 @@
 }
 
 
-void MacroAssembler::LoadRoot(Register destination,
+void MacroAssembler::LoadRoot(CPURegister destination,
                               Heap::RootListIndex index) {
   // TODO(jbramley): Most root values are constants, and can be synthesized
   // without a load. Refer to the ARM back end for details.
@@ -3267,7 +3267,7 @@
 
   // Tag the object if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    Orr(result, result, kHeapObjectTag);
+    ObjectTag(result, result);
   }
 }
 
@@ -3349,7 +3349,7 @@
 
   // Tag the object if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    Orr(result, result, kHeapObjectTag);
+    ObjectTag(result, result);
   }
 }
 
@@ -3533,32 +3533,50 @@
                                         Label* gc_required,
                                         Register scratch1,
                                         Register scratch2,
-                                        Register heap_number_map) {
+                                        CPURegister value,
+                                        CPURegister heap_number_map) {
+  ASSERT(!value.IsValid() || value.Is64Bits());
+  UseScratchRegisterScope temps(this);
+
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
-  // Store heap number map in the allocated object.
-  if (heap_number_map.Is(NoReg)) {
-    heap_number_map = scratch1;
+  // Prepare the heap number map.
+  if (!heap_number_map.IsValid()) {
+    // If we have a valid value register, use the same type of register to store
+    // the map so we can use STP to store both in one instruction.
+    if (value.IsValid() && value.IsFPRegister()) {
+      heap_number_map = temps.AcquireD();
+    } else {
+      heap_number_map = scratch1;
+    }
     LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   }
-  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-}
+  if (emit_debug_code()) {
+    Register map;
+    if (heap_number_map.IsFPRegister()) {
+      map = scratch1;
+      Fmov(map, DoubleRegister(heap_number_map));
+    } else {
+      map = Register(heap_number_map);
+    }
+    AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+  }
 
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
-                                                 DoubleRegister value,
-                                                 Label* gc_required,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Register heap_number_map) {
-  // TODO(all): Check if it would be more efficient to use STP to store both
-  // the map and the value.
-  AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
-  Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+  // Store the heap number map and the value in the allocated object.
+  if (value.IsSameSizeAndType(heap_number_map)) {
+    STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
+                  HeapNumber::kValueOffset);
+    Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
+  } else {
+    Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+    if (value.IsValid()) {
+      Str(value, MemOperand(result, HeapNumber::kValueOffset));
+    }
+  }
+  ObjectTag(result, result);
 }
 
 
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 1777c38..3061c1f 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -802,7 +802,7 @@
   inline void InitializeRootRegister();
 
   // Load an object from the root table.
-  void LoadRoot(Register destination,
+  void LoadRoot(CPURegister destination,
                 Heap::RootListIndex index);
   // Store an object to the root table.
   void StoreRoot(Register source,
@@ -883,6 +883,9 @@
   void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
   void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
 
+  inline void ObjectTag(Register tagged_obj, Register obj);
+  inline void ObjectUntag(Register untagged_obj, Register obj);
+
   // Abort execution if argument is not a name, enabled via --debug-code.
   void AssertName(Register object);
 
@@ -1354,13 +1357,8 @@
                           Label* gc_required,
                           Register scratch1,
                           Register scratch2,
-                          Register heap_number_map = NoReg);
-  void AllocateHeapNumberWithValue(Register result,
-                                   DoubleRegister value,
-                                   Label* gc_required,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register heap_number_map = NoReg);
+                          CPURegister value = NoFPReg,
+                          CPURegister heap_number_map = NoReg);
 
   // ---------------------------------------------------------------------------
   // Support functions.
diff --git a/src/arm64/stub-cache-arm64.cc b/src/arm64/stub-cache-arm64.cc
index 1b2e959..aed9af9 100644
--- a/src/arm64/stub-cache-arm64.cc
+++ b/src/arm64/stub-cache-arm64.cc
@@ -398,9 +398,7 @@
     DoubleRegister temp_double = temps.AcquireD();
     __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
 
-    Label do_store, heap_number;
-    __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
-
+    Label do_store;
     __ JumpIfSmi(value_reg, &do_store);
 
     __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
@@ -408,7 +406,7 @@
     __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
 
     __ Bind(&do_store);
-    __ Str(temp_double, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+    __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double);
   }
 
   // Stub never generated for non-global objects that require access checks.
diff --git a/src/builtins.cc b/src/builtins.cc
index fd4723f..200f7fe 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -210,6 +210,8 @@
 static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
                                           FixedArrayBase* elms,
                                           int to_trim) {
+  ASSERT(heap->CanMoveObjectStart(elms));
+
   Map* map = elms->map();
   int entry_size;
   if (elms->IsFixedArray()) {
@@ -246,6 +248,8 @@
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
+  // Since left trimming is only performed on pages which are not concurrently
+  // swept, creating a filler object does not require synchronization.
   heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
 
   int new_start_index = to_trim * (entry_size / kPointerSize);
@@ -554,7 +558,7 @@
     first = isolate->factory()->undefined_value();
   }
 
-  if (!heap->CanMoveObjectStart(*elms_obj)) {
+  if (heap->CanMoveObjectStart(*elms_obj)) {
     array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
   } else {
     // Shift the elements.
diff --git a/src/compiler.cc b/src/compiler.cc
index 4b53989..a012d81 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -354,6 +354,10 @@
     return AbortAndDisableOptimization(kTooManyParametersLocals);
   }
 
+  if (scope->HasIllegalRedeclaration()) {
+    return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration);
+  }
+
   // Take --hydrogen-filter into account.
   if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
     return AbortOptimization(kHydrogenFilter);
diff --git a/src/elements.cc b/src/elements.cc
index 0624a03..ae1313f 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -962,11 +962,15 @@
         if (length == 0) {
           array->initialize_elements();
         } else {
-          backing_store->set_length(length);
+          int filler_size = (old_capacity - length) * ElementSize;
           Address filler_start = backing_store->address() +
               BackingStore::OffsetOfElementAt(length);
-          int filler_size = (old_capacity - length) * ElementSize;
           array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+
+          // We are storing the new length using release store after creating a
+          // filler for the left-over space to avoid races with the sweeper
+          // thread.
+          backing_store->synchronized_set_length(length);
         }
       } else {
         // Otherwise, fill the unused tail with holes.
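
The ordering here is the point: the filler must be fully written before the
shorter length becomes visible, or a concurrently sweeping thread could pick
up the new length, step past the shrunken array, and try to parse stale bytes
where the filler has not been written yet. A standalone model of the protocol
using std::atomic (V8 uses its own acquire/release wrappers, added in the
objects-inl.h hunk below); the Array layout is illustrative:

    #include <atomic>
    #include <cstring>

    struct Array {
      std::atomic<int> length;
      char payload[256];
    };

    // Mutator: shrink the array. Publish the filler before the new length.
    void Shrink(Array* a, int new_len, int old_len) {
      std::memset(a->payload + new_len, 0, old_len - new_len);  // the "filler"
      a->length.store(new_len, std::memory_order_release);      // release store
    }

    // Sweeper: an acquire load of the length is guaranteed to see everything
    // written before the matching release store, i.e. the finished filler.
    int ObservedLength(const Array* a) {
      return a->length.load(std::memory_order_acquire);
    }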
diff --git a/src/handles.cc b/src/handles.cc
index ea0f8ae..edb657c 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -682,6 +682,10 @@
             if (field_index >= map->inobject_properties()) {
               field_index = -(field_index - map->inobject_properties() + 1);
             }
+            field_index = field_index << 1;
+            if (details.representation().IsDouble()) {
+              field_index |= 1;
+            }
             indices->set(index, Smi::FromInt(field_index));
           }
         }
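
The smi stored in the indices array now packs two facts: the low bit flags a
mutable-double field, and the remaining bits carry the field index, negated
for out-of-object properties. Runtime_LoadMutableDouble (in the runtime.cc
hunk below) reverses the transformation. A standalone round-trip sketch with
assumed helper names:

    #include <cassert>

    // Mirrors the handles.cc encoding above.
    int Encode(int raw_index, int inobject_properties, bool is_double) {
      int field_index = raw_index;
      if (field_index >= inobject_properties) {
        field_index = -(field_index - inobject_properties + 1);
      }
      field_index *= 2;  // the << 1 in handles.cc, kept defined for negatives
      if (is_double) field_index |= 1;
      return field_index;
    }

    // Mirrors Runtime_LoadMutableDouble's index recovery (the >> 1 drops the
    // double bit; an arithmetic shift is assumed, as on all V8 targets).
    int Decode(int encoded, int inobject_properties) {
      int idx = encoded >> 1;
      if (idx < 0) idx = -idx + inobject_properties - 1;
      return idx;
    }

    int main() {
      assert(Decode(Encode(2, 3, true), 3) == 2);  // in-object double
      assert(Decode(Encode(5, 3, true), 3) == 5);  // out-of-object double
      return 0;
    }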
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 81ecb89..f920a65 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -7504,6 +7504,7 @@
                     HValue* index) {
     SetOperandAt(0, object);
     SetOperandAt(1, index);
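+    // Loading the field may box a mutable double, i.e. allocate a HeapNumber
+    // in the new deferred path, so this instruction can now trigger GC.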
+    SetChangesFlag(kNewSpacePromotion);
     set_representation(Representation::Tagged());
   }
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 1a7d828..c3c1bda 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -106,12 +106,14 @@
                            Register scratch = esp) {
     Label ok;
     Isolate* isolate = masm_->isolate();
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(isolate);
     ASSERT(scratch.is(esp) == (pointers == 0));
+    ExternalReference stack_limit;
     if (pointers != 0) {
       __ mov(scratch, esp);
       __ sub(scratch, Immediate(pointers * kPointerSize));
+      stack_limit = ExternalReference::address_of_real_stack_limit(isolate);
+    } else {
+      stack_limit = ExternalReference::address_of_stack_limit(isolate);
     }
     __ cmp(scratch, Operand::StaticVariable(stack_limit));
     __ j(above_equal, &ok, Label::kNear);
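
All four back ends now make the same choice, shown here for ia32: when the
prologue reserves `pointers` slots below the stack pointer, it must compare
the prospective stack pointer against the *real* stack limit, since the
ordinary limit is also overwritten by the stack guard to force the next check
to fail when an interrupt is pending. A sketch of the shared predicate, with
assumed names:

    #include <cstddef>
    #include <cstdint>

    bool StackCheckPasses(uintptr_t sp, size_t pointers,
                          uintptr_t real_limit, uintptr_t ordinary_limit) {
      if (pointers != 0) {
        // Reserving slots: test the prospective sp for genuine overflow.
        return sp - pointers * sizeof(void*) >= real_limit;
      }
      // No reservation: the ordinary check, which doubles as the interrupt
      // check because the guard can raise this limit.
      return sp >= ordinary_limit;
    }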
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index b3c06d6..4ffbe53 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -6323,11 +6323,56 @@
 }
 
 
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ push(object);
+  __ push(index);
+  __ xor_(esi, esi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index,
+                              const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
 
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, object, index, x87_stack_);
+
   Label out_of_object, done;
+  __ test(index, Immediate(Smi::FromInt(1)));
+  __ j(not_zero, deferred->entry());
+
+  __ sar(index, 1);
+
   __ cmp(index, Immediate(0));
   __ j(less, &out_of_object, Label::kNear);
   __ mov(object, FieldOperand(object,
@@ -6344,6 +6389,7 @@
                               index,
                               times_half_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 079595c..b29f0f1 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -163,6 +163,9 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 8fa6dbd..384a21c 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2702,7 +2702,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseTempRegister(instr->index());
-  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }
 
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index a676fea..8c92c94 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -127,10 +127,14 @@
   Isolate* isolate = masm_->isolate();
   Label ok;
   ASSERT(scratch.is(sp) == (pointers == 0));
+  Heap::RootListIndex index;
   if (pointers != 0) {
     __ Subu(scratch, sp, Operand(pointers * kPointerSize));
+    index = Heap::kRealStackLimitRootIndex;
+  } else {
+    index = Heap::kStackLimitRootIndex;
   }
-  __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+  __ LoadRoot(stack_limit_scratch, index);
   __ Branch(&ok, hs, scratch, Operand(stack_limit_scratch));
   PredictableCodeSizeScope predictable(masm_, 4 * Assembler::kInstrSize);
   __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index ddd321c..41fe7b1 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -125,6 +125,15 @@
     WRITE_FIELD(this, offset, Smi::FromInt(value));     \
   }
 
+#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset)    \
+  int holder::synchronized_##name() {                       \
+    Object* value = ACQUIRE_READ_FIELD(this, offset);       \
+    return Smi::cast(value)->value();                       \
+  }                                                         \
+  void holder::synchronized_set_##name(int value) {         \
+    RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+  }
+
 
 #define BOOL_GETTER(holder, field, name, offset)           \
   bool holder::name() {                                    \
@@ -1095,9 +1104,25 @@
 #define READ_FIELD(p, offset) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
 
+#define ACQUIRE_READ_FIELD(p, offset)                                    \
+  reinterpret_cast<Object*>(                                             \
+      Acquire_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
+
+#define NO_BARRIER_READ_FIELD(p, offset)                                    \
+  reinterpret_cast<Object*>(                                                \
+      NoBarrier_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
+
 #define WRITE_FIELD(p, offset, value) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
 
+#define RELEASE_WRITE_FIELD(p, offset, value)                           \
+  Release_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)),   \
+                reinterpret_cast<AtomicWord>(value));
+
+#define NO_BARRIER_WRITE_FIELD(p, offset, value)                           \
+  NoBarrier_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)),    \
+                  reinterpret_cast<AtomicWord>(value));
+
 #define WRITE_BARRIER(heap, object, offset, value)                      \
   heap->incremental_marking()->RecordWrite(                             \
       object, HeapObject::RawField(object, offset), value);             \
@@ -1348,6 +1373,21 @@
 }
 
 
+Map* HeapObject::synchronized_map() {
+  return synchronized_map_word().ToMap();
+}
+
+
+void HeapObject::synchronized_set_map(Map* value) {
+  synchronized_set_map_word(MapWord::FromMap(value));
+  if (value != NULL) {
+    // TODO(1600) We are passing NULL as a slot because maps can never be on
+    // an evacuation candidate.
+    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+  }
+}
+
+
 // Unsafe accessor omitting write barrier.
 void HeapObject::set_map_no_write_barrier(Map* value) {
   set_map_word(MapWord::FromMap(value));
@@ -1355,14 +1395,26 @@
 
 
 MapWord HeapObject::map_word() {
-  return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
+  return MapWord(
+      reinterpret_cast<uintptr_t>(NO_BARRIER_READ_FIELD(this, kMapOffset)));
 }
 
 
 void HeapObject::set_map_word(MapWord map_word) {
-  // WRITE_FIELD does not invoke write barrier, but there is no need
-  // here.
-  WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+  NO_BARRIER_WRITE_FIELD(
+      this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+}
+
+
+MapWord HeapObject::synchronized_map_word() {
+  return MapWord(
+      reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
+}
+
+
+void HeapObject::synchronized_set_map_word(MapWord map_word) {
+  RELEASE_WRITE_FIELD(
+      this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
 }
 
 
@@ -1698,31 +1750,24 @@
 }
 
 
-void JSObject::set_map_and_elements(Map* new_map,
-                                    FixedArrayBase* value,
-                                    WriteBarrierMode mode) {
-  ASSERT(value->HasValidElements());
-  if (new_map != NULL) {
-    if (mode == UPDATE_WRITE_BARRIER) {
-      set_map(new_map);
-    } else {
-      ASSERT(mode == SKIP_WRITE_BARRIER);
-      set_map_no_write_barrier(new_map);
-    }
-  }
-  ASSERT((map()->has_fast_smi_or_object_elements() ||
-          (value == GetHeap()->empty_fixed_array())) ==
-         (value->map() == GetHeap()->fixed_array_map() ||
-          value->map() == GetHeap()->fixed_cow_array_map()));
-  ASSERT((value == GetHeap()->empty_fixed_array()) ||
-         (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
-  WRITE_FIELD(this, kElementsOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
+void JSObject::SetMapAndElements(Handle<JSObject> object,
+                                 Handle<Map> new_map,
+                                 Handle<FixedArrayBase> value) {
+  JSObject::MigrateToMap(object, new_map);
+  ASSERT((object->map()->has_fast_smi_or_object_elements() ||
+          (*value == object->GetHeap()->empty_fixed_array())) ==
+         (value->map() == object->GetHeap()->fixed_array_map() ||
+          value->map() == object->GetHeap()->fixed_cow_array_map()));
+  ASSERT((*value == object->GetHeap()->empty_fixed_array()) ||
+         (object->map()->has_fast_double_elements() ==
+          value->IsFixedDoubleArray()));
+  object->set_elements(*value);
 }
 
 
 void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
-  set_map_and_elements(NULL, value, mode);
+  WRITE_FIELD(this, kElementsOffset, value);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
 }
 
 
@@ -2947,9 +2992,12 @@
 
 
 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+
 SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
 
 
 uint32_t Name::hash_field() {
diff --git a/src/objects.cc b/src/objects.cc
index 9a6b26a..92ec283 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1284,34 +1284,39 @@
   // In either case we resort to a short external string instead, omitting
   // the field caching the address of the backing store.  When we encounter
   // short external strings in generated code, we need to bailout to runtime.
+  Map* new_map;
   if (size < ExternalString::kSize ||
       heap->old_pointer_space()->Contains(this)) {
-    this->set_map_no_write_barrier(
-        is_internalized
-            ? (is_ascii
-                ? heap->
-                    short_external_internalized_string_with_one_byte_data_map()
-                : heap->short_external_internalized_string_map())
-            : (is_ascii
-                ? heap->short_external_string_with_one_byte_data_map()
-                : heap->short_external_string_map()));
+    new_map = is_internalized
+        ? (is_ascii
+            ? heap->
+                short_external_internalized_string_with_one_byte_data_map()
+            : heap->short_external_internalized_string_map())
+        : (is_ascii
+            ? heap->short_external_string_with_one_byte_data_map()
+            : heap->short_external_string_map());
   } else {
-    this->set_map_no_write_barrier(
-        is_internalized
-            ? (is_ascii
-                ? heap->external_internalized_string_with_one_byte_data_map()
-                : heap->external_internalized_string_map())
-            : (is_ascii
-                ? heap->external_string_with_one_byte_data_map()
-                : heap->external_string_map()));
+    new_map = is_internalized
+        ? (is_ascii
+            ? heap->external_internalized_string_with_one_byte_data_map()
+            : heap->external_internalized_string_map())
+        : (is_ascii
+            ? heap->external_string_with_one_byte_data_map()
+            : heap->external_string_map());
   }
+
+  // Byte size of the external String object.
+  int new_size = this->SizeFromMap(new_map);
+  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+
+  // We are storing the new map using release store after creating a filler for
+  // the left-over space to avoid races with the sweeper thread.
+  this->synchronized_set_map(new_map);
+
   ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
-  // Fill the remainder of the string with dead wood.
-  int new_size = this->Size();  // Byte size of the external String object.
-  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
   heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
   return true;
 }
@@ -1351,23 +1356,30 @@
   // In either case we resort to a short external string instead, omitting
   // the field caching the address of the backing store.  When we encounter
   // short external strings in generated code, we need to bailout to runtime.
+  Map* new_map;
   if (size < ExternalString::kSize ||
       heap->old_pointer_space()->Contains(this)) {
-    this->set_map_no_write_barrier(
-        is_internalized ? heap->short_external_ascii_internalized_string_map()
-                        : heap->short_external_ascii_string_map());
+    new_map = is_internalized
+        ? heap->short_external_ascii_internalized_string_map()
+        : heap->short_external_ascii_string_map();
   } else {
-    this->set_map_no_write_barrier(
-        is_internalized ? heap->external_ascii_internalized_string_map()
-                        : heap->external_ascii_string_map());
+    new_map = is_internalized
+        ? heap->external_ascii_internalized_string_map()
+        : heap->external_ascii_string_map();
   }
+
+  // Byte size of the external String object.
+  int new_size = this->SizeFromMap(new_map);
+  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+
+  // We are storing the new map using release store after creating a filler for
+  // the left-over space to avoid races with the sweeper thread.
+  this->synchronized_set_map(new_map);
+
   ExternalAsciiString* self = ExternalAsciiString::cast(this);
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
-  // Fill the remainder of the string with dead wood.
-  int new_size = this->Size();  // Byte size of the external String object.
-  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
   heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
   return true;
 }
@@ -2301,7 +2313,9 @@
   // we still do it.
   heap->CreateFillerObjectAt(new_end, size_delta);
 
-  elms->set_length(len - to_trim);
+  // We are storing the new length using release store after creating a filler
+  // for the left-over space to avoid races with the sweeper thread.
+  elms->synchronized_set_length(len - to_trim);
 
   heap->AdjustLiveBytes(elms->address(), -size_delta, mode);
 
@@ -2376,7 +2390,9 @@
   // converted to doubles.
   if (!old_map->InstancesNeedRewriting(
           *new_map, number_of_fields, inobject, unused)) {
-    object->set_map(*new_map);
+    // Writing the new map here does not require synchronization since it does
+    // not change the actual object size.
+    object->synchronized_set_map(*new_map);
     return;
   }
 
@@ -2446,6 +2462,11 @@
   int instance_size_delta = old_map->instance_size() - new_instance_size;
   ASSERT(instance_size_delta >= 0);
   Address address = object->address() + new_instance_size;
+
+  // The trimming is performed on a newly allocated object, which is on a
+  // freshly allocated page or on an already swept page. Hence, the sweeper
+  // thread cannot get confused by the filler creation. No synchronization is
+  // needed.
   isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
 
   // If there are properties in the new backing store, trim it to the correct
@@ -2455,6 +2476,10 @@
     object->set_properties(*array);
   }
 
+  // The trimming is performed on a newly allocated object, which is on a
+  // freshly allocated page or on an already swept page. Hence, the sweeper
+  // thread cannot get confused by the filler creation. No synchronization is
+  // needed.
   object->set_map(*new_map);
 }
 
@@ -4645,7 +4670,10 @@
                         -instance_size_delta,
                         Heap::FROM_MUTATOR);
 
-  object->set_map(*new_map);
+  // We are storing the new map using release store after creating a filler for
+  // the left-over space to avoid races with the sweeper thread.
+  object->synchronized_set_map(*new_map);
+
   map->NotifyLeafMapLayoutChange();
 
   object->set_properties(*dictionary);
@@ -4672,6 +4700,13 @@
 }
 
 
+void JSObject::ResetElements(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION_VOID(
+      object->GetIsolate(),
+      object->ResetElements());
+}
+
+
 static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
     Handle<FixedArrayBase> array,
     int length,
@@ -5641,6 +5676,15 @@
 }
 
 
+Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
+                                        Representation representation,
+                                        int index) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     object->FastPropertyAt(representation, index), Object);
+}
+
+
 template<class ContextObject>
 class JSObjectWalkVisitor {
  public:
@@ -9149,7 +9193,6 @@
   }
 
   int delta = old_size - new_size;
-  string->set_length(new_length);
 
   Address start_of_string = string->address();
   ASSERT_OBJECT_ALIGNED(start_of_string);
@@ -9168,6 +9211,10 @@
   }
   heap->AdjustLiveBytes(start_of_string, -delta, Heap::FROM_MUTATOR);
 
+  // We are storing the new length using release store after creating a filler
+  // for the left-over space to avoid races with the sweeper thread.
+  string->synchronized_set_length(new_length);
+
   if (new_length == 0) return heap->isolate()->factory()->empty_string();
   return string;
 }
@@ -11174,7 +11221,7 @@
         ? GetElementsTransitionMap(object, new_elements_kind)
         : handle(object->map());
     object->ValidateElements();
-    object->set_map_and_elements(*new_map, *new_elements);
+    JSObject::SetMapAndElements(object, new_map, new_elements);
 
     // Transition through the allocation site as well if present.
     JSObject::UpdateAllocationSite(object, new_elements_kind);
@@ -11220,7 +11267,7 @@
   accessor->CopyElements(object, elems, elements_kind);
 
   object->ValidateElements();
-  object->set_map_and_elements(*new_map, *elems);
+  JSObject::SetMapAndElements(object, new_map, elems);
 
   if (FLAG_trace_elements_transitions) {
     PrintElementsTransition(stdout, object, elements_kind, old_elements,
@@ -14360,7 +14407,7 @@
     dict->CopyValuesTo(*fast_elements);
     object->ValidateElements();
 
-    object->set_map_and_elements(*new_map, *fast_elements);
+    JSObject::SetMapAndElements(object, new_map, fast_elements);
   } else if (object->HasExternalArrayElements() ||
              object->HasFixedTypedArrayElements()) {
     // Typed arrays cannot have holes or undefined elements.
@@ -16354,7 +16401,7 @@
   buffer->set_weak_first_view(*typed_array);
   ASSERT(typed_array->weak_next() == isolate->heap()->undefined_value());
   typed_array->set_buffer(*buffer);
-  typed_array->set_map_and_elements(*new_map, *new_elements);
+  JSObject::SetMapAndElements(typed_array, new_map, new_elements);
 
   return buffer;
 }
diff --git a/src/objects.h b/src/objects.h
index abfa963..98bf122 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1292,6 +1292,8 @@
   V(kOperandIsNotAString, "Operand is not a string")                          \
   V(kOperandIsNotSmi, "Operand is not smi")                                   \
   V(kOperandNotANumber, "Operand not a number")                               \
+  V(kObjectTagged, "The object is tagged")                                    \
+  V(kObjectNotTagged, "The object is not tagged")                             \
   V(kOptimizationDisabled, "Optimization is disabled")                        \
   V(kOptimizedTooManyTimes, "Optimized too many times")                       \
   V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                  \
@@ -1814,6 +1816,14 @@
   // of primitive (non-JS) objects like strings, heap numbers etc.
   inline void set_map_no_write_barrier(Map* value);
 
+  // Get the map using acquire load.
+  inline Map* synchronized_map();
+  inline MapWord synchronized_map_word();
+
+  // Set the map using release store.
+  inline void synchronized_set_map(Map* value);
+  inline void synchronized_set_map_word(MapWord map_word);
+
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
   inline MapWord map_word();
@@ -2189,6 +2199,10 @@
   DECL_ACCESSORS(elements, FixedArrayBase)
   inline void initialize_elements();
   MUST_USE_RESULT inline MaybeObject* ResetElements();
+  static void ResetElements(Handle<JSObject> object);
+  static inline void SetMapAndElements(Handle<JSObject> object,
+                                       Handle<Map> map,
+                                       Handle<FixedArrayBase> elements);
   inline ElementsKind GetElementsKind();
   inline ElementsAccessor* GetElementsAccessor();
   // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
@@ -2237,11 +2251,6 @@
   bool HasDictionaryArgumentsElements();
   inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
 
-  inline void set_map_and_elements(
-      Map* map,
-      FixedArrayBase* value,
-      WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
   // Requires: HasFastElements().
   static Handle<FixedArray> EnsureWritableFastElements(
       Handle<JSObject> object);
@@ -2616,6 +2625,9 @@
   MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
       Representation representation,
       int index);
+  static Handle<Object> FastPropertyAt(Handle<JSObject> object,
+                                       Representation representation,
+                                       int index);
   inline Object* RawFastPropertyAt(int index);
   inline void FastPropertyAtPut(int index, Object* value);
 
@@ -3011,6 +3023,10 @@
   inline int length();
   inline void set_length(int value);
 
+  // Get and set the length using acquire loads and release stores.
+  inline int synchronized_length();
+  inline void synchronized_set_length(int value);
+
   inline static FixedArrayBase* cast(Object* object);
 
   // Layout description.
@@ -8783,6 +8799,11 @@
   inline int length();
   inline void set_length(int value);
 
+  // Get and set the length of the string using acquire loads and release
+  // stores.
+  inline int synchronized_length();
+  inline void synchronized_set_length(int value);
+
   // Returns whether this string has only ASCII chars, i.e. all of them can
   // be ASCII encoded.  This might be the case even if the string is
   // two-byte.  Such strings may appear when the embedder prefers
diff --git a/src/runtime.cc b/src/runtime.cc
index 849de3c..48e81e2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1010,7 +1010,7 @@
             static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
     Handle<Map> map =
         JSObject::GetElementsTransitionMap(holder, external_elements_kind);
-    holder->set_map_and_elements(*map, *elements);
+    JSObject::SetMapAndElements(holder, map, elements);
     ASSERT(IsExternalArrayElementsKind(holder->map()->elements_kind()));
   } else {
     holder->set_buffer(Smi::FromInt(0));
@@ -1107,7 +1107,7 @@
           static_cast<uint8_t*>(buffer->backing_store()));
   Handle<Map> map = JSObject::GetElementsTransitionMap(
       holder, external_elements_kind);
-  holder->set_map_and_elements(*map, *elements);
+  JSObject::SetMapAndElements(holder, map, elements);
 
   if (source->IsJSTypedArray()) {
     Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
@@ -4201,6 +4201,11 @@
 
   Address end_of_string = answer->address() + string_size;
   Heap* heap = isolate->heap();
+
+  // The trimming is performed on a newly allocated object, which is on a
+  // freshly allocated page or on an already swept page. Hence, the sweeper
+  // thread cannot get confused by the filler creation. No synchronization is
+  // needed.
   heap->CreateFillerObjectAt(end_of_string, delta);
   heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR);
   return *answer;
@@ -10634,27 +10639,24 @@
 
 // Move contents of argument 0 (an array) to argument 1 (an array)
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSArray, from, 0);
-  CONVERT_ARG_CHECKED(JSArray, to, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
   from->ValidateElements();
   to->ValidateElements();
-  FixedArrayBase* new_elements = from->elements();
+
+  Handle<FixedArrayBase> new_elements(from->elements());
   ElementsKind from_kind = from->GetElementsKind();
-  MaybeObject* maybe_new_map;
-  maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind);
-  Object* new_map;
-  if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
-  to->set_map_and_elements(Map::cast(new_map), new_elements);
+  Handle<Map> new_map = JSObject::GetElementsTransitionMap(to, from_kind);
+  JSObject::SetMapAndElements(to, new_map, new_elements);
   to->set_length(from->length());
-  Object* obj;
-  { MaybeObject* maybe_obj = from->ResetElements();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+
+  JSObject::ResetElements(from);
   from->set_length(Smi::FromInt(0));
+
   to->ValidateElements();
-  return to;
+  return *to;
 }
 
 
@@ -14508,6 +14510,19 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LoadMutableDouble) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
+  int idx = index->value() >> 1;
+  if (idx < 0) {
+    idx = -idx + object->map()->inobject_properties() - 1;
+  }
+  return *JSObject::FastPropertyAt(object, Representation::Double(), idx);
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
diff --git a/src/runtime.h b/src/runtime.h
index be0e545..f84355b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -100,6 +100,7 @@
   F(DebugCallbackSupportsStepping, 1, 1) \
   F(DebugPrepareStepInIfStepping, 1, 1) \
   F(FlattenString, 1, 1) \
+  F(LoadMutableDouble, 2, 1) \
   F(TryMigrateInstance, 1, 1) \
   F(NotifyContextDisposed, 0, 1) \
   \
diff --git a/src/types.cc b/src/types.cc
index e269582..98bb280 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -424,10 +424,8 @@
   }
 
   // Fast case: top or bottom types.
-  if (type1->IsAny()) return type1;
-  if (type2->IsAny()) return type2;
-  if (type1->IsNone()) return type2;
-  if (type2->IsNone()) return type1;
+  if (type1->IsAny() || type2->IsNone()) return type1;
+  if (type2->IsAny() || type1->IsNone()) return type2;
 
   // Semi-fast case: Unioned objects are neither involved nor produced.
   if (!(type1->IsUnion() || type2->IsUnion())) {
@@ -436,19 +434,20 @@
   }
 
   // Slow case: may need to produce a Unioned object.
-  int size = type1->IsBitset() || type2->IsBitset() ? 1 : 0;
+  int size = 0;
   if (!type1->IsBitset()) {
     size += (type1->IsUnion() ? Config::union_length(type1->AsUnion()) : 1);
   }
   if (!type2->IsBitset()) {
     size += (type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 1);
   }
-  ASSERT(size >= 2);
-  UnionedHandle unioned = Config::union_create(size, region);
-  size = 0;
-
   int bitset = type1->GlbBitset() | type2->GlbBitset();
-  if (bitset != kNone) {
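+  // Reserve a slot for the glb bitset only when it is inhabited; an
+  // uninhabited bitset denotes no values and must not appear in the union.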
+  if (IsInhabited(bitset)) ++size;
+  ASSERT(size >= 1);
+  UnionedHandle unioned = Config::union_create(size, region);
+
+  size = 0;
+  if (IsInhabited(bitset)) {
     Config::union_set(unioned, size++, Config::from_bitset(bitset, region));
   }
   size = ExtendUnion(unioned, type1, size);
@@ -500,10 +499,8 @@
   }
 
   // Fast case: top or bottom types.
-  if (type1->IsNone()) return type1;
-  if (type2->IsNone()) return type2;
-  if (type1->IsAny()) return type2;
-  if (type2->IsAny()) return type1;
+  if (type1->IsNone() || type2->IsAny()) return type1;
+  if (type2->IsNone() || type1->IsAny()) return type2;
 
   // Semi-fast case: Unioned objects are neither involved nor produced.
   if (!(type1->IsUnion() || type2->IsUnion())) {
@@ -512,20 +509,21 @@
   }
 
   // Slow case: may need to produce a Unioned object.
-  int size = 0;
+  int size = INT_MAX;
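+  // INT_MAX is the identity for the Min below; in the slow case at least one
+  // input is not a bitset, so size is always lowered to a finite value.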
   if (!type1->IsBitset()) {
-    size = (type1->IsUnion() ? Config::union_length(type1->AsUnion()) : 2);
+    size = (type1->IsUnion() ? Config::union_length(type1->AsUnion()) : 1);
   }
   if (!type2->IsBitset()) {
-    int size2 = (type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 2);
-    size = (size == 0 ? size2 : Min(size, size2));
+    size = Min(size,
+               type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 1);
   }
-  ASSERT(size >= 2);
-  UnionedHandle unioned = Config::union_create(size, region);
-  size = 0;
-
   int bitset = type1->GlbBitset() & type2->GlbBitset();
-  if (bitset != kNone) {
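+  // As in Union above, skip the glb bitset when it is uninhabited (Chromium
+  // issue 357330): the old kNone check let nonzero but uninhabited bitsets
+  // slip into the result.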
+  if (IsInhabited(bitset)) ++size;
+  ASSERT(size >= 1);
+  UnionedHandle unioned = Config::union_create(size, region);
+
+  size = 0;
+  if (IsInhabited(bitset)) {
     Config::union_set(unioned, size++, Config::from_bitset(bitset, region));
   }
   size = ExtendIntersection(unioned, type1, type2, size);
@@ -645,7 +643,7 @@
     switch (dim) {
       case BOTH_DIMS:
         BitsetTypePrint(out, bitset & kSemantic);
-        PrintF("/");
+        PrintF(out, "/");
         BitsetTypePrint(out, bitset & kRepresentation);
         break;
       case SEMANTIC_DIM:
@@ -658,18 +656,18 @@
   } else if (this->IsConstant()) {
     PrintF(out, "Constant(%p : ", static_cast<void*>(*this->AsConstant()));
     Config::from_bitset(this->LubBitset())->TypePrint(out);
-    PrintF(")");
+    PrintF(out, ")");
   } else if (this->IsClass()) {
     PrintF(out, "Class(%p < ", static_cast<void*>(*this->AsClass()));
     Config::from_bitset(this->LubBitset())->TypePrint(out);
-    PrintF(")");
+    PrintF(out, ")");
   } else if (this->IsUnion()) {
     PrintF(out, "(");
     UnionedHandle unioned = this->AsUnion();
     for (int i = 0; i < Config::union_length(unioned); ++i) {
       TypeHandle type_i = Config::union_get(unioned, i);
       if (i > 0) PrintF(out, " | ");
-      type_i->TypePrint(out);
+      type_i->TypePrint(out, dim);
     }
     PrintF(out, ")");
   }
diff --git a/src/version.cc b/src/version.cc
index 0df368a..d8ae4f7 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     26
-#define BUILD_NUMBER      0
+#define BUILD_NUMBER      1
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 6d1e956..97cd460 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -107,11 +107,15 @@
     Isolate* isolate = masm_->isolate();
     Label ok;
     ASSERT(scratch.is(rsp) == (pointers == 0));
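+    // When reserving room for the locals, compare against the real stack
+    // limit: the ordinary limit may have been lowered to signal a pending
+    // interrupt and would fake a stack overflow here (see Regress357137).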
+    Heap::RootListIndex index;
     if (pointers != 0) {
       __ movp(scratch, rsp);
       __ subp(scratch, Immediate(pointers * kPointerSize));
+      index = Heap::kRealStackLimitRootIndex;
+    } else {
+      index = Heap::kStackLimitRootIndex;
     }
-    __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+    __ CompareRoot(scratch, index);
     __ j(above_equal, &ok, Label::kNear);
     __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
     __ bind(&ok);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 849e2d4..ac6d370 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -5564,11 +5564,55 @@
 }
 
 
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object);
+  __ Push(index);
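+  // No context is available here; clear the context register (rsi) before
+  // calling into the runtime.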
+  __ xorp(rsi, rsi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
 
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
   Label out_of_object, done;
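+  // If the low bit of the smi-tagged index is set, the field holds a mutable
+  // double; load it through the runtime, which boxes the value in a fresh
+  // HeapNumber.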
+  __ Move(kScratchRegister, Smi::FromInt(1));
+  __ testp(index, kScratchRegister);
+  __ j(not_zero, deferred->entry());
+
+  __ sarp(index, Immediate(1));
+
   __ SmiToInteger32(index, index);
   __ cmpl(index, Immediate(0));
   __ j(less, &out_of_object, Label::kNear);
@@ -5586,6 +5630,7 @@
                                index,
                                times_pointer_size,
                                FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 37807ed..23c63c8 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -116,6 +116,9 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);
 
 // Parallel move support.
   void DoParallelMove(LParallelMove* move);
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 0dba33d..9d99e91 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -2589,7 +2589,9 @@
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseTempRegister(instr->index());
-  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
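+  // The deferred path for mutable double fields calls into the runtime, so
+  // the instruction needs a pointer map for its safepoint.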
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }
 
 
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index c1f20f1..25d19af 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -3913,3 +3913,32 @@
 }
 
 #endif  // DEBUG
+
+
+static void InterruptCallback357137(v8::Isolate* isolate, void* data) { }
+
+
+static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, NULL);
+}
+
+
+TEST(Regress357137) {
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope hscope(isolate);
+  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+  global->Set(v8::String::NewFromUtf8(isolate, "interrupt"),
+              v8::FunctionTemplate::New(isolate, RequestInterrupt));
+  v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+  ASSERT(!context.IsEmpty());
+  v8::Context::Scope cscope(context);
+
+  v8::Local<v8::Value> result = CompileRun(
+      "var locals = '';"
+      "for (var i = 0; i < 512; i++) locals += 'var v' + i + '= 42;';"
+      "eval('function f() {' + locals + 'return function() { return v0; }; }');"
+      "interrupt();"  // This triggers a fake stack overflow in f.
+      "f()()");
+  CHECK_EQ(42.0, result->ToNumber()->Value());
+}
diff --git a/test/mjsunit/fuzz-natives-part1.js b/test/mjsunit/fuzz-natives-part1.js
index 0bd0dc1..2059bb0 100644
--- a/test/mjsunit/fuzz-natives-part1.js
+++ b/test/mjsunit/fuzz-natives-part1.js
@@ -182,6 +182,9 @@
   // Only applicable to TypedArrays.
   "_TypedArrayInitialize": true,
 
+  // Only applicable to loading mutable doubles.
+  "LoadMutableDouble": true,
+
   // Only applicable to generators.
   "_GeneratorNext": true,
   "_GeneratorThrow": true,
diff --git a/test/mjsunit/regress/regress-358057.js b/test/mjsunit/regress/regress-358057.js
new file mode 100644
index 0000000..c5fe73a
--- /dev/null
+++ b/test/mjsunit/regress/regress-358057.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
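+
+// The key __v_6 is negative and only becomes a valid index after the
+// constant 10 is added; the optimized keyed load must handle this.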
+__v_0 = new Uint8ClampedArray(10);
+for (var i = 0; i < 10; i++) {
+  __v_0[i] = 0xAA;
+}
+function __f_12(__v_6) {
+  if (__v_6 < 0) {
+    __v_1 = __v_0[__v_6 + 10];
+    return __v_1;
+  }
+}
+
+assertEquals(0xAA, __f_12(-1));
+%OptimizeFunctionOnNextCall(__f_12);
+assertEquals(0xAA, __f_12(-1));
diff --git a/test/mjsunit/regress/regress-358090.js b/test/mjsunit/regress/regress-358090.js
new file mode 100644
index 0000000..d9c07e8
--- /dev/null
+++ b/test/mjsunit/regress/regress-358090.js
@@ -0,0 +1,8 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x = Array(100000);
+y = Array.apply(Array, x);
+y.unshift(4);
+y.shift();
diff --git a/test/mjsunit/regress/regress-crbug-357137.js b/test/mjsunit/regress/regress-crbug-357137.js
new file mode 100644
index 0000000..a780426
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-357137.js
@@ -0,0 +1,8 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
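+// Declare enough locals that the prologue stack check is performed with an
+// offset below sp, then recurse until it fails with a RangeError.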
+var locals = "";
+for (var i = 0; i < 1024; i++) locals += "var v" + i + ";";
+eval("function f() {" + locals + "f();}");
+assertThrows("f()", RangeError);
diff --git a/test/mjsunit/regress/regress-crbug-357330.js b/test/mjsunit/regress/regress-crbug-357330.js
new file mode 100644
index 0000000..b3edf00
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-357330.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(foo) {
+  var g;
+  true ? (g = foo + 0) : g = null;
+  if (null != g) {}
+};
+
+f(1.4);
+f(1.4);
+%OptimizeFunctionOnNextCall(f);
+f(1.4);
diff --git a/test/mjsunit/regress/regress-handle-illegal-redeclaration.js b/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
new file mode 100644
index 0000000..fe04ddb
--- /dev/null
+++ b/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
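+
+// 'const c; var c;' is an illegal redeclaration. With --always-opt the
+// function is compiled optimized right away and calling it must still throw.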
+
+var x = 0;
+
+function f() {
+  const c;
+  var c;
+  return 0 + x;
+}
+
+assertThrows(f);
diff --git a/test/mjsunit/regress/regress-load-field-by-index.js b/test/mjsunit/regress/regress-load-field-by-index.js
new file mode 100644
index 0000000..c572c1e
--- /dev/null
+++ b/test/mjsunit/regress/regress-load-field-by-index.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {a:1.5, b:{}};
+
+function f(o) {
+  var result = [];
+  for (var k in o) {
+    result[result.length] = o[k];
+  }
+  return result;
+}
+
+f(o);
+f(o);
+%OptimizeFunctionOnNextCall(f);
+var array = f(o);
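+// The load above must have copied the unboxed double; mutating o.a now must
+// not change the value captured in the array.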
+o.a = 1.7;
+assertEquals(1.5, array[0]);