Version 3.24.5

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@18372 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 0bbe65d..3c46e79 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-12-19: Version 3.24.5
+
+        Performance and stability improvements on all platforms.
+
+
 2013-12-18: Version 3.24.4
 
         Removed all stuff marked as V8_DEPRECATED.
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 7922d6c..dc1f185 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -460,7 +460,7 @@
   HValue* size_in_bytes = Add<HConstant>(size);
 
   HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
-      isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
+      NOT_TENURED, JS_OBJECT_TYPE);
 
   for (int i = 0; i < object_size; i += kPointerSize) {
     HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 275beb4..9003852 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -585,6 +585,28 @@
 }
 
 
+void ArrayConstructorStub::PrintName(StringStream* stream) {
+  stream->Add("ArrayConstructorStub");
+  switch (argument_count_) {
+    case ANY: stream->Add("_Any"); break;
+    case NONE: stream->Add("_None"); break;
+    case ONE: stream->Add("_One"); break;
+    case MORE_THAN_ONE: stream->Add("_More_Than_One"); break;
+  }
+}
+
+
+void ArrayConstructorStubBase::BasePrintName(const char* name,
+                                             StringStream* stream) {
+  stream->Add(name);
+  stream->Add("_");
+  stream->Add(ElementsKindToString(elements_kind()));
+  if (override_mode() == DISABLE_ALLOCATION_SITES) {
+    stream->Add("_DISABLE_ALLOCATION_SITES");
+  }
+}
+
+
 bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
   Types old_types(types_);
   bool to_boolean_value = types_.UpdateStatus(object);
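
The PrintName overrides above only affect debugging output: a stub's printed name is its class name plus suffixes for argument count, elements kind, and the allocation-site override. A minimal standalone sketch of the naming scheme (BaseStubName is a hypothetical stand-in, not V8's StringStream API):

    #include <cstdio>
    #include <string>

    // Hypothetical mirror of ArrayConstructorStubBase::BasePrintName: the stub
    // name, an "_<ElementsKind>" suffix, and an optional override marker.
    std::string BaseStubName(const std::string& name,
                             const std::string& elements_kind,
                             bool disable_allocation_sites) {
      std::string out = name + "_" + elements_kind;
      if (disable_allocation_sites) out += "_DISABLE_ALLOCATION_SITES";
      return out;
    }

    int main() {
      // e.g. "ArrayNoArgumentConstructorStub_FAST_SMI_ELEMENTS"
      std::printf("%s\n", BaseStubName("ArrayNoArgumentConstructorStub",
                                       "FAST_SMI_ELEMENTS", false).c_str());
      return 0;
    }
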
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 2ab62b0..5678ebd 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -741,6 +741,7 @@
  private:
   void GenerateDispatchToArrayStub(MacroAssembler* masm,
                                    AllocationSiteOverrideMode mode);
+  virtual void PrintName(StringStream* stream);
 
   virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
   virtual int MinorKey() { return argument_count_; }
@@ -1933,6 +1934,9 @@
   static const int kConstructor = 0;
   static const int kPropertyCell = 1;
 
+ protected:
+  void BasePrintName(const char* name, StringStream* stream);
+
  private:
   int NotMissMinorKey() { return bit_field_; }
 
@@ -1968,6 +1972,10 @@
  private:
   Major MajorKey() { return ArrayNoArgumentConstructor; }
 
+  virtual void PrintName(StringStream* stream) {
+    BasePrintName("ArrayNoArgumentConstructorStub", stream);
+  }
+
   DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
 };
 
@@ -1990,6 +1998,10 @@
  private:
   Major MajorKey() { return ArraySingleArgumentConstructor; }
 
+  virtual void PrintName(StringStream* stream) {
+    BasePrintName("ArraySingleArgumentConstructorStub", stream);
+  }
+
   DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
 };
 
@@ -2012,6 +2024,10 @@
  private:
   Major MajorKey() { return ArrayNArgumentsConstructor; }
 
+  virtual void PrintName(StringStream* stream) {
+    BasePrintName("ArrayNArgumentsConstructorStub", stream);
+  }
+
   DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
 };
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 658991f..1230801 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -69,6 +69,9 @@
 #define DEFINE_implication(whenflag, thenflag) \
   if (FLAG_##whenflag) FLAG_##thenflag = true;
 
+#define DEFINE_neg_implication(whenflag, thenflag) \
+  if (FLAG_##whenflag) FLAG_##thenflag = false;
+
 #else
 #error No mode supplied when including flags.defs
 #endif
@@ -90,6 +93,10 @@
 #define DEFINE_implication(whenflag, thenflag)
 #endif
 
+#ifndef DEFINE_neg_implication
+#define DEFINE_neg_implication(whenflag, thenflag)
+#endif
+
 #define COMMA ,
 
 #ifdef FLAG_MODE_DECLARE
@@ -628,6 +635,14 @@
 DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
             "Print the time it takes to lazily compile hydrogen code stubs.")
 
+DEFINE_bool(predictable, false, "enable predictable mode")
+DEFINE_neg_implication(predictable, randomize_hashes)
+DEFINE_neg_implication(predictable, concurrent_recompilation)
+DEFINE_neg_implication(predictable, concurrent_osr)
+DEFINE_neg_implication(predictable, concurrent_sweeping)
+DEFINE_neg_implication(predictable, parallel_sweeping)
+
+
 //
 // Dev shell flags
 //
@@ -875,6 +890,7 @@
 #undef DEFINE_float
 #undef DEFINE_args
 #undef DEFINE_implication
+#undef DEFINE_neg_implication
 #undef DEFINE_ALIAS_bool
 #undef DEFINE_ALIAS_int
 #undef DEFINE_ALIAS_string
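
DEFINE_neg_implication mirrors the existing DEFINE_implication macro but clears the dependent flag instead of setting it; in the implication-definition pass it expands to a plain if statement, and every other pass defines it away. A reduced sketch of that expansion, with standalone stand-ins for the flag variables:

    #include <cstdio>

    // Standalone stand-ins for the real flag variables.
    static bool FLAG_predictable = true;   // assume set on the command line
    static bool FLAG_randomize_hashes = true;
    static bool FLAG_concurrent_recompilation = true;

    // The implication-pass expansion: clear thenflag whenever whenflag is set.
    #define DEFINE_neg_implication(whenflag, thenflag) \
      if (FLAG_##whenflag) FLAG_##thenflag = false;

    int main() {
      DEFINE_neg_implication(predictable, randomize_hashes)
      DEFINE_neg_implication(predictable, concurrent_recompilation)
      // Prints 0 0: predictable mode disables both.
      std::printf("randomize_hashes=%d concurrent_recompilation=%d\n",
                  FLAG_randomize_hashes, FLAG_concurrent_recompilation);
      return 0;
    }
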
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 11783b3..3dce348 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -484,12 +484,21 @@
 
 
 void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
-  if (FLAG_allocation_site_pretenuring && object->IsJSObject()) {
-    AllocationMemento* memento = AllocationMemento::FindForJSObject(
-        JSObject::cast(object), true);
+  if (FLAG_allocation_site_pretenuring &&
+      AllocationSite::CanTrack(object->map()->instance_type())) {
+    AllocationMemento* memento = AllocationMemento::FindForHeapObject(
+        object, true);
     if (memento != NULL) {
       ASSERT(memento->IsValid());
-      memento->GetAllocationSite()->IncrementMementoFoundCount();
+      bool add_to_scratchpad =
+          memento->GetAllocationSite()->IncrementMementoFoundCount();
+      Heap* heap = object->GetIsolate()->heap();
+      if (add_to_scratchpad && heap->allocation_sites_scratchpad_length <
+              kAllocationSiteScratchpadSize) {
+        heap->allocation_sites_scratchpad[
+            heap->allocation_sites_scratchpad_length++] =
+                memento->GetAllocationSite();
+      }
     }
   }
 }
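
The scratchpad narrows the per-GC pretenuring scan from every allocation site to just those that produced mementos in the current cycle: IncrementMementoFoundCount (see objects-inl.h below) now reports whether this was the site's first memento, and only first hits are appended, so a site is recorded at most once. A simplified sketch of the recording step, using stub types rather than V8's:

    // Simplified stand-ins for AllocationSite and the new Heap fields.
    struct SiteStub { int memento_found_count = 0; };

    const int kScratchpadSize = 256;  // mirrors kAllocationSiteScratchpadSize
    SiteStub* scratchpad[kScratchpadSize];
    int scratchpad_length = 0;

    void RecordMementoFound(SiteStub* site) {
      // First hit for this site this cycle? Then try to remember the site.
      bool first_hit = (site->memento_found_count++ == 0);
      if (first_hit && scratchpad_length < kScratchpadSize) {
        scratchpad[scratchpad_length++] = site;
      }
      // On overflow the site is simply dropped; the consumer falls back to
      // walking the full allocation-sites list (see heap.cc below).
    }

    int main() {
      SiteStub site;
      RecordMementoFound(&site);  // first memento: site recorded
      RecordMementoFound(&site);  // later mementos: only the count grows
      return scratchpad_length;   // 1
    }
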
diff --git a/src/heap.cc b/src/heap.cc
index f6c6ae6..c42c445 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -148,6 +148,7 @@
 #ifdef VERIFY_HEAP
       no_weak_object_verification_scope_depth_(0),
 #endif
+      allocation_sites_scratchpad_length(0),
       promotion_queue_(this),
       configured_(false),
       chunks_queued_for_free_(NULL),
@@ -503,25 +504,45 @@
 }
 
 
-void Heap::GarbageCollectionEpilogue() {
+void Heap::ProcessPretenuringFeedback() {
   if (FLAG_allocation_site_pretenuring) {
     int tenure_decisions = 0;
     int dont_tenure_decisions = 0;
     int allocation_mementos_found = 0;
+    int allocation_sites = 0;
+    int active_allocation_sites = 0;
 
-    Object* cur = allocation_sites_list();
-    while (cur->IsAllocationSite()) {
-      AllocationSite* casted = AllocationSite::cast(cur);
-      allocation_mementos_found += casted->memento_found_count()->value();
-      if (casted->DigestPretenuringFeedback()) {
-        if (casted->GetPretenureMode() == TENURED) {
+    // If the scratchpad overflowed, we have to iterate over the allocation
+    // sites list.
+    bool use_scratchpad =
+        allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize;
+
+    int i = 0;
+    Object* list_element = allocation_sites_list();
+    while (use_scratchpad ?
+              i < allocation_sites_scratchpad_length :
+              list_element->IsAllocationSite()) {
+      AllocationSite* site = use_scratchpad ?
+        allocation_sites_scratchpad[i] : AllocationSite::cast(list_element);
+      allocation_mementos_found += site->memento_found_count()->value();
+      if (site->memento_found_count()->value() > 0) {
+        active_allocation_sites++;
+      }
+      if (site->DigestPretenuringFeedback()) {
+        if (site->GetPretenureMode() == TENURED) {
           tenure_decisions++;
         } else {
           dont_tenure_decisions++;
         }
       }
-      cur = casted->weak_next();
+      allocation_sites++;
+      if (use_scratchpad) {
+        i++;
+      } else {
+        list_element = site->weak_next();
+      }
     }
+    allocation_sites_scratchpad_length = 0;
 
     // TODO(mvstanton): Pretenure decisions are only made once for an allocation
     // site. Find a sane way to decide about revisiting the decision later.
@@ -530,14 +551,21 @@
         (allocation_mementos_found > 0 ||
          tenure_decisions > 0 ||
          dont_tenure_decisions > 0)) {
-      PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
-             "(%d, %d, %d)\n",
+      PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
+             "#mementos, #tenure decisions, #donttenure decisions) "
+             "(%s, %d, %d, %d, %d, %d)\n",
+             use_scratchpad ? "use scratchpad" : "use list",
+             allocation_sites,
+             active_allocation_sites,
              allocation_mementos_found,
              tenure_decisions,
              dont_tenure_decisions);
     }
   }
+}
 
+
+void Heap::GarbageCollectionEpilogue() {
   store_buffer()->GCEpilogue();
 
   // In release mode, we only zap the from space under heap verification.
@@ -1564,6 +1592,8 @@
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
 
+  ProcessPretenuringFeedback();
+
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
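
ProcessPretenuringFeedback then chooses its iteration source: the scratchpad while it stayed below kAllocationSiteScratchpadSize, otherwise the full weak list of allocation sites (the overflow case noted in the comment above). A compact sketch of that selection, again with stub types:

    struct Site { Site* weak_next = nullptr; int found = 0; };

    const int kScratchpadSize = 256;
    Site* scratchpad[kScratchpadSize];
    int scratchpad_length = 0;

    // Visit every site that may carry feedback: the scratchpad if it did not
    // overflow, else the whole weak allocation-sites list.
    void ProcessFeedback(Site* list_head) {
      bool use_scratchpad = scratchpad_length < kScratchpadSize;
      int i = 0;
      Site* element = list_head;
      while (use_scratchpad ? i < scratchpad_length : element != nullptr) {
        Site* site = use_scratchpad ? scratchpad[i] : element;
        // ... digest site->found and make the tenuring decision here ...
        if (use_scratchpad) i++; else element = element->weak_next;
      }
      scratchpad_length = 0;  // feedback is consumed once per GC
    }

    int main() { ProcessFeedback(nullptr); return 0; }
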
diff --git a/src/heap.h b/src/heap.h
index 871c4d8..d05f019 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -2057,6 +2057,11 @@
   void GarbageCollectionPrologue();
   void GarbageCollectionEpilogue();
 
+  // Pretenuring decisions are made based on feedback collected during new
+  // space evacuation. Note that between feedback collection and calling this
+  // method, objects in old space must not move.
+  void ProcessPretenuringFeedback();
+
   // Checks whether a global GC is necessary
   GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                           const char** reason);
@@ -2383,6 +2388,11 @@
   int no_weak_object_verification_scope_depth_;
 #endif
 
+
+  static const int kAllocationSiteScratchpadSize = 256;
+  int allocation_sites_scratchpad_length;
+  AllocationSite* allocation_sites_scratchpad[kAllocationSiteScratchpadSize];
+
   static const int kMaxMarkSweepsInIdleRound = 7;
   static const int kIdleScavengeThreshold = 5;
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 43235c2..46893e8 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -3640,7 +3640,7 @@
     HObjectAccess access = HObjectAccess::ForJSObjectOffset(offset);
     HStoreNamedField* clear_next_map =
         HStoreNamedField::New(zone, context(), this, access,
-            block()->graph()->GetConstantNull());
+            block()->graph()->GetConstant0());
     clear_next_map->ClearAllSideEffects();
     clear_next_map->InsertAfter(this);
   }
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index df8d2dc..fde3abc 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -6407,7 +6407,11 @@
             (!IsHoleyElementsKind(elements_kind) ||
              mode == NEVER_RETURN_HOLE)) {
           set_type(HType::Smi());
-          set_representation(Representation::Smi());
+          if (SmiValuesAre32Bits() && !RequiresHoleCheck()) {
+            set_representation(Representation::Integer32());
+          } else {
+            set_representation(Representation::Smi());
+          }
         } else {
           set_representation(Representation::Tagged());
         }
@@ -6499,10 +6503,20 @@
 };
 
 
+// Indicates whether the store is to an entry that was previously
+// initialized or is itself the initializing store.
+enum StoreFieldOrKeyedMode {
+  INITIALIZING_STORE,
+  STORE_TO_INITIALIZED_ENTRY
+};
+
+
 class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
                                  HObjectAccess, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P4(HStoreNamedField, HValue*,
+                                 HObjectAccess, HValue*, StoreFieldOrKeyedMode);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
@@ -6523,8 +6537,12 @@
           field_representation().IsUInteger16() ||
           field_representation().IsInteger32()) {
         return Representation::Integer32();
-      } else if (field_representation().IsDouble() ||
-                 field_representation().IsSmi()) {
+      } else if (field_representation().IsDouble()) {
+        return field_representation();
+      } else if (field_representation().IsSmi()) {
+        if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+          return Representation::Integer32();
+        }
         return field_representation();
       } else if (field_representation().IsExternal()) {
         return Representation::External();
@@ -6551,6 +6569,7 @@
   HObjectAccess access() const { return access_; }
   HValue* new_space_dominator() const { return new_space_dominator_; }
   bool has_transition() const { return has_transition_; }
+  StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
 
   Handle<Map> transition_map() const {
     if (has_transition()) {
@@ -6601,11 +6620,13 @@
  private:
   HStoreNamedField(HValue* obj,
                    HObjectAccess access,
-                   HValue* val)
+                   HValue* val,
+                   StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
       : access_(access),
         new_space_dominator_(NULL),
         write_barrier_mode_(UPDATE_WRITE_BARRIER),
-        has_transition_(false) {
+        has_transition_(false),
+        store_mode_(store_mode) {
     SetOperandAt(0, obj);
     SetOperandAt(1, val);
     SetOperandAt(2, obj);
@@ -6616,6 +6637,7 @@
   HValue* new_space_dominator_;
   WriteBarrierMode write_barrier_mode_ : 1;
   bool has_transition_ : 1;
+  StoreFieldOrKeyedMode store_mode_ : 1;
 };
 
 
@@ -6662,6 +6684,8 @@
  public:
   DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
                                  ElementsKind);
+  DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
+                                 ElementsKind, StoreFieldOrKeyedMode);
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
     // kind_fast:       tagged[int32] = tagged
@@ -6680,7 +6704,9 @@
     if (IsDoubleOrFloatElementsKind(elements_kind())) {
       return Representation::Double();
     }
-
+    if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+      return Representation::Integer32();
+    }
     if (IsFastSmiElementsKind(elements_kind())) {
       return Representation::Smi();
     }
@@ -6698,12 +6724,15 @@
     if (IsUninitialized()) {
       return Representation::None();
     }
-    if (IsFastSmiElementsKind(elements_kind())) {
-      return Representation::Smi();
-    }
     if (IsDoubleOrFloatElementsKind(elements_kind())) {
       return Representation::Double();
     }
+    if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+      return Representation::Integer32();
+    }
+    if (IsFastSmiElementsKind(elements_kind())) {
+      return Representation::Smi();
+    }
     if (is_external()) {
       return Representation::Integer32();
     }
@@ -6717,6 +6746,7 @@
   bool value_is_smi() const {
     return IsFastSmiElementsKind(elements_kind_);
   }
+  StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
   ElementsKind elements_kind() const { return elements_kind_; }
   uint32_t index_offset() { return index_offset_; }
   void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
@@ -6762,16 +6792,21 @@
 
  private:
   HStoreKeyed(HValue* obj, HValue* key, HValue* val,
-              ElementsKind elements_kind)
+              ElementsKind elements_kind,
+              StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
       : elements_kind_(elements_kind),
       index_offset_(0),
       is_dehoisted_(false),
       is_uninitialized_(false),
+      store_mode_(store_mode),
       new_space_dominator_(NULL) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
 
+    ASSERT(store_mode != STORE_TO_INITIALIZED_ENTRY ||
+           elements_kind == FAST_SMI_ELEMENTS);
+
     if (IsFastObjectElementsKind(elements_kind)) {
       SetFlag(kTrackSideEffectDominators);
       SetGVNFlag(kDependsOnNewSpacePromotion);
@@ -6798,6 +6833,7 @@
   uint32_t index_offset_;
   bool is_dehoisted_ : 1;
   bool is_uninitialized_ : 1;
+  StoreFieldOrKeyedMode store_mode_ : 1;
   HValue* new_space_dominator_;
 };
 
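
The STORE_TO_INITIALIZED_ENTRY mode and the Integer32 representations above rest on the x64 smi layout: when SmiValuesAre32Bits(), a smi is its 32-bit payload shifted into the upper half of a 64-bit word, with zero tag bits below. An initialized smi slot can therefore be read or updated by touching only its upper four bytes, which is what the x64 codegen later in this patch does via offset += kPointerSize / 2. A minimal sketch of the layout, assuming little-endian x64:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // A smi slot holding the value 1: payload lives in the upper 32 bits.
      int64_t slot = int64_t(1) << 32;

      // STORE_TO_INITIALIZED_ENTRY: write the raw int32 into the upper half
      // of the slot, leaving the zero tag bits in the lower half untouched.
      int32_t new_value = 1500000000;
      std::memcpy(reinterpret_cast<char*>(&slot) + 4, &new_value, 4);

      std::printf("%d\n", static_cast<int32_t>(slot >> 32));  // 1500000000
      return 0;
    }
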
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index b3549f5..cccfa5b 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1296,7 +1296,8 @@
                                                  ElementsKind kind,
                                                  HValue* length,
                                                  HValue* key,
-                                                 bool is_js_array) {
+                                                 bool is_js_array,
+                                                 bool is_store) {
   IfBuilder length_checker(this);
 
   Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1339,6 +1340,13 @@
                           new_length);
   }
 
+  if (is_store && kind == FAST_SMI_ELEMENTS) {
+    HValue* checked_elements = environment()->Top();
+
+    // Write zero to ensure that the new element is initialized with a smi.
+    Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), kind);
+  }
+
   length_checker.Else();
   Add<HBoundsCheck>(key, length);
 
@@ -2102,7 +2110,7 @@
     NoObservableSideEffectsScope no_effects(this);
     elements = BuildCheckForCapacityGrow(checked_object, elements,
                                          elements_kind, length, key,
-                                         is_js_array);
+                                         is_js_array, is_store);
     checked_key = key;
   } else {
     checked_key = Add<HBoundsCheck>(key, length);
@@ -2266,7 +2274,10 @@
     if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
       val = Add<HClampToUint8>(val);
     }
-    return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
+    return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
+                            elements_kind == FAST_SMI_ELEMENTS
+                              ? STORE_TO_INITIALIZED_ENTRY
+                              : INITIALIZING_STORE);
   }
 
   ASSERT(!is_store);
@@ -5206,9 +5217,9 @@
     }
   } else {
     // This is a normal store.
-    instr = New<HStoreNamedField>(checked_object->ActualValue(),
-                                  field_access,
-                                  value);
+    instr = New<HStoreNamedField>(
+        checked_object->ActualValue(), field_access, value,
+        transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
   }
 
   if (transition_to_field) {
@@ -9159,15 +9170,6 @@
     return ast_context()->ReturnInstruction(result, expr->id());
   }
 
-  // Cases handled below depend on collected type feedback. They should
-  // soft deoptimize when there is no type feedback.
-  if (combined_type->Is(Type::None())) {
-    Add<HDeoptimize>("Insufficient type feedback for combined type "
-                     "of binary operation",
-                     Deoptimizer::SOFT);
-    combined_type = left_type = right_type = handle(Type::Any(), isolate());
-  }
-
   HControlInstruction* compare = BuildCompareInstruction(
       op, left, right, left_type, right_type, combined_type,
       expr->left()->position(), expr->right()->position(), expr->id());
@@ -9186,6 +9188,15 @@
     int left_position,
     int right_position,
     BailoutId bailout_id) {
+  // Cases handled below depend on collected type feedback. They should
+  // soft deoptimize when there is no type feedback.
+  if (combined_type->Is(Type::None())) {
+    Add<HDeoptimize>("Insufficient type feedback for combined type "
+                     "of binary operation",
+                     Deoptimizer::SOFT);
+    combined_type = left_type = right_type = handle(Type::Any(), isolate());
+  }
+
   Representation left_rep = Representation::FromType(left_type);
   Representation right_rep = Representation::FromType(right_type);
   Representation combined_rep = Representation::FromType(combined_type);
@@ -9193,10 +9204,11 @@
   if (combined_type->Is(Type::Receiver())) {
     if (Token::IsEqualityOp(op)) {
       // Can we get away with map check and not instance type check?
+      HValue* operand_to_check =
+          left->block()->block_id() < right->block()->block_id() ? left : right;
       if (combined_type->IsClass()) {
         Handle<Map> map = combined_type->AsClass();
-        AddCheckMap(left, map);
-        AddCheckMap(right, map);
+        AddCheckMap(operand_to_check, map);
         HCompareObjectEqAndBranch* result =
             New<HCompareObjectEqAndBranch>(left, right);
         if (FLAG_emit_opt_code_positions) {
@@ -9205,10 +9217,9 @@
         }
         return result;
       } else {
-        BuildCheckHeapObject(left);
-        Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
-        BuildCheckHeapObject(right);
-        Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
+        BuildCheckHeapObject(operand_to_check);
+        Add<HCheckInstanceType>(operand_to_check,
+                                HCheckInstanceType::IS_SPEC_OBJECT);
         HCompareObjectEqAndBranch* result =
             New<HCompareObjectEqAndBranch>(left, right);
         return result;
@@ -9441,7 +9452,7 @@
       Add<HStoreNamedField>(object, access, result);
     } else {
       Representation representation = details.representation();
-      HInstruction* value_instruction = Add<HConstant>(value);
+      HInstruction* value_instruction;
 
       if (representation.IsDouble()) {
         // Allocate a HeapNumber box and store the value into it.
@@ -9456,8 +9467,12 @@
         AddStoreMapConstant(double_box,
             isolate()->factory()->heap_number_map());
         Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
-            value_instruction);
+                              Add<HConstant>(value));
         value_instruction = double_box;
+      } else if (representation.IsSmi() && value->IsUninitialized()) {
+        value_instruction = graph()->GetConstant0();
+      } else {
+        value_instruction = Add<HConstant>(value);
       }
 
       Add<HStoreNamedField>(object, access, value_instruction);
diff --git a/src/hydrogen.h b/src/hydrogen.h
index b2fd14c..ea4da9a 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1272,7 +1272,8 @@
                                     ElementsKind kind,
                                     HValue* length,
                                     HValue* key,
-                                    bool is_js_array);
+                                    bool is_js_array,
+                                    bool is_store);
 
   HValue* BuildCopyElementsOnWrite(HValue* object,
                                    HValue* elements,
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index c0e1039..1e78093 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -3361,6 +3361,13 @@
     EvacuateNewSpace();
   }
 
+  // We have to traverse our allocation sites scratchpad, which contains raw
+  // pointers, before we move objects. During new space evacuation we
+  // gathered pretenuring statistics; the allocation sites found there may
+  // not be valid after old space is compacted.
+  heap()->ProcessPretenuringFeedback();
+
+
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuatePages();
   }
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 4b6d280..b94f351 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1040,12 +1040,12 @@
     patch_site.EmitPatchInfo();
 
     Label skip;
-    __ b(&skip);
+    __ Branch(&skip);
     PrepareForBailout(clause, TOS_REG);
     __ LoadRoot(at, Heap::kTrueValueRootIndex);
     __ Branch(&next_test, ne, v0, Operand(at));
     __ Drop(1);
-    __ jmp(clause->body_target());
+    __ Branch(clause->body_target());
     __ bind(&skip);
 
     __ Branch(&next_test, ne, v0, Operand(zero_reg));
diff --git a/src/objects-inl.h b/src/objects-inl.h
index d488fc0..311afc0 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -267,6 +267,9 @@
 
 MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
                                            Representation representation) {
+  if (FLAG_track_fields && representation.IsSmi() && IsUninitialized()) {
+    return Smi::FromInt(0);
+  }
   if (!FLAG_track_double_fields) return this;
   if (!representation.IsDouble()) return this;
   if (IsUninitialized()) {
@@ -1367,9 +1370,12 @@
 }
 
 
-inline void AllocationSite::IncrementMementoFoundCount() {
+inline bool AllocationSite::IncrementMementoFoundCount() {
+  if (IsZombie()) return false;
+
   int value = memento_found_count()->value();
   set_memento_found_count(Smi::FromInt(value + 1));
+  return value == 0;
 }
 
 
diff --git a/src/objects.cc b/src/objects.cc
index bfe1f55..aa7f500 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -9184,14 +9184,14 @@
 }
 
 
-AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
-                                                      bool in_GC) {
-  // Currently, AllocationMemento objects are only allocated immediately
-  // after JSArrays and some JSObjects in NewSpace. Detecting whether a
-  // memento is present involves carefully checking the object immediately
-  // after the current object (if there is one) to see if it's an
-  // AllocationMemento.
-  if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
+AllocationMemento* AllocationMemento::FindForHeapObject(HeapObject* object,
+                                                        bool in_GC) {
+  // AllocationMemento objects are only allocated immediately after objects in
+  // NewSpace. Detecting whether a memento is present involves carefully
+  // checking the object immediately after the current object (if there is one)
+  // to see if it's an AllocationMemento.
+  ASSERT(object->GetHeap()->InNewSpace(object));
+  if (FLAG_track_allocation_sites) {
     Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
         object->Size();
     Address top;
@@ -12899,7 +12899,9 @@
     return this;
   }
 
-  AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
+  if (!GetHeap()->InNewSpace(this)) return this;
+
+  AllocationMemento* memento = AllocationMemento::FindForHeapObject(this);
   if (memento == NULL || !memento->IsValid()) {
     return this;
   }
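
FindForHeapObject relies on an allocation-order invariant: a memento, when present, was allocated immediately after its object, so the candidate sits at the object's end address and is recognized by its map word. A simplified model of that adjacency probe (hypothetical types; the real code additionally guards against reading past the allocation top):

    #include <cstring>

    struct Map { int instance_type; };

    // Hypothetical adjacency probe: read the map word stored right after the
    // object and compare it against the known memento map.
    bool HasMementoAfter(const char* object_start, int object_size,
                         const Map* memento_map) {
      const Map* candidate;
      std::memcpy(&candidate, object_start + object_size, sizeof(candidate));
      return candidate == memento_map;
    }

    int main() {
      Map memento_map{1};
      // Fake "heap": an 8-byte object followed by a memento's map pointer.
      char heap[16] = {};
      const Map* p = &memento_map;
      std::memcpy(heap + 8, &p, sizeof(p));
      return HasMementoAfter(heap, 8, &memento_map) ? 0 : 1;
    }
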
diff --git a/src/objects.h b/src/objects.h
index 28de179..0fa8304 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -8136,7 +8136,9 @@
   class UnusedBits:             public BitField<int,          15, 14> {};
   class DoNotInlineBit:         public BitField<bool,         29,  1> {};
 
-  inline void IncrementMementoFoundCount();
+  // Increments the memento-found counter and returns true when this is the
+  // first memento found for the given allocation site.
+  inline bool IncrementMementoFoundCount();
 
   inline void IncrementMementoCreateCount();
 
@@ -8259,8 +8261,8 @@
   DECLARE_VERIFIER(AllocationMemento)
 
   // Returns NULL if no AllocationMemento is available for object.
-  static AllocationMemento* FindForJSObject(JSObject* object,
-                                            bool in_GC = false);
+  static AllocationMemento* FindForHeapObject(HeapObject* object,
+                                              bool in_GC = false);
   static inline AllocationMemento* cast(Object* obj);
 
  private:
diff --git a/src/v8.cc b/src/v8.cc
index 004a339..265f0a6 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -162,6 +162,15 @@
 
 void V8::InitializeOncePerProcessImpl() {
   FlagList::EnforceFlagImplications();
+
+  if (FLAG_predictable) {
+    if (FLAG_random_seed == 0) {
+      // Avoid random seeds in predictable mode.
+      FLAG_random_seed = 12347;
+    }
+    FLAG_hash_seed = 0;
+  }
+
   if (FLAG_stress_compaction) {
     FLAG_force_marking_deque_overflows = true;
     FLAG_gc_global = true;
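
--predictable makes runs reproducible: the negative implications in flag-definitions.h turn off every concurrent component, and the code above pins both seeds, with the 12347 fallback applying only when no explicit --random-seed was given. A condensed sketch of the seed pinning:

    #include <cstdio>

    static bool FLAG_predictable = true;  // assume --predictable was passed
    static int FLAG_random_seed = 0;      // 0 means "pick a random seed"
    static int FLAG_hash_seed = 0;

    int main() {
      if (FLAG_predictable) {
        if (FLAG_random_seed == 0) FLAG_random_seed = 12347;  // fixed fallback
        FLAG_hash_seed = 0;  // string hashes become deterministic
      }
      std::printf("random_seed=%d hash_seed=%d\n",
                  FLAG_random_seed, FLAG_hash_seed);
      return 0;
    }
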
diff --git a/src/version.cc b/src/version.cc
index 1a8f1c0..37f29c5 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     24
-#define BUILD_NUMBER      4
+#define BUILD_NUMBER      5
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index c392b45..d35219c 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3057,6 +3057,7 @@
 
 
 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
   Register result = ToRegister(instr->result());
   LOperand* key = instr->key();
   if (!key->IsConstantOperand()) {
@@ -3066,24 +3067,37 @@
     // gets replaced during bound check elimination with the index
     // argument to the bounds check, which can be tagged, so that
     // case must be handled here, too.
-    if (instr->hydrogen()->IsDehoisted()) {
+    if (hinstr->IsDehoisted()) {
       // Sign extend key because it could be a 32 bit negative value
       // and the dehoisted address computation happens in 64 bits
       __ movsxlq(key_reg, key_reg);
     }
   }
 
-  // Load the result.
-  __ movq(result,
+  bool requires_hole_check = hinstr->RequiresHoleCheck();
+  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Representation representation = hinstr->representation();
+
+  if (representation.IsInteger32() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    ASSERT(!requires_hole_check);
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+  }
+
+  __ Load(result,
           BuildFastArrayOperand(instr->elements(),
                                 key,
                                 FAST_ELEMENTS,
-                                FixedArray::kHeaderSize - kHeapObjectTag,
-                                instr->additional_index()));
+                                offset,
+                                instr->additional_index()),
+          representation);
 
   // Check for the hole value.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+  if (requires_hole_check) {
+    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
       Condition smi = __ CheckSmi(result);
       DeoptimizeIf(NegateCondition(smi), instr->environment());
     } else {
@@ -3924,13 +3938,14 @@
 
 
 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  HStoreNamedField* hinstr = instr->hydrogen();
   Representation representation = instr->representation();
 
-  HObjectAccess access = instr->hydrogen()->access();
+  HObjectAccess access = hinstr->access();
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    ASSERT(!hinstr->NeedsWriteBarrier());
     Register value = ToRegister(instr->value());
     if (instr->object()->IsConstantOperand()) {
       ASSERT(value.is(rax));
@@ -3950,7 +3965,8 @@
   if (FLAG_track_fields && representation.IsSmi()) {
     if (instr->value()->IsConstantOperand()) {
       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-      if (!IsSmiConstant(operand_value)) {
+      if (!IsInteger32Constant(operand_value) &&
+          !IsSmiConstant(operand_value)) {
         DeoptimizeIf(no_condition, instr->environment());
       }
     }
@@ -3961,7 +3977,7 @@
         DeoptimizeIf(no_condition, instr->environment());
       }
     } else {
-      if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+      if (!hinstr->value()->type().IsHeapObject()) {
         Register value = ToRegister(instr->value());
         Condition cc = masm()->CheckSmi(value);
         DeoptimizeIf(cc, instr->environment());
@@ -3970,14 +3986,14 @@
   } else if (FLAG_track_double_fields && representation.IsDouble()) {
     ASSERT(transition.is_null());
     ASSERT(access.IsInobject());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    ASSERT(!hinstr->NeedsWriteBarrier());
     XMMRegister value = ToDoubleRegister(instr->value());
     __ movsd(FieldOperand(object, offset), value);
     return;
   }
 
   if (!transition.is_null()) {
-    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+    if (!hinstr->NeedsWriteBarrierForMap()) {
       __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
     } else {
       Register temp = ToRegister(instr->temp());
@@ -3995,9 +4011,8 @@
   }
 
   // Do the store.
-  SmiCheck check_needed =
-      instr->hydrogen()->value()->IsHeapObject()
-          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  SmiCheck check_needed = hinstr->value()->IsHeapObject()
+                          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
 
   Register write_register = object;
   if (!access.IsInobject()) {
@@ -4005,26 +4020,41 @@
     __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
   }
 
-  if (instr->value()->IsConstantOperand()) {
-    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
-    if (operand_value->IsRegister()) {
-      Register value = ToRegister(operand_value);
-      __ Store(FieldOperand(write_register, offset), value, representation);
-    } else if (representation.IsInteger32()) {
-      int32_t value = ToInteger32(operand_value);
-      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-      __ movl(FieldOperand(write_register, offset), Immediate(value));
-    } else {
-      Handle<Object> handle_value = ToHandle(operand_value);
-      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-      __ Move(FieldOperand(write_register, offset), handle_value);
-    }
-  } else {
-    Register value = ToRegister(instr->value());
-    __ Store(FieldOperand(write_register, offset), value, representation);
+  if (representation.IsSmi() &&
+      hinstr->value()->representation().IsInteger32()) {
+    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+    representation = Representation::Integer32();
   }
 
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
+  Operand operand = FieldOperand(write_register, offset);
+
+  if (instr->value()->IsRegister()) {
+    Register value = ToRegister(instr->value());
+    __ Store(operand, value, representation);
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsInteger32Constant(operand_value)) {
+      ASSERT(!hinstr->NeedsWriteBarrier());
+      int32_t value = ToInteger32(operand_value);
+      if (representation.IsSmi()) {
+        __ Move(operand, Smi::FromInt(value));
+
+      } else {
+        __ movl(operand, Immediate(value));
+      }
+
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      ASSERT(!hinstr->NeedsWriteBarrier());
+      __ Move(operand, handle_value);
+    }
+  }
+
+  if (hinstr->NeedsWriteBarrier()) {
     Register value = ToRegister(instr->value());
     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
     // Update the write barrier for the object for in-object properties.
@@ -4211,7 +4241,7 @@
 
 
 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
-  Register elements = ToRegister(instr->elements());
+  HStoreKeyed* hinstr = instr->hydrogen();
   LOperand* key = instr->key();
   if (!key->IsConstantOperand()) {
     Register key_reg = ToRegister(key);
@@ -4220,38 +4250,56 @@
     // input gets replaced during bound check elimination with the index
     // argument to the bounds check, which can be tagged, so that case
     // must be handled here, too.
-    if (instr->hydrogen()->IsDehoisted()) {
+    if (hinstr->IsDehoisted()) {
       // Sign extend key because it could be a 32 bit negative value
       // and the dehoisted address computation happens in 64 bits
       __ movsxlq(key_reg, key_reg);
     }
   }
 
+  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Representation representation = hinstr->value()->representation();
+
+  if (representation.IsInteger32()) {
+    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+  }
+
   Operand operand =
       BuildFastArrayOperand(instr->elements(),
                             key,
                             FAST_ELEMENTS,
-                            FixedArray::kHeaderSize - kHeapObjectTag,
+                            offset,
                             instr->additional_index());
+
   if (instr->value()->IsRegister()) {
-    __ movq(operand, ToRegister(instr->value()));
+    __ Store(operand, ToRegister(instr->value()), representation);
   } else {
     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
     if (IsInteger32Constant(operand_value)) {
-      Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
-      __ Move(operand, smi_value);
+      int32_t value = ToInteger32(operand_value);
+      if (representation.IsSmi()) {
+        __ Move(operand, Smi::FromInt(value));
+
+      } else {
+        __ movl(operand, Immediate(value));
+      }
     } else {
       Handle<Object> handle_value = ToHandle(operand_value);
       __ Move(operand, handle_value);
     }
   }
 
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
+  if (hinstr->NeedsWriteBarrier()) {
+    Register elements = ToRegister(instr->elements());
     ASSERT(instr->value()->IsRegister());
     Register value = ToRegister(instr->value());
-    ASSERT(!instr->key()->IsConstantOperand());
-    SmiCheck check_needed =
-        instr->hydrogen()->value()->IsHeapObject()
+    ASSERT(!key->IsConstantOperand());
+    SmiCheck check_needed = hinstr->value()->IsHeapObject()
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     Register key_reg(ToRegister(key));
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 480adf8..6388863 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -508,6 +508,13 @@
 }
 
 
+LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseTempRegister(value);
+}
+
+
 LOperand* LChunkBuilder::Use(HValue* value) {
   return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
 }
@@ -2177,17 +2184,20 @@
     LOperand* key = NULL;
     LOperand* val = NULL;
 
-    if (instr->value()->representation().IsDouble()) {
+    Representation value_representation = instr->value()->representation();
+    if (value_representation.IsDouble()) {
       object = UseRegisterAtStart(instr->elements());
       val = UseTempRegister(instr->value());
       key = UseRegisterOrConstantAtStart(instr->key());
     } else {
-      ASSERT(instr->value()->representation().IsSmiOrTagged());
-      object = UseTempRegister(instr->elements());
+      ASSERT(value_representation.IsSmiOrTagged() ||
+             value_representation.IsInteger32());
       if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
         val = UseTempRegister(instr->value());
         key = UseTempRegister(instr->key());
       } else {
+        object = UseRegisterAtStart(instr->elements());
         val = UseRegisterOrConstantAtStart(instr->value());
         key = UseRegisterOrConstantAtStart(instr->key());
       }
@@ -2296,7 +2306,7 @@
   } else if (can_be_constant) {
     val = UseRegisterOrConstant(instr->value());
   } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
-    val = UseTempRegister(instr->value());
+    val = UseRegister(instr->value());
   } else if (FLAG_track_double_fields &&
              instr->field_representation().IsDouble()) {
     val = UseRegisterAtStart(instr->value());
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 7ba1d30..962000a 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -2710,6 +2710,9 @@
   // An input operand in a register that may be trashed.
   MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
 
+  // An input operand in a register that may be trashed, or a constant operand.
+  MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value);
+
   // An input operand in a register or stack slot.
   MUST_USE_RESULT LOperand* Use(HValue* value);
   MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
diff --git a/test/mjsunit/regress/regress-crbug-329709.js b/test/mjsunit/regress/regress-crbug-329709.js
new file mode 100644
index 0000000..c5316f3
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-329709.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function boom(x) {
+  switch(x) {
+    case 1: return "one";
+    case 1500000000: return "non-smi int32";
+    default: return "default";
+  }
+}
+
+assertEquals("one", boom(1));
+assertEquals("one", boom(1));
+%OptimizeFunctionOnNextCall(boom);
+assertEquals("non-smi int32", boom(1500000000));