Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 56c5a24..be63021 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -5,14 +5,18 @@
 #ifndef V8_HEAP_INCREMENTAL_MARKING_H_
 #define V8_HEAP_INCREMENTAL_MARKING_H_
 
-
+#include "src/cancelable-task.h"
 #include "src/execution.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/incremental-marking-job.h"
+#include "src/heap/spaces.h"
 #include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class MarkBit;
+class PagedSpace;
 
 class IncrementalMarking {
  public:
@@ -24,6 +28,23 @@
 
   enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
 
+  enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
+
+  struct StepActions {
+    StepActions(CompletionAction complete_action_,
+                ForceMarkingAction force_marking_,
+                ForceCompletionAction force_completion_)
+        : completion_action(complete_action_),
+          force_marking(force_marking_),
+          force_completion(force_completion_) {}
+
+    CompletionAction completion_action;
+    ForceMarkingAction force_marking;
+    ForceCompletionAction force_completion;
+  };
+
+  static StepActions IdleStepActions();
+
   explicit IncrementalMarking(Heap* heap);
 
   static void Initialize();
@@ -36,6 +57,14 @@
   bool should_hurry() { return should_hurry_; }
   void set_should_hurry(bool val) { should_hurry_ = val; }
 
+  bool finalize_marking_completed() const {
+    return finalize_marking_completed_;
+  }
+
+  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
+    finalize_marking_completed_ = val;
+  }
+
   inline bool IsStopped() { return state() == STOPPED; }
 
   INLINE(bool IsMarking()) { return state() >= MARKING; }
@@ -44,19 +73,21 @@
 
   inline bool IsComplete() { return state() == COMPLETE; }
 
-  bool WorthActivating();
+  inline bool IsReadyToOverApproximateWeakClosure() const {
+    return request_type_ == FINALIZATION && !finalize_marking_completed_;
+  }
 
-  bool ShouldActivate();
+  GCRequestType request_type() const { return request_type_; }
+
+  bool CanBeActivated();
+
+  bool ShouldActivateEvenWithoutIdleNotification();
 
   bool WasActivated();
 
-  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+  void Start(const char* reason = nullptr);
 
-  void Start(CompactionFlag flag = ALLOW_COMPACTION);
-
-  void Stop();
-
-  void PrepareForScavenge();
+  void FinalizeIncrementally();
 
   void UpdateMarkingDequeAfterScavenge();
 
@@ -64,12 +95,23 @@
 
   void Finalize();
 
-  void Abort();
+  void Stop();
+
+  void FinalizeMarking(CompletionAction action);
 
   void MarkingComplete(CompletionAction action);
 
   void Epilogue();
 
+  // Performs incremental marking steps of step_size_in_bytes as long as
+  // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
+  // an estimated increment. Returns the remaining time that cannot be used
+  // for incremental marking anymore because a single step would exceed the
+  // deadline.
+  double AdvanceIncrementalMarking(intptr_t step_size_in_bytes,
+                                   double deadline_in_ms,
+                                   StepActions step_actions);
+
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it.  We start
   // off at a moderate rate of work and gradually increase the speed of the
@@ -117,7 +159,7 @@
   // No slots in white objects should be recorded, as some slots are typed and
   // cannot be interpreted correctly if the underlying object does not survive
   // the incremental cycle (stays white).
-  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
   INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
                                   Object* value));
@@ -132,17 +174,17 @@
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
   void RecordCodeTargetPatch(Address pc, HeapObject* value);
 
-  inline void RecordWrites(HeapObject* obj);
+  void RecordWrites(HeapObject* obj);
 
-  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+  void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
 
-  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+  void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
 
   inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
   }
 
-  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
 
@@ -150,19 +192,7 @@
 
   void ActivateGeneratedStub(Code* stub);
 
-  void NotifyOfHighPromotionRate() {
-    if (IsMarking()) {
-      if (marking_speed_ < kFastMarking) {
-        if (FLAG_trace_gc) {
-          PrintPID(
-              "Increasing marking speed to %d "
-              "due to high promotion rate\n",
-              static_cast<int>(kFastMarking));
-        }
-        marking_speed_ = kFastMarking;
-      }
-    }
-  }
+  void NotifyOfHighPromotionRate();
 
   void EnterNoMarkingScope() { no_marking_scope_depth_++; }
 
@@ -176,14 +206,44 @@
 
   bool IsIdleMarkingDelayCounterLimitReached();
 
+  INLINE(static void MarkObject(Heap* heap, HeapObject* object));
+
+  Heap* heap() const { return heap_; }
+
+  IncrementalMarkingJob* incremental_marking_job() {
+    return &incremental_marking_job_;
+  }
+
  private:
+  class Observer : public InlineAllocationObserver {
+   public:
+    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+        : InlineAllocationObserver(step_size),
+          incremental_marking_(incremental_marking) {}
+
+    void Step(int bytes_allocated, Address, size_t) override {
+      incremental_marking_.Step(bytes_allocated,
+                                IncrementalMarking::GC_VIA_STACK_GUARD);
+    }
+
+   private:
+    IncrementalMarking& incremental_marking_;
+  };
+
   int64_t SpaceLeftInOldSpace();
 
   void SpeedUp();
 
   void ResetStepCounters();
 
-  void StartMarking(CompactionFlag flag);
+  void StartMarking();
+
+  void MarkRoots();
+  void MarkObjectGroups();
+  void ProcessWeakCells();
+  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
+  // increase the chances of reusing the map transition tree in the future.
+  void RetainMaps();
 
   void ActivateIncrementalWriteBarrier(PagedSpace* space);
   static void ActivateIncrementalWriteBarrier(NewSpace* space);
@@ -196,7 +256,7 @@
   static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                    bool is_compacting);
 
-  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
 
   INLINE(void ProcessMarkingDeque());
 
@@ -208,6 +268,8 @@
 
   Heap* heap_;
 
+  Observer observer_;
+
   State state_;
   bool is_compacting_;
 
@@ -228,9 +290,17 @@
 
   bool was_activated_;
 
+  bool finalize_marking_completed_;
+
+  int incremental_marking_finalization_rounds_;
+
+  GCRequestType request_type_;
+
+  IncrementalMarkingJob incremental_marking_job_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_INCREMENTAL_MARKING_H_
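
The new AdvanceIncrementalMarking entry point above encodes a simple contract: keep taking marking steps while the deadline allows, and report back whatever slice of the budget is too small to spend on one more step. The following stand-alone sketch models that contract outside of V8; every name in it (NowMs, DoOneStep, AdvanceToDeadline) is a hypothetical stand-in for the internal types, not the real API.

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Monotonic clock in milliseconds, analogous to the heap's time source.
    static double NowMs() {
      using namespace std::chrono;
      return duration<double, std::milli>(
                 steady_clock::now().time_since_epoch())
          .count();
    }

    // Stand-in for one marking step; a real step would visit roughly
    // step_size_in_bytes worth of objects and report its actual cost.
    static double DoOneStep(intptr_t step_size_in_bytes) {
      (void)step_size_in_bytes;
      return 0.2;  // ms spent on this step (fixed for the sketch)
    }

    static double AdvanceToDeadline(intptr_t step_size_in_bytes,
                                    double deadline_in_ms) {
      double last_step_ms = 0.2;  // running estimate of one step's cost
      // Step only while one more step is expected to fit in the budget.
      while (NowMs() + last_step_ms < deadline_in_ms) {
        last_step_ms = DoOneStep(step_size_in_bytes);
      }
      // Whatever is left cannot be used for marking anymore, because a
      // single further step would exceed the deadline.
      return deadline_in_ms - NowMs();
    }

    int main() {
      const double deadline_in_ms = NowMs() + 5.0;  // a 5 ms idle budget
      double unusable = AdvanceToDeadline(64 * 1024, deadline_in_ms);
      std::printf("unusable remainder: %.2f ms\n", unusable);
      return 0;
    }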
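The private Observer class added above wires marking progress to allocation progress: the heap calls the observer back after roughly step_size bytes have been allocated, and the callback advances marking via Step(bytes_allocated, GC_VIA_STACK_GUARD). Below is a minimal, self-contained model of that pattern; AllocationObserver, Marker, MarkingObserver, and Heap are simplified stand-ins, not V8's real InlineAllocationObserver interface.

    #include <cstddef>
    #include <cstdio>

    // Base interface: notified after about step_size() bytes of allocation.
    class AllocationObserver {
     public:
      explicit AllocationObserver(size_t step_size) : step_size_(step_size) {}
      virtual ~AllocationObserver() = default;
      size_t step_size() const { return step_size_; }
      virtual void Step(size_t bytes_allocated) = 0;

     private:
      size_t step_size_;
    };

    // Stand-in for the incremental marker being driven forward.
    class Marker {
     public:
      void Step(size_t bytes) {
        std::printf("marking step after %zu bytes\n", bytes);
      }
    };

    // Mirrors IncrementalMarking::Observer: forwards allocation progress
    // into the marker so marking keeps pace with the mutator.
    class MarkingObserver : public AllocationObserver {
     public:
      MarkingObserver(Marker& marker, size_t step_size)
          : AllocationObserver(step_size), marker_(marker) {}
      void Step(size_t bytes_allocated) override {
        marker_.Step(bytes_allocated);
      }

     private:
      Marker& marker_;
    };

    // Toy heap: counts allocated bytes and fires the observer on schedule.
    class Heap {
     public:
      void AddObserver(AllocationObserver* o) { observer_ = o; }
      void Allocate(size_t bytes) {
        since_last_step_ += bytes;
        if (observer_ && since_last_step_ >= observer_->step_size()) {
          observer_->Step(since_last_step_);
          since_last_step_ = 0;
        }
      }

     private:
      AllocationObserver* observer_ = nullptr;
      size_t since_last_step_ = 0;
    };

    int main() {
      Marker marker;
      MarkingObserver observer(marker, 1024);
      Heap heap;
      heap.AddObserver(&observer);
      // Triggers a marking step roughly every 1 KiB of allocation.
      for (int i = 0; i < 8; i++) heap.Allocate(300);
      return 0;
    }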