Version 3.7.5

Added initial gyp infrastructure for MIPS.

Implemented performance improvements to the incremental garbage collector.

Added optimizations and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@9950 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
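
The patch below makes two changes to src/incremental-marking.cc. First, the 4 MB marking deque backing store is now committed lazily behind a new marking_deque_memory_committed_ flag, and a new UncommitMarkingDeque() decommits the pages once marking has stopped. Second, the old marking speed-up logic, which was dead code guarded by "speed_up && 0", is replaced by several traced heuristics that grow allocation_marking_factor_. As a minimal sketch of the lazy commit/uncommit bookkeeping, assuming a hypothetical VirtualMemory stand-in for V8's class of the same name:

    // Sketch only: "VirtualMemory" here is a hypothetical stand-in that
    // pretends the OS calls always succeed; V8's real class wraps
    // platform-specific reserve/commit primitives.
    #include <cstddef>
    #include <cstdlib>

    struct VirtualMemory {
      explicit VirtualMemory(std::size_t size) : size_(size) {}
      bool Commit() { return true; }    // pretend the commit succeeded
      bool Uncommit() { return true; }  // pretend the pages were released
      std::size_t size_;
    };

    class MarkingDequeOwner {
     public:
      // Reserve on first use; commit at most once until uncommitted again.
      void EnsureMarkingDequeIsCommitted() {
        if (memory_ == nullptr) memory_ = new VirtualMemory(4u << 20);
        if (!committed_) {
          if (!memory_->Commit()) std::abort();  // the patch CHECKs success
          committed_ = true;
        }
      }
      // Give the 4 MB back to the OS while the marker is stopped.
      void UncommitMarkingDeque() {
        if (committed_) {
          if (!memory_->Uncommit()) std::abort();
          committed_ = false;
        }
      }
     private:
      VirtualMemory* memory_ = nullptr;
      bool committed_ = false;
    };
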
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 6d2f393..bd0f083 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -41,6 +41,7 @@
     : heap_(heap),
       state_(STOPPED),
       marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
       steps_count_(0),
       steps_took_(0),
       longest_step_(0.0),
@@ -440,10 +441,25 @@
 void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
   if (marking_deque_memory_ == NULL) {
     marking_deque_memory_ = new VirtualMemory(4 * MB);
-    marking_deque_memory_->Commit(
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
         reinterpret_cast<Address>(marking_deque_memory_->address()),
         marking_deque_memory_->size(),
         false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+  }
+}
+
+void IncrementalMarking::UncommitMarkingDeque() {
+  ASSERT(state_ == STOPPED);
+  if (marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
   }
 }
 
@@ -747,6 +763,7 @@
   if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
 
   intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+  bytes_scanned_ += bytes_to_process;
 
   double start = 0;
 
@@ -757,6 +774,7 @@
   if (state_ == SWEEPING) {
     if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
         heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
+      bytes_scanned_ = 0;
       StartMarking(PREVENT_COMPACTION);
     }
   } else if (state_ == MARKING) {
@@ -808,35 +826,64 @@
 
   bool speed_up = false;
 
-  if (old_generation_space_available_at_start_of_incremental_ < 10 * MB ||
-      SpaceLeftInOldSpace() <
-          old_generation_space_available_at_start_of_incremental_ >> 1) {
-    // Half of the space that was available is gone while we were
-    // incrementally marking.
-    speed_up = true;
-    old_generation_space_available_at_start_of_incremental_ =
-        SpaceLeftInOldSpace();
-  }
-
-  if (heap_->PromotedTotalSize() >
-      old_generation_space_used_at_start_of_incremental_ << 1) {
-    // Size of old space doubled while we were incrementally marking.
-    speed_up = true;
-    old_generation_space_used_at_start_of_incremental_ =
-        heap_->PromotedTotalSize();
-  }
-
-  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 &&
-      allocation_marking_factor_ < kMaxAllocationMarkingFactor) {
-    speed_up = true;
-  }
-
-  if (speed_up && 0) {
-    allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
-    allocation_marking_factor_ =
-        static_cast<int>(allocation_marking_factor_ * 1.3);
+  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
     if (FLAG_trace_gc) {
-      PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+      PrintF("Speed up marking after %d steps\n",
+             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+          old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (allocation_marking_factor_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking because of heap size increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking = heap_->PromotedTotalSize()
+      - old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = allocation_marking_factor_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice as fast as we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking because marker was not keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_gc) {
+        PrintF("Postponing speeding up marking until marking starts\n");
+      }
+    } else {
+      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
+      allocation_marking_factor_ = static_cast<int>(
+          Min(kMaxAllocationMarkingFactor,
+              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+      if (FLAG_trace_gc) {
+        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+      }
     }
   }
 
@@ -862,6 +909,7 @@
   steps_took_since_last_gc_ = 0;
   bytes_rescanned_ = 0;
   allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+  bytes_scanned_ = 0;
 }
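
For intuition about the ramp-up in the last hunk: each time a heuristic fires while in the MARKING state, allocation_marking_factor_ is bumped by kAllocationMarkingFactorSpeedup, scaled by 1.3, and clamped to kMaxAllocationMarkingFactor. A standalone sketch of that growth rule follows; the constants are illustrative assumptions, not necessarily the values defined in incremental-marking.h:

    // Sketch only: models the growth of allocation_marking_factor_ under
    // assumed constants (the real ones live in incremental-marking.h).
    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kSpeedup = 2;   // assumed kAllocationMarkingFactorSpeedup
      const int kMax = 1000;    // assumed kMaxAllocationMarkingFactor
      int factor = 1;           // assumed kInitialAllocationMarkingFactor
      for (int bump = 1; factor < kMax; ++bump) {
        factor += kSpeedup;
        factor = std::min(kMax, static_cast<int>(factor * 1.3));
        std::printf("speed-up %2d: allocation_marking_factor_ = %d\n",
                    bump, factor);
      }
      return 0;
    }

Under these assumed constants the factor saturates after roughly twenty speed-ups, so a marker that keeps falling behind allocation quickly approaches the maximum marking rate.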