Version 3.10.0

Fixed store IC writability check in strict mode (Chromium issue 120099).
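
A store IC caches the machinery for a property write; the invariant the fix restores is that writes to non-writable properties must fail, and in strict mode must fail loudly. A minimal standalone C++ sketch of that invariant (hypothetical names, not V8's IC code):

    #include <cstdio>

    struct Property {
      int value;
      bool writable;
    };

    // Returns true on success. In strict mode a blocked store is an
    // observable error; in sloppy mode it is silently ignored.
    bool StoreProperty(Property* p, int value, bool strict_mode) {
      if (!p->writable) {
        if (strict_mode) {
          std::printf("TypeError: cannot assign to read-only property\n");
        }
        return false;
      }
      p->value = value;
      return true;
    }

    int main() {
      Property p = {1, /*writable=*/false};
      StoreProperty(&p, 2, /*strict_mode=*/true);   // reports the error
      StoreProperty(&p, 2, /*strict_mode=*/false);  // silently ignored
      std::printf("value is still %d\n", p.value);  // value is still 1
    }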

Resynchronize timers if the Windows system time was changed (Chromium issue 119815).
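
The fix itself is Windows-specific; as a generic illustration of the idea only, a wall-clock jump can be detected by comparing elapsed wall-clock time against a monotonic clock, and timer baselines reset when the two diverge (a sketch, not the patched code):

    #include <chrono>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      auto wall0 = std::chrono::system_clock::now();
      auto mono0 = std::chrono::steady_clock::now();

      // ... time passes; the user (or NTP) may adjust the system clock ...

      long long wall_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::system_clock::now() - wall0).count();
      long long mono_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now() - mono0).count();

      // A large disagreement between the two clocks means the wall clock
      // jumped, so wall-clock-based timer baselines must be reset.
      if (std::llabs(wall_ms - mono_ms) > 1000) {
        std::printf("system time changed; resynchronizing timers\n");
      }
    }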

Removed "-mfloat-abi=hard" from host compiler cflags when building for hardfp ARM (https://code.google.com/p/chrome-os-partner/issues/detail?id=8539)

Fixed an edge case in case-independent regexp character classes (issue 2032).
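
The general hazard with case-independent classes is that membership must consider the case variants of the input character, not just the character itself. A deliberately simplified, ASCII-only C++ sketch (the actual issue concerns Unicode case handling):

    #include <cctype>
    #include <cstdio>

    // True if any case variant of c falls inside the class range [lo, hi].
    bool InClassIgnoreCase(unsigned char c, unsigned char lo, unsigned char hi) {
      unsigned char lower = static_cast<unsigned char>(std::tolower(c));
      unsigned char upper = static_cast<unsigned char>(std::toupper(c));
      return (lower >= lo && lower <= hi) || (upper >= lo && upper <= hi);
    }

    int main() {
      // /[a-z]/i should match 'K' because its lower-case form is in range.
      std::printf("%d\n", InClassIgnoreCase('K', 'a', 'z'));  // 1
    }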

Reset function info counters after context disposal (Chromium issue 117767, V8 issue 1902).

Fixed missing write barrier in CopyObjectToObjectElements (Chromium issue 119926).
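
CopyObjectToObjectElements copies element slots between objects; in a generational collector, every pointer store into an old-space object must be recorded, or a later young-generation collection can miss the reference and free a live object. A conceptual sketch with hypothetical names (remembered_set and WriteBarrier here are illustrative, not V8's):

    #include <cstddef>
    #include <cstdio>
    #include <unordered_set>

    struct Object {
      bool in_old_space;
      Object* elements[4];
    };

    std::unordered_set<Object*> remembered_set;

    // Record old-to-young pointers so the next scavenge rescans the host.
    void WriteBarrier(Object* host, Object* value) {
      if (host->in_old_space && value != nullptr && !value->in_old_space) {
        remembered_set.insert(host);
      }
    }

    void CopyElements(Object* to, Object* from, std::size_t count) {
      for (std::size_t i = 0; i < count; i++) {
        to->elements[i] = from->elements[i];
        WriteBarrier(to, to->elements[i]);  // The step the bug omitted.
      }
    }

    int main() {
      Object young = {false, {}};
      Object from = {false, {&young}};
      Object to = {true, {}};
      CopyElements(&to, &from, 1);
      std::printf("remembered old objects: %zu\n", remembered_set.size());  // 1
    }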

Fixed missing bounds check in HasElementImpl (Chromium issue 119925).
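
The companion fix: an element query must validate the index against the backing store's length before touching memory. A minimal sketch (hypothetical signature, not the real HasElementImpl):

    #include <cstdio>

    bool HasElement(const int* elements, int length, int index) {
      if (index < 0 || index >= length) return false;  // The missing check.
      return elements[index] != 0;  // Stands in for the hole/element test.
    }

    int main() {
      int backing[3] = {1, 0, 7};
      std::printf("%d %d\n", HasElement(backing, 3, 2),   // 1
                             HasElement(backing, 3, 9));  // 0
    }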

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@11192 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
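
The diff below reworks the runtime profiler's counter-based optimization decision: profiler ticks now live on the unoptimized Code object (shared_code->profiler_ticks()) rather than the SharedFunctionInfo, the magic number 100 becomes kTicksWhenNotEnoughTypeInfo (bounded to one byte by the new STATIC_ASSERT), the "stable on startup" branch and its code_generated_/total_code_generated_ bookkeeping are removed, and the profiler semaphore becomes lazily initialized. In outline, the decision the patch implements looks like this (constants and quoted reasons are from the diff; the surrounding types, the 15% threshold, and the 2000-byte size limit are illustrative stand-ins for FLAG_type_info_threshold and kMaxSizeEarlyOpt):

    #include <cstdio>

    static const int kProfilerTicksBeforeOptimization = 2;
    static const int kTicksWhenNotEnoughTypeInfo = 100;

    enum Decision { kOptimize, kWait };

    Decision DecideForFunction(int ticks, int type_info_percentage,
                               int type_info_threshold, bool any_ic_changed,
                               int code_size, int max_size_early_opt) {
      if (ticks >= kProfilerTicksBeforeOptimization) {
        if (type_info_percentage >= type_info_threshold)
          return kOptimize;                   // "hot and stable"
        if (ticks >= kTicksWhenNotEnoughTypeInfo)
          return kOptimize;                   // "not much type info but very hot"
      } else if (!any_ic_changed && code_size < max_size_early_opt) {
        return kOptimize;                     // "small function"
      }
      return kWait;  // Otherwise just bump profiler_ticks on the Code object.
    }

    int main() {
      std::printf("%d\n", DecideForFunction(2, 40, 15, true, 5000, 2000));   // 0 == kOptimize
      std::printf("%d\n", DecideForFunction(50, 5, 15, true, 5000, 2000));   // 1 == kWait
      std::printf("%d\n", DecideForFunction(100, 5, 15, true, 5000, 2000));  // 0 == kOptimize
    }
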
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index fc7e359..7d51190 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -65,6 +65,12 @@
 // Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
+// If a function does not have enough type info (according to
+// FLAG_type_info_threshold), but has seen a huge number of ticks,
+// optimize it as it is.
+static const int kTicksWhenNotEnoughTypeInfo = 100;
+// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
 
 // Maximum size in bytes of generated code for a function to be optimized
 // the very first time it is seen on the stack.
@@ -72,9 +78,9 @@
 
 
 Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+
+// TODO(isolates): Clean up the semaphore when it is no longer required.
+static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
 
 #ifdef DEBUG
 bool RuntimeProfiler::has_been_globally_set_up_ = false;
@@ -88,7 +94,9 @@
       sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
       sampler_ticks_until_threshold_adjustment_(
           kSamplerTicksBetweenThresholdAdjustment),
-      sampler_window_position_(0) {
+      sampler_window_position_(0),
+      any_ic_changed_(false),
+      code_generated_(false) {
   ClearSampleBuffer();
 }
 
@@ -103,20 +111,20 @@
 
 
 static void GetICCounts(JSFunction* function,
-                        int* ic_with_typeinfo_count,
+                        int* ic_with_type_info_count,
                         int* ic_total_count,
                         int* percentage) {
   *ic_total_count = 0;
-  *ic_with_typeinfo_count = 0;
+  *ic_with_type_info_count = 0;
   Object* raw_info =
       function->shared()->code()->type_feedback_info();
   if (raw_info->IsTypeFeedbackInfo()) {
     TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
-    *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+    *ic_with_type_info_count = info->ic_with_type_info_count();
     *ic_total_count = info->ic_total_count();
   }
   *percentage = *ic_total_count > 0
-      ? 100 * *ic_with_typeinfo_count / *ic_total_count
+      ? 100 * *ic_with_type_info_count / *ic_total_count
       : 100;
 }
 
@@ -173,12 +181,10 @@
   // prepared to generate it, but we don't expect to have to.
   bool found_code = false;
   Code* stack_check_code = NULL;
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
   if (FLAG_count_based_interrupts) {
     InterruptStub interrupt_stub;
     found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
   } else  // NOLINT
-#endif
   {  // NOLINT
     StackCheckStub check_stub;
     found_code = check_stub.FindCodeInCache(&stack_check_code);
@@ -257,13 +263,14 @@
       }
     }
 
-    if (function->IsMarkedForLazyRecompilation() &&
-        function->shared()->code()->kind() == Code::FUNCTION) {
-      Code* unoptimized = function->shared()->code();
-      int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+    Code* shared_code = function->shared()->code();
+    if (shared_code->kind() != Code::FUNCTION) continue;
+
+    if (function->IsMarkedForLazyRecompilation()) {
+      int nesting = shared_code->allow_osr_at_loop_nesting_level();
       if (nesting == 0) AttemptOnStackReplacement(function);
       int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
-      unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
     }
 
     // Do not record non-optimizable functions.
@@ -281,7 +288,7 @@
     }
 
     if (FLAG_watch_ic_patching) {
-      int ticks = function->shared()->profiler_ticks();
+      int ticks = shared_code->profiler_ticks();
 
       if (ticks >= kProfilerTicksBeforeOptimization) {
         int typeinfo, total, percentage;
@@ -290,12 +297,10 @@
           // If this particular function hasn't had any ICs patched for enough
           // ticks, optimize it now.
           Optimize(function, "hot and stable");
-        } else if (ticks >= 100) {
-          // If this function does not have enough type info, but has
-          // seen a huge number of ticks, optimize it as it is.
+        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
           Optimize(function, "not much type info but very hot");
         } else {
-          function->shared()->set_profiler_ticks(ticks + 1);
+          shared_code->set_profiler_ticks(ticks + 1);
           if (FLAG_trace_opt_verbose) {
             PrintF("[not yet optimizing ");
             function->PrintName();
@@ -304,20 +309,12 @@
           }
         }
       } else if (!any_ic_changed_ &&
-          function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+          shared_code->instruction_size() < kMaxSizeEarlyOpt) {
         // If no IC was patched since the last tick and this function is very
         // small, optimistically optimize it now.
         Optimize(function, "small function");
-      } else if (!code_generated_ &&
-          !any_ic_changed_ &&
-          total_code_generated_ > 0 &&
-          total_code_generated_ < 2000) {
-        // If no code was generated and no IC was patched since the last tick,
-        // but a little code has already been generated since last Reset(),
-        // then type info might already be stable and we can optimize now.
-        Optimize(function, "stable on startup");
       } else {
-        function->shared()->set_profiler_ticks(ticks + 1);
+        shared_code->set_profiler_ticks(ticks + 1);
       }
     } else {  // !FLAG_watch_ic_patching
       samples[sample_count++] = function;
@@ -336,7 +333,6 @@
   }
   if (FLAG_watch_ic_patching) {
     any_ic_changed_ = false;
-    code_generated_ = false;
   } else {  // !FLAG_watch_ic_patching
     // Add the collected functions as samples. It's important not to do
     // this as part of collecting them because this will interfere with
@@ -349,9 +345,7 @@
 
 
 void RuntimeProfiler::NotifyTick() {
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
   if (FLAG_count_based_interrupts) return;
-#endif
   isolate_->stack_guard()->RequestRuntimeProfilerTick();
 }
 
@@ -368,9 +362,7 @@
 
 
 void RuntimeProfiler::Reset() {
-  if (FLAG_watch_ic_patching) {
-    total_code_generated_ = 0;
-  } else {  // !FLAG_watch_ic_patching
+  if (!FLAG_watch_ic_patching) {
     sampler_threshold_ = kSamplerThresholdInit;
     sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
     sampler_ticks_until_threshold_adjustment_ =
@@ -412,7 +404,7 @@
   // undid the decrement done by the profiler thread. Increment again
   // to get the right count of active isolates.
   NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore_->Signal();
+  semaphore.Pointer()->Signal();
 }
 
 
@@ -425,7 +417,7 @@
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
   if (old_state != 0) return false;
-  semaphore_->Wait();
+  semaphore.Pointer()->Wait();
   return true;
 }
 
@@ -441,7 +433,7 @@
   if (new_state == 0) {
     // The profiler thread is waiting. Wake it up. It must check for
     // stop conditions before attempting to wait again.
-    semaphore_->Signal();
+    semaphore.Pointer()->Signal();
   }
   thread->Join();
   // The profiler thread is now stopped. Undo the increment in case it
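
A closing note on the semaphore change: the old code created the semaphore in a static initializer, i.e. before main() ran, while LAZY_SEMAPHORE_INITIALIZER defers creation to first use. The same idea, sketched with a C++ function-local static, which is constructed on the first call (the Semaphore type below is a stub, not V8's):

    #include <cstdio>

    struct Semaphore {
      explicit Semaphore(int count) : count_(count) {
        std::printf("semaphore created on first use\n");
      }
      void Signal() { ++count_; }
      void Wait()   { /* a real implementation would block here */ }
      int count_;
    };

    // Constructed on the first call, not during static initialization.
    Semaphore* GetSemaphore() {
      static Semaphore semaphore(0);
      return &semaphore;
    }

    int main() {
      std::printf("before first use\n");
      GetSemaphore()->Signal();  // Construction happens here, not before main().
    }

Thread-safe initialization of function-local statics is only guaranteed from C++11 onward, which is part of why V8 of this era used an explicit POD initializer macro rather than a local static.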