Version 3.18.0

Enabled pretenuring of fast literals in high promotion mode.

Removed preparser library; link preparser executable against full V8.

Fixed set-up of intrinsics' 'constructor' properties. (Chromium issue 229445)

ES6 symbols: extended V8 API to support symbols (issue 2158).
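
As a minimal sketch of the extended API (assuming the 3.18-era v8.h shapes
of v8::Symbol::New and the pre-isolate Integer::New; treat the exact
signatures as approximations, not verbatim):

    #include <v8.h>
    using namespace v8;

    void SymbolKeyedProperty(Isolate* isolate, Handle<Object> obj) {
      HandleScope scope;
      Local<Symbol> key = Symbol::New(isolate);  // fresh, unforgeable key
      obj->Set(key, Integer::New(42));           // symbol-keyed property
      Local<Value> value = obj->Get(key);        // reads 42 back
      (void) value;
    }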

Removed ARM support for VFP2.

Made __proto__ a real JavaScript accessor property. (issues 1949 and 2606)
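
The observable effect, sketched with the old-style embedding API of this
era (Context::New returning a Persistent and the isolate-free HandleScope
are assumptions about the 3.18 surface, not guaranteed verbatim): the
property descriptor of __proto__ now carries a real getter and setter.

    #include <v8.h>
    using namespace v8;

    void ProtoIsNowAnAccessor() {
      HandleScope scope;
      Persistent<Context> context = Context::New();
      Context::Scope context_scope(context);
      // Evaluates to "function" once __proto__ is a plain accessor pair.
      Local<Value> kind = Script::Compile(String::New(
          "typeof Object.getOwnPropertyDescriptor("
          "    Object.prototype, '__proto__').get"))->Run();
      (void) kind;
      context.Dispose();
    }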

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@14304 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
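
A note on the on-stack replacement (OSR) change in the diff below: the old
profiler attempted OSR only while the allowed loop nesting level was still
zero, yet kept ratcheting the level up afterwards; the new code raises the
level first (capped at Code::kMaxLoopNestingMarker) and retries OSR on
every tick until the cap, so back edges in progressively deeper loops
become eligible. A toy model of the new policy (hypothetical standalone
form with an assumed cap value, not the V8 source):

    #include <cstdio>

    const int kMaxLoopNestingMarker = 6;  // stand-in for Code::kMaxLoopNestingMarker

    int main() {
      int allowed_nesting = 0;            // models allow_osr_at_loop_nesting_level
      for (int tick = 0; tick < 10; ++tick) {
        if (allowed_nesting < kMaxLoopNestingMarker) {
          ++allowed_nesting;              // widen eligibility first...
          std::printf("tick %d: attempt OSR at nesting <= %d\n",
                      tick, allowed_nesting);  // ...then retry OSR
        }
      }
      return 0;
    }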
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 2606f8a..752d79c 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -86,17 +86,6 @@
     5 * FullCodeGenerator::kBackEdgeDistanceUnit;
 
 
-Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
-
-#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
-#endif
-bool RuntimeProfiler::enabled_ = false;
-
-
 RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
     : isolate_(isolate),
       sampler_threshold_(kSamplerThresholdInit),
@@ -110,15 +99,6 @@
 }
 
 
-void RuntimeProfiler::GlobalSetUp() {
-  ASSERT(!has_been_globally_set_up_);
-  enabled_ = V8::UseCrankshaft() && FLAG_opt;
-#ifdef DEBUG
-  has_been_globally_set_up_ = true;
-#endif
-}
-
-
 static void GetICCounts(JSFunction* function,
                         int* ic_with_type_info_count,
                         int* ic_total_count,
@@ -190,23 +170,22 @@
   // any back edge in any unoptimized frame will trigger on-stack
   // replacement for that frame.
   if (FLAG_trace_osr) {
-    PrintF("[patching stack checks in ");
+    PrintF("[patching back edges in ");
     function->PrintName();
     PrintF(" for on-stack replacement]\n");
   }
 
-  // Get the stack check stub code object to match against.  We aren't
+  // Get the interrupt stub code object to match against.  We aren't
   // prepared to generate it, but we don't expect to have to.
-  Code* stack_check_code = NULL;
+  Code* interrupt_code = NULL;
   InterruptStub interrupt_stub;
-  bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
+  bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
   if (found_code) {
     Code* replacement_code =
         isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
     Code* unoptimized_code = shared->code();
-    Deoptimizer::PatchStackCheckCode(unoptimized_code,
-                                     stack_check_code,
-                                     replacement_code);
+    Deoptimizer::PatchInterruptCode(
+        unoptimized_code, interrupt_code, replacement_code);
   }
 }
 
@@ -296,9 +275,11 @@
          function->IsMarkedForParallelRecompilation() ||
          function->IsOptimized())) {
       int nesting = shared_code->allow_osr_at_loop_nesting_level();
-      if (nesting == 0) AttemptOnStackReplacement(function);
-      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
-      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+      if (nesting < Code::kMaxLoopNestingMarker) {
+        int new_nesting = nesting + 1;
+        shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+        AttemptOnStackReplacement(function);
+      }
     }
 
     // Only record top-level code on top of the execution stack and
@@ -385,13 +366,9 @@
 
 
 void RuntimeProfiler::SetUp() {
-  ASSERT(has_been_globally_set_up_);
   if (!FLAG_watch_ic_patching) {
     ClearSampleBuffer();
   }
-  // If the ticker hasn't already started, make sure to do so to get
-  // the ticks for the runtime profiler.
-  if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
 }
 
 
@@ -431,48 +408,6 @@
 }
 
 
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
-  // The profiler thread must still be waiting.
-  ASSERT(NoBarrier_Load(&state_) >= 0);
-  // In IsolateEnteredJS we have already incremented the counter and
-  // undid the decrement done by the profiler thread. Increment again
-  // to get the right count of active isolates.
-  NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore.Pointer()->Signal();
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
-  Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
-  ASSERT(old_state >= -1);
-  if (old_state != 0) return false;
-  semaphore.Pointer()->Wait();
-  return true;
-}
-
-
-void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
-  // Do a fake increment. If the profiler is waiting on the semaphore,
-  // the returned state is 0, which can be left as an initial state in
-  // case profiling is restarted later. If the profiler is not
-  // waiting, the increment will prevent it from waiting, but has to
-  // be undone after the profiler is stopped.
-  Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
-  ASSERT(new_state >= 0);
-  if (new_state == 0) {
-    // The profiler thread is waiting. Wake it up. It must check for
-    // stop conditions before attempting to wait again.
-    semaphore.Pointer()->Signal();
-  }
-  thread->Join();
-  // The profiler thread is now stopped. Undo the increment in case it
-  // was not waiting.
-  if (new_state != 0) {
-    NoBarrier_AtomicIncrement(&state_, -1);
-  }
-}
-
-
 void RuntimeProfiler::RemoveDeadSamples() {
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* function = sampler_window_[i];