// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "runtime-profiler.h"

#include "assembler.h"
#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"

namespace v8 {
namespace internal {


// Optimization sampler constants.
static const int kSamplerFrameCount = 2;

// Constants for statistical profiler.
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };

static const int kSamplerTicksBetweenThresholdAdjustment = 32;

static const int kSamplerThresholdInit = 3;
static const int kSamplerThresholdMin = 1;
static const int kSamplerThresholdDelta = 1;

static const int kSamplerThresholdSizeFactorInit = 3;

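// Source-size limit above which OptimizeNow() scales the sampler threshold
// by sampler_threshold_size_factor_, so larger functions need more samples
// before they are optimized.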
static const int kSizeLimit = 1500;

// Constants for counter based profiler.

// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);

// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt = 500;


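// Tracks how many isolates are currently executing JavaScript (the counter
// mentioned in HandleWakeUp below); a value of -1 appears to mean that no
// isolate is in JS and the profiler thread is parked on the semaphore (see
// WaitForSomeIsolateToEnterJS).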
Atomic32 RuntimeProfiler::state_ = 0;

// TODO(isolates): Clean up the semaphore when it is no longer required.
static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;

#ifdef DEBUG
bool RuntimeProfiler::has_been_globally_set_up_ = false;
#endif
bool RuntimeProfiler::enabled_ = false;


RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampler_threshold_(kSamplerThresholdInit),
      sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
      sampler_ticks_until_threshold_adjustment_(
          kSamplerTicksBetweenThresholdAdjustment),
      sampler_window_position_(0) {
  ClearSampleBuffer();
}


void RuntimeProfiler::GlobalSetup() {
  ASSERT(!has_been_globally_set_up_);
  enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
  has_been_globally_set_up_ = true;
#endif
}


static void GetICCounts(JSFunction* function,
                        int* ic_with_type_info_count,
                        int* ic_total_count,
                        int* percentage) {
  *ic_total_count = 0;
  *ic_with_type_info_count = 0;
  Object* raw_info =
      function->shared()->code()->type_feedback_info();
  if (raw_info->IsTypeFeedbackInfo()) {
    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
    *ic_with_type_info_count = info->ic_with_type_info_count();
    *ic_total_count = info->ic_total_count();
  }
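  // Note: a function with no ICs at all reports 100%, so IC-free code is
  // treated as fully type-stable by the callers below.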
  *percentage = *ic_total_count > 0
      ? 100 * *ic_with_type_info_count / *ic_total_count
      : 100;
}


void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
  ASSERT(function->IsOptimizable());
  if (FLAG_trace_opt) {
    PrintF("[marking ");
    function->PrintName();
    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
    PrintF(" for recompilation, reason: %s", reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, total, percentage;
      GetICCounts(function, &typeinfo, &total, &percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
    }
    PrintF("]\n");
  }

  // The next call to the function will trigger optimization.
  function->MarkForLazyRecompilation();
}


void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
  // Debug::has_break_points().
  ASSERT(function->IsMarkedForLazyRecompilation());
  if (!FLAG_use_osr ||
      isolate_->DebuggerHasBreakPoints() ||
      function->IsBuiltin()) {
    return;
  }

  SharedFunctionInfo* shared = function->shared();
  // If the code is not optimizable, don't try OSR.
  if (!shared->code()->optimizable()) return;

  // We are not prepared to do OSR for a function that already has an
  // allocated arguments object. The optimized code would bypass it for
  // arguments accesses, which is unsound. Don't try OSR.
  if (shared->uses_arguments()) return;

  // We're using on-stack replacement: patch the unoptimized code so that
  // any back edge in any unoptimized frame will trigger on-stack
  // replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[patching stack checks in ");
    function->PrintName();
    PrintF(" for on-stack replacement]\n");
  }

  // Get the stack check stub code object to match against. We aren't
  // prepared to generate it, but we don't expect to have to.
  bool found_code = false;
  Code* stack_check_code = NULL;
#if defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_ARM) || \
    defined(V8_TARGET_ARCH_MIPS)
  if (FLAG_count_based_interrupts) {
    InterruptStub interrupt_stub;
    found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
  } else  // NOLINT
#endif
  {  // NOLINT
    StackCheckStub check_stub;
    found_code = check_stub.FindCodeInCache(&stack_check_code);
  }
  if (found_code) {
    Code* replacement_code =
        isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
    Code* unoptimized_code = shared->code();
    Deoptimizer::PatchStackCheckCode(unoptimized_code,
                                     stack_check_code,
                                     replacement_code);
  }
}


void RuntimeProfiler::ClearSampleBuffer() {
  memset(sampler_window_, 0, sizeof(sampler_window_));
  memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
}


int RuntimeProfiler::LookupSample(JSFunction* function) {
  int weight = 0;
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* sample = sampler_window_[i];
    if (sample != NULL) {
      if (function == sample) {
        weight += sampler_window_weight_[i];
      }
    }
  }
  return weight;
}


void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
  ASSERT(IsPowerOf2(kSamplerWindowSize));
  sampler_window_[sampler_window_position_] = function;
  sampler_window_weight_[sampler_window_position_] = weight;
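  // kSamplerWindowSize is a power of two (asserted above), so masking with
  // (kSamplerWindowSize - 1) wraps the circular-buffer position without a
  // division.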
  sampler_window_position_ = (sampler_window_position_ + 1) &
      (kSamplerWindowSize - 1);
}


void RuntimeProfiler::OptimizeNow() {
  HandleScope scope(isolate_);

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimization
  // (eagerly or lazily).
  JSFunction* samples[kSamplerFrameCount];
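  // Note: samples[] is only written in the !FLAG_watch_ic_patching branch
  // below, where the walk is bounded by kSamplerFrameCount, so this
  // fixed-size array cannot overflow even if FLAG_frame_count is larger.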
  int sample_count = 0;
  int frame_count = 0;
  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
                                                 : kSamplerFrameCount;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = JSFunction::cast(frame->function());

    if (!FLAG_watch_ic_patching) {
      // Adjust the threshold each time we have processed
      // a certain number of ticks.
      if (sampler_ticks_until_threshold_adjustment_ > 0) {
        sampler_ticks_until_threshold_adjustment_--;
        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
          // If the threshold is not already at the minimum,
          // modify it and reset the ticks until the next adjustment.
          if (sampler_threshold_ > kSamplerThresholdMin) {
            sampler_threshold_ -= kSamplerThresholdDelta;
            sampler_ticks_until_threshold_adjustment_ =
                kSamplerTicksBetweenThresholdAdjustment;
          }
        }
      }
    }

    Code* shared_code = function->shared()->code();
    if (shared_code->kind() != Code::FUNCTION) continue;

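    // For functions already marked for recompilation, patch the stack
    // checks once (while the allowed nesting level is still zero) and then
    // raise the loop nesting level at which OSR may trigger, up to
    // Code::kMaxLoopNestingMarker.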
    if (function->IsMarkedForLazyRecompilation()) {
      int nesting = shared_code->allow_osr_at_loop_nesting_level();
      if (nesting == 0) AttemptOnStackReplacement(function);
      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
    }

    // Do not record non-optimizable functions.
    if (!function->IsOptimizable()) continue;
    if (function->shared()->optimization_disabled()) continue;

    // Only record top-level code on top of the execution stack and
    // avoid optimizing excessively large scripts since top-level code
    // will be executed only once.
    const int kMaxToplevelSourceSize = 10 * 1024;
    if (function->shared()->is_toplevel()
        && (frame_count > 1
            || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
      continue;
    }

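    // Counter-based decision: optimize once a function is both hot (enough
    // profiler ticks) and type-stable (enough ICs carry type info), with
    // two optimistic shortcuts for tiny functions and startup code;
    // otherwise just bump the tick counter.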
    if (FLAG_watch_ic_patching) {
      int ticks = shared_code->profiler_ticks();

      if (ticks >= kProfilerTicksBeforeOptimization) {
        int typeinfo, total, percentage;
        GetICCounts(function, &typeinfo, &total, &percentage);
        if (percentage >= FLAG_type_info_threshold) {
          // If this particular function hasn't had any ICs patched for enough
          // ticks, optimize it now.
          Optimize(function, "hot and stable");
        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
          Optimize(function, "not much type info but very hot");
        } else {
          shared_code->set_profiler_ticks(ticks + 1);
          if (FLAG_trace_opt_verbose) {
            PrintF("[not yet optimizing ");
            function->PrintName();
            PrintF(", not enough type info: %d/%d (%d%%)]\n",
                   typeinfo, total, percentage);
          }
        }
      } else if (!any_ic_changed_ &&
                 shared_code->instruction_size() < kMaxSizeEarlyOpt) {
        // If no IC was patched since the last tick and this function is very
        // small, optimistically optimize it now.
        Optimize(function, "small function");
      } else if (!code_generated_ &&
                 !any_ic_changed_ &&
                 total_code_generated_ > 0 &&
                 total_code_generated_ < 2000) {
        // If no code was generated and no IC was patched since the last tick,
        // but a little code has already been generated since last Reset(),
        // then type info might already be stable and we can optimize now.
        Optimize(function, "stable on startup");
      } else {
        shared_code->set_profiler_ticks(ticks + 1);
      }
    } else {  // !FLAG_watch_ic_patching
      samples[sample_count++] = function;

      int function_size = function->shared()->SourceSize();
      int threshold_size_factor = (function_size > kSizeLimit)
          ? sampler_threshold_size_factor_
          : 1;

      int threshold = sampler_threshold_ * threshold_size_factor;

      if (LookupSample(function) >= threshold) {
        Optimize(function, "sampler window lookup");
      }
    }
  }
  if (FLAG_watch_ic_patching) {
    any_ic_changed_ = false;
    code_generated_ = false;
  } else {  // !FLAG_watch_ic_patching
    // Add the collected functions as samples. It's important not to do
    // this as part of collecting them because this will interfere with
    // the sample lookup in case of recursive functions.
    for (int i = 0; i < sample_count; i++) {
      AddSample(samples[i], kSamplerFrameWeight[i]);
    }
  }
}


void RuntimeProfiler::NotifyTick() {
#if defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_ARM) || \
    defined(V8_TARGET_ARCH_MIPS)
  if (FLAG_count_based_interrupts) return;
#endif
  isolate_->stack_guard()->RequestRuntimeProfilerTick();
}


void RuntimeProfiler::SetUp() {
  ASSERT(has_been_globally_set_up_);
  if (!FLAG_watch_ic_patching) {
    ClearSampleBuffer();
  }
  // If the ticker hasn't already started, make sure to do so to get
  // the ticks for the runtime profiler.
  if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}


void RuntimeProfiler::Reset() {
  if (FLAG_watch_ic_patching) {
    total_code_generated_ = 0;
  } else {  // !FLAG_watch_ic_patching
    sampler_threshold_ = kSamplerThresholdInit;
    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
    sampler_ticks_until_threshold_adjustment_ =
        kSamplerTicksBetweenThresholdAdjustment;
  }
}


void RuntimeProfiler::TearDown() {
  // Nothing to do.
}


int RuntimeProfiler::SamplerWindowSize() {
  return kSamplerWindowSize;
}


// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
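    // A new-space function that survived the scavenge left a forwarding
    // pointer behind; follow it. Otherwise the object is dead and the
    // sample slot is cleared.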
    if (function != NULL && isolate_->heap()->InNewSpace(function)) {
      MapWord map_word = HeapObject::cast(function)->map_word();
      if (map_word.IsForwardingAddress()) {
        sampler_window_[i] = map_word.ToForwardingAddress();
      } else {
        sampler_window_[i] = NULL;
      }
    }
  }
}


void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
  // The profiler thread must still be waiting.
  ASSERT(NoBarrier_Load(&state_) >= 0);
  // In IsolateEnteredJS we have already incremented the counter and
  // undone the decrement done by the profiler thread. Increment again
  // to get the right count of active isolates.
  NoBarrier_AtomicIncrement(&state_, 1);
  semaphore.Pointer()->Signal();
}


bool RuntimeProfiler::IsSomeIsolateInJS() {
  return NoBarrier_Load(&state_) > 0;
}


bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
  Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
  ASSERT(old_state >= -1);
  if (old_state != 0) return false;
  semaphore.Pointer()->Wait();
  return true;
}


void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
  // Do a fake increment. If the profiler is waiting on the semaphore,
  // the returned state is 0, which can be left as an initial state in
  // case profiling is restarted later. If the profiler is not
  // waiting, the increment will prevent it from waiting, but has to
  // be undone after the profiler is stopped.
  Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
  ASSERT(new_state >= 0);
  if (new_state == 0) {
    // The profiler thread is waiting. Wake it up. It must check for
    // stop conditions before attempting to wait again.
    semaphore.Pointer()->Signal();
  }
  thread->Join();
  // The profiler thread is now stopped. Undo the increment in case it
  // was not waiting.
  if (new_state != 0) {
    NoBarrier_AtomicIncrement(&state_, -1);
  }
}


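// Drop samples whose mark bit is clear: such functions were not reached
// during marking and are presumably about to be swept.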
void RuntimeProfiler::RemoveDeadSamples() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL &&
        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
      sampler_window_[i] = NULL;
    }
  }
}


void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    visitor->VisitPointer(&sampler_window_[i]);
  }
}


bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
  if (!RuntimeProfiler::IsSomeIsolateInJS()) {
    return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
  }
  return false;
}


} }  // namespace v8::internal