// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "runtime-profiler.h"

#include "assembler.h"
#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"

namespace v8 {
namespace internal {

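
// A PendingListNode holds a weak global handle to a function that has been
// queued for eager optimization via OptimizeSoon(). Because the handle is
// weak, the function may still be garbage collected while it is on the list;
// IsValid() reports whether it is still alive.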
class PendingListNode : public Malloced {
 public:
  explicit PendingListNode(JSFunction* function);
  ~PendingListNode() { Destroy(); }

  PendingListNode* next() const { return next_; }
  void set_next(PendingListNode* node) { next_ = node; }
  Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }

  // If the function is garbage collected before we've had the chance
  // to optimize it, the weak handle will be null.
  bool IsValid() { return !function_.is_null(); }

  // Returns the number of microseconds this node has been pending.
  int Delay() const { return static_cast<int>(OS::Ticks() - start_); }

 private:
  void Destroy();
  static void WeakCallback(v8::Persistent<v8::Value> object, void* data);

  PendingListNode* next_;
  Handle<Object> function_;  // Weak handle.
  int64_t start_;
};

// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };

static const int kSamplerTicksBetweenThresholdAdjustment = 32;

static const int kSamplerThresholdInit = 3;
static const int kSamplerThresholdMin = 1;
static const int kSamplerThresholdDelta = 1;

static const int kSamplerThresholdSizeFactorInit = 3;
static const int kSamplerThresholdSizeFactorMin = 1;
static const int kSamplerThresholdSizeFactorDelta = 1;

static const int kSizeLimit = 1500;

PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  function_ = global_handles->Create(function);
  start_ = OS::Ticks();
  global_handles->MakeWeak(function_.location(), this, &WeakCallback);
}


void PendingListNode::Destroy() {
  if (!IsValid()) return;
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  global_handles->Destroy(function_.location());
  function_ = Handle<Object>::null();
}


void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
  reinterpret_cast<PendingListNode*>(data)->Destroy();
}

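
// state_ counts how many isolates are currently executing JavaScript (see
// IsSomeIsolateInJS() below). The profiler thread parks itself on semaphore_
// when no isolate is active and is woken up again through HandleWakeUp().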
Atomic32 RuntimeProfiler::state_ = 0;
// TODO(isolates): Create the semaphore lazily and clean it up when no
// longer required.
#ifdef ENABLE_LOGGING_AND_PROFILING
Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
#endif

#ifdef DEBUG
bool RuntimeProfiler::has_been_globally_setup_ = false;
#endif
bool RuntimeProfiler::enabled_ = false;


RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampler_threshold_(kSamplerThresholdInit),
      sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
      sampler_ticks_until_threshold_adjustment_(
          kSamplerTicksBetweenThresholdAdjustment),
      js_ratio_(0),
      sampler_window_position_(0),
      optimize_soon_list_(NULL),
      state_window_position_(0),
      state_window_ticks_(0) {
  state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
  state_counts_[IN_JS_STATE] = 0;
  STATIC_ASSERT(IN_NON_JS_STATE == 0);
  memset(state_window_, 0, sizeof(state_window_));
  ClearSampleBuffer();
}


void RuntimeProfiler::GlobalSetup() {
  ASSERT(!has_been_globally_setup_);
  enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
  has_been_globally_setup_ = true;
#endif
}

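
// Marks a function for lazy recompilation by the optimizing compiler. The
// eager flag and delay are only used for --trace-opt output.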
void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
  ASSERT(function->IsOptimizable());
  if (FLAG_trace_opt) {
    PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
    function->PrintName();
    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
    PrintF(" for recompilation");
    if (delay > 0) {
      PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
    }
    PrintF("]\n");
  }

  // The next call to the function will trigger optimization.
  function->MarkForLazyRecompilation();
}

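
// Patches the stack checks of a function's unoptimized code so that back
// edges trigger on-stack replacement, provided OSR is enabled and the
// function and its code are eligible.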
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
  // Debug::has_break_points().
  ASSERT(function->IsMarkedForLazyRecompilation());
  if (!FLAG_use_osr ||
      isolate_->DebuggerHasBreakPoints() ||
      function->IsBuiltin()) {
    return;
  }

  SharedFunctionInfo* shared = function->shared();
  // If the code is not optimizable or references context slots, don't try OSR.
  if (!shared->code()->optimizable() || !shared->allows_lazy_compilation()) {
    return;
  }

  // We are not prepared to do OSR for a function that already has an
  // allocated arguments object. The optimized code would bypass it for
  // arguments accesses, which is unsound. Don't try OSR.
  if (shared->scope_info()->HasArgumentsShadow()) return;

  // We're using on-stack replacement: patch the unoptimized code so that
  // any back edge in any unoptimized frame will trigger on-stack
  // replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[patching stack checks in ");
    function->PrintName();
    PrintF(" for on-stack replacement]\n");
  }

  // Get the stack check stub code object to match against. We aren't
  // prepared to generate it, but we don't expect to have to.
  StackCheckStub check_stub;
  Object* check_code;
  MaybeObject* maybe_check_code = check_stub.TryGetCode();
  if (maybe_check_code->ToObject(&check_code)) {
    Code* replacement_code =
        isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
    Code* unoptimized_code = shared->code();
    Deoptimizer::PatchStackCheckCode(unoptimized_code,
                                     Code::cast(check_code),
                                     replacement_code);
  }
}

void RuntimeProfiler::ClearSampleBuffer() {
  memset(sampler_window_, 0, sizeof(sampler_window_));
  memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
}

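
// Returns the accumulated weight of the given function in the sampler
// window, i.e. how often it has recently been seen near the top of the stack.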
int RuntimeProfiler::LookupSample(JSFunction* function) {
  int weight = 0;
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* sample = sampler_window_[i];
    if (sample != NULL) {
      if (function == sample) {
        weight += sampler_window_weight_[i];
      }
    }
  }
  return weight;
}

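
// Records a function in the circular sampler window. The window size is a
// power of two so the position can wrap around with a simple mask.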
void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
  ASSERT(IsPowerOf2(kSamplerWindowSize));
  sampler_window_[sampler_window_position_] = function;
  sampler_window_weight_[sampler_window_position_] = weight;
  sampler_window_position_ = (sampler_window_position_ + 1) &
      (kSamplerWindowSize - 1);
}

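
// Processes the optimize-soon list, then samples the top JavaScript frames
// and marks hot functions for optimization or on-stack replacement.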
void RuntimeProfiler::OptimizeNow() {
  HandleScope scope(isolate_);
  PendingListNode* current = optimize_soon_list_;
  while (current != NULL) {
    PendingListNode* next = current->next();
    if (current->IsValid()) {
      Handle<JSFunction> function = current->function();
      int delay = current->Delay();
      if (function->IsOptimizable()) {
        Optimize(*function, true, delay);
      }
    }
    delete current;
    current = next;
  }
  optimize_soon_list_ = NULL;

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimization
  // (eagerly or lazily).
  JSFunction* samples[kSamplerFrameCount];
  int sample_count = 0;
  int frame_count = 0;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < kSamplerFrameCount && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = JSFunction::cast(frame->function());

    // Adjust the threshold each time we have processed
    // a certain number of ticks.
    if (sampler_ticks_until_threshold_adjustment_ > 0) {
      sampler_ticks_until_threshold_adjustment_--;
      if (sampler_ticks_until_threshold_adjustment_ <= 0) {
        // If the threshold is not already at the minimum,
        // modify it and reset the ticks until the next adjustment.
        if (sampler_threshold_ > kSamplerThresholdMin) {
          sampler_threshold_ -= kSamplerThresholdDelta;
          sampler_ticks_until_threshold_adjustment_ =
              kSamplerTicksBetweenThresholdAdjustment;
        }
      }
    }

    if (function->IsMarkedForLazyRecompilation()) {
      Code* unoptimized = function->shared()->code();
      int nesting = unoptimized->allow_osr_at_loop_nesting_level();
      if (nesting == 0) AttemptOnStackReplacement(function);
      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
      unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
    }

    // Do not record non-optimizable functions.
    if (!function->IsOptimizable()) continue;
    samples[sample_count++] = function;

    int function_size = function->shared()->SourceSize();
    int threshold_size_factor = (function_size > kSizeLimit)
        ? sampler_threshold_size_factor_
        : 1;

    int threshold = sampler_threshold_ * threshold_size_factor;
    int current_js_ratio = NoBarrier_Load(&js_ratio_);

    // Adjust the threshold depending on the ratio of time spent
    // in JS code.
    if (current_js_ratio < 20) {
      // If we spend less than 20% of the time in JS code,
      // do not optimize.
      continue;
    } else if (current_js_ratio < 75) {
      // Below 75% of time spent in JS code, only optimize very
      // frequently used functions.
      threshold *= 3;
    }

    if (LookupSample(function) >= threshold) {
      Optimize(function, false, 0);
      isolate_->compilation_cache()->MarkForEagerOptimizing(
          Handle<JSFunction>(function));
    }
  }

  // Add the collected functions as samples. It's important not to do
  // this as part of collecting them because this will interfere with
  // the sample lookup in case of recursive functions.
  for (int i = 0; i < sample_count; i++) {
    AddSample(samples[i], kSamplerFrameWeight[i]);
  }
}


void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
  if (!function->IsOptimizable()) return;
  PendingListNode* node = new PendingListNode(function);
  node->set_next(optimize_soon_list_);
  optimize_soon_list_ = node;
}


#ifdef ENABLE_LOGGING_AND_PROFILING
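// Records the latest sampler state in a circular window and recomputes
// js_ratio_, the percentage of recent ticks spent executing JavaScript.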
void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
  SamplerState old_state = state_window_[state_window_position_];
  state_counts_[old_state]--;
  state_window_[state_window_position_] = current_state;
  state_counts_[current_state]++;
  ASSERT(IsPowerOf2(kStateWindowSize));
  state_window_position_ = (state_window_position_ + 1) &
      (kStateWindowSize - 1);
  // Note: to calculate the correct ratio we have to track how many valid
  // ticks are actually in the state window, because on profiler
  // startup this number can be less than the window size.
  state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
  NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
                  state_window_ticks_);
}
#endif


void RuntimeProfiler::NotifyTick() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // Record state sample.
  SamplerState state = IsSomeIsolateInJS()
      ? IN_JS_STATE
      : IN_NON_JS_STATE;
  UpdateStateRatio(state);
  isolate_->stack_guard()->RequestRuntimeProfilerTick();
#endif
}


void RuntimeProfiler::Setup() {
  ASSERT(has_been_globally_setup_);
  ClearSampleBuffer();
  // If the ticker hasn't already started, make sure to do so to get
  // the ticks for the runtime profiler.
  if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}


void RuntimeProfiler::Reset() {
  sampler_threshold_ = kSamplerThresholdInit;
  sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
  sampler_ticks_until_threshold_adjustment_ =
      kSamplerTicksBetweenThresholdAdjustment;
}


void RuntimeProfiler::TearDown() {
  // Nothing to do.
}


int RuntimeProfiler::SamplerWindowSize() {
  return kSamplerWindowSize;
}


// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL && isolate_->heap()->InNewSpace(function)) {
      MapWord map_word = HeapObject::cast(function)->map_word();
      if (map_word.IsForwardingAddress()) {
        sampler_window_[i] = map_word.ToForwardingAddress();
      } else {
        sampler_window_[i] = NULL;
      }
    }
  }
}


void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // The profiler thread must still be waiting.
  ASSERT(NoBarrier_Load(&state_) >= 0);
  // In IsolateEnteredJS we have already incremented the counter and
  // undone the decrement done by the profiler thread. Increment again
  // to get the right count of active isolates.
  NoBarrier_AtomicIncrement(&state_, 1);
  semaphore_->Signal();
  isolate->ResetEagerOptimizingData();
#endif
}


bool RuntimeProfiler::IsSomeIsolateInJS() {
  return NoBarrier_Load(&state_) > 0;
}


bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
  ASSERT(old_state >= -1);
  if (old_state != 0) return false;
  semaphore_->Wait();
#endif
  return true;
}


void RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  semaphore_->Signal();
#endif
}


void RuntimeProfiler::RemoveDeadSamples() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
      sampler_window_[i] = NULL;
    }
  }
}


void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    visitor->VisitPointer(&sampler_window_[i]);
  }
}

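
// Suspends the calling profiler thread after a run of ticks with no isolate
// executing JavaScript, until some isolate enters JavaScript again. Returns
// true if the thread was suspended (and has since been woken up).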
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  static const int kNonJSTicksThreshold = 100;
  if (RuntimeProfiler::IsSomeIsolateInJS()) {
    non_js_ticks_ = 0;
  } else {
    if (non_js_ticks_ < kNonJSTicksThreshold) {
      ++non_js_ticks_;
    } else {
      return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
    }
  }
#endif
  return false;
}


} }  // namespace v8::internal