// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "cpu-profiler-inl.h"

#ifdef ENABLE_LOGGING_AND_PROFILING

#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"

#include "../include/v8-profiler.h"

namespace v8 {
namespace internal {

static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;

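// ProfilerEventsProcessor runs on its own thread (see Run() below) and
// consumes the code events and tick samples produced by the VM and the
// stack sampler.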
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0),
      known_functions_(new HashMap(AddressesMatch)) {
}


ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  delete known_functions_;
}

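// Each of the event methods below fills in a CodeEventsContainer record,
// stamps it with a monotonically increasing enqueue_order_, and pushes it
// onto events_buffer_ for the processor thread to apply to the code map.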
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
                                                  const char* prefix,
                                                  String* name,
                                                  Address start) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = 1;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              String* name,
                                              String* resource_name,
                                              int line_number,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              const char* name,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              int args_count,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, args_count);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
  CodeEventsContainer evt_rec;
  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
  rec->type = CodeEventRecord::CODE_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
  CodeEventsContainer evt_rec;
  CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
  rec->type = CodeEventRecord::CODE_DELETE;
  rec->order = ++enqueue_order_;
  rec->start = from;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
                                                  Address start,
                                                  int security_token_id) {
  CodeEventsContainer evt_rec;
  CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
  rec->type = CodeEventRecord::CODE_ALIAS;
  rec->order = ++enqueue_order_;
  rec->start = alias;
  rec->entry = generator_->NewCodeEntry(security_token_id);
  rec->code_start = start;
  events_buffer_.Enqueue(evt_rec);

  known_functions_->Lookup(alias, AddressHash(alias), true);
}


void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
  CodeMoveEvent(from, to);

  if (IsKnownFunction(from)) {
    known_functions_->Remove(from, AddressHash(from));
    known_functions_->Lookup(to, AddressHash(to), true);
  }
}


void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
  CodeDeleteEvent(from);

  known_functions_->Remove(from, AddressHash(from));
}


bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
  HashMap::Entry* entry =
      known_functions_->Lookup(start, AddressHash(start), false);
  return entry != NULL;
}


void ProfilerEventsProcessor::ProcessMovedFunctions() {
  for (int i = 0; i < moved_functions_.length(); ++i) {
    JSFunction* function = moved_functions_[i];
    CpuProfiler::FunctionCreateEvent(function);
  }
  moved_functions_.Clear();
}


void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
  moved_functions_.Add(function);
}


void ProfilerEventsProcessor::RegExpCodeCreateEvent(
    Logger::LogEventsAndTags tag,
    const char* prefix,
    String* name,
    Address start,
    unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}

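// Enqueues a synthetic tick sample describing the JS stack that is current
// when profiling starts (called from StartCollectingProfile).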
void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record;
  TickSample* sample = &record.sample;
  sample->state = VMState::current_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  sample->frames_count = 0;
  for (StackTraceFrameIterator it;
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    sample->stack[sample->frames_count++] =
        reinterpret_cast<Address>(frame->function());
  }
  record.order = enqueue_order_;
  ticks_from_vm_buffer_.Enqueue(record);
}

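// Dequeues one code event, if available, applies it to the code map, and
// updates *dequeue_order so tick samples can be matched against code events.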
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}

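// Drains tick samples for the given dequeue_order from both the sampler
// queue and the VM-side queue filled by AddCurrentStack.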
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of the tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible because the
    // sampler writes to the queue without any synchronization, so if the
    // processor gets far behind, a record may be modified right under its
    // feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case frames_count has a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count >= TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      return true;
    }
  }
}

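// Thread body: alternates between draining tick samples and advancing
// through code events, keeping the two streams ordered by dequeue_order,
// until the processor is stopped.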
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Process all ticks for the current dequeue_order.
    if (ProcessTicks(dequeue_order)) {
      // All ticks of the current dequeue_order are processed,
      // proceed to the next code event.
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
  // Keep processing while tick events remain; remaining code events are
  // skipped once the tick buffers drain.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}

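// CpuProfiler is used through its static interface; the functions below
// forward to a process-wide singleton.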
CpuProfiler* CpuProfiler::singleton_ = NULL;

void CpuProfiler::StartProfiling(const char* title) {
  ASSERT(singleton_ != NULL);
  singleton_->StartCollectingProfile(title);
}


void CpuProfiler::StartProfiling(String* title) {
  ASSERT(singleton_ != NULL);
  singleton_->StartCollectingProfile(title);
}


CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
}


CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
  return is_profiling() ?
      singleton_->StopCollectingProfile(security_token, title) : NULL;
}


int CpuProfiler::GetProfilesCount() {
  ASSERT(singleton_ != NULL);
  // The count of profiles doesn't depend on a security token.
  return singleton_->profiles_->Profiles(
      TokenEnumerator::kNoSecurityToken)->length();
}


CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
  ASSERT(singleton_ != NULL);
  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
  return singleton_->profiles_->Profiles(token)->at(index);
}


CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
  ASSERT(singleton_ != NULL);
  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
  return singleton_->profiles_->GetProfile(token, uid);
}


TickSample* CpuProfiler::TickSampleEvent() {
  if (CpuProfiler::is_profiling()) {
    return singleton_->processor_->TickSampleEvent();
  } else {
    return NULL;
  }
}

void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, const char* comment) {
  singleton_->processor_->CodeCreateEvent(
      tag, comment, code->address(), code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      name,
      Heap::empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name,
                                  String* source, int line) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      name,
      source,
      line,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, int args_count) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      args_count,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeMoveEvent(Address from, Address to) {
  singleton_->processor_->CodeMoveEvent(from, to);
}


void CpuProfiler::CodeDeleteEvent(Address from) {
  singleton_->processor_->CodeDeleteEvent(from);
}


void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
  int security_token_id = TokenEnumerator::kNoSecurityToken;
  if (function->unchecked_context()->IsContext()) {
    security_token_id = singleton_->token_enumerator_->GetTokenId(
        function->context()->global_context()->security_token());
  }
  singleton_->processor_->FunctionCreateEvent(
      function->address(),
      function->code()->address(),
      security_token_id);
}

void CpuProfiler::ProcessMovedFunctions() {
  singleton_->processor_->ProcessMovedFunctions();
}


void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
  // This function is called from GC iterators (during Scavenge,
  // MC, and MS), so marking bits can be set on objects. That's
  // why unchecked accessors are used here.

  // The same function can be reported several times.
  if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
      || singleton_->processor_->IsKnownFunction(function->address())) return;

  singleton_->processor_->RememberMovedFunction(function);
}


void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
  singleton_->processor_->FunctionMoveEvent(from, to);
}


void CpuProfiler::FunctionDeleteEvent(Address from) {
  singleton_->processor_->FunctionDeleteEvent(from);
}


void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "get ", name, entry_point);
}


void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
  singleton_->processor_->RegExpCodeCreateEvent(
      Logger::REG_EXP_TAG,
      "RegExp: ",
      source,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "set ", name, entry_point);
}


CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL) {
}


CpuProfiler::~CpuProfiler() {
  delete token_enumerator_;
  delete profiles_;
}


void CpuProfiler::StartCollectingProfile(const char* title) {
  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
    StartProcessorIfNotStarted();
  }
  processor_->AddCurrentStack();
}


void CpuProfiler::StartCollectingProfile(String* title) {
  StartCollectingProfile(profiles_->GetName(title));
}

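// Lazily creates the profile generator and the processor thread, logs the
// code that already exists in the heap so it gets code-creation events, and
// starts the stack sampler.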
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    // Disable logging when using the new implementation.
    saved_logging_nesting_ = Logger::logging_nesting_;
    Logger::logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    processor_->Start();
    // Enumerate the code and function objects already present in the heap.
    if (Heap::HasBeenSetup()) {
      if (!FLAG_prof_browser_mode) {
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        Logger::LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      Logger::LogCompiledFunctions();
      Logger::LogFunctionObjects();
      Logger::LogAccessorCallbacks();
    }
    // Enable stack sampling.
    reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
  }
}

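// Finishes the named profile and stops the processor thread if this was the
// last profile being collected.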
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}


CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}

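// Shuts down the sampler and the processor thread and restores logging when
// the profile being stopped is the last one.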
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) {
    reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
    processor_->Stop();
    processor_->Join();
    delete processor_;
    delete generator_;
    processor_ = NULL;
    generator_ = NULL;
    Logger::logging_nesting_ = saved_logging_nesting_;
  }
}

} }  // namespace v8::internal

#endif  // ENABLE_LOGGING_AND_PROFILING

namespace v8 {
namespace internal {

void CpuProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (singleton_ == NULL) {
    singleton_ = new CpuProfiler();
  }
#endif
}


void CpuProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (singleton_ != NULL) {
    delete singleton_;
  }
  singleton_ = NULL;
#endif
}

} }  // namespace v8::internal