blob: acf3349be4fe8bfe901d7a1f77376fbf1caf84b7 [file] [log] [blame]
Steve Block6ded16b2010-05-10 14:33:55 +01001// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "cpu-profiler-inl.h"
31
32#ifdef ENABLE_LOGGING_AND_PROFILING
33
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +010034#include "frames-inl.h"
Kristian Monsen0d5e1162010-09-30 15:31:59 +010035#include "hashmap.h"
Steve Block6ded16b2010-05-10 14:33:55 +010036#include "log-inl.h"
37
38#include "../include/v8-profiler.h"
39
40namespace v8 {
41namespace internal {
42
// Capacities of the lock-free buffers that connect the VM and sampler
// threads to the profiler processor thread.
static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;
46
47
// Sets up the processor thread state. |generator| is not owned by this
// object; known_functions_ is owned and released in the destructor.
// The processor starts in the running state and stops when running_ is
// cleared (see Run()).
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0),
      known_functions_(new HashMap(AddressesMatch)) {
}
57
58
// Releases the owned known-functions map; generator_ is not owned.
ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  delete known_functions_;
}
Steve Block6ded16b2010-05-10 14:33:55 +010062
63
// Enqueues a code-creation record for a native callback identified only
// by its entry address (e.g. getters/setters, see callers below).
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
                                                  const char* prefix,
                                                  String* name,
                                                  Address start) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = 1;  // Placeholder size: only the entry address is known.
  events_buffer_.Enqueue(evt_rec);
}
78
79
// Enqueues a code-creation record for JS code that has a name, a
// resource (script) name and a line number.
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              String* name,
                                              String* resource_name,
                                              int line_number,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
96
97
// Enqueues a code-creation record for code named by a plain C string
// (used for builtins/stubs-style entries — see callers).
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              const char* name,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
112
113
// Enqueues a code-creation record for code identified only by its
// argument count (anonymous stubs).
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              int args_count,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, args_count);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
128
129
// Enqueues a record noting that a code object moved from |from| to |to|
// (e.g. during GC compaction).
void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
  CodeEventsContainer evt_rec;
  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
  rec->type = CodeEventRecord::CODE_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}
139
140
// Enqueues a record noting that the code object at |from| was deleted.
void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
  CodeEventsContainer evt_rec;
  CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
  rec->type = CodeEventRecord::CODE_DELETE;
  rec->order = ++enqueue_order_;
  rec->start = from;
  events_buffer_.Enqueue(evt_rec);
}
149
150
// Enqueues an alias record binding a JSFunction object address to its
// code's address, tagged with a security token. Also registers the
// function address in known_functions_ so duplicate reports can be
// filtered via IsKnownFunction().
void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
                                                  Address start,
                                                  int security_token_id) {
  CodeEventsContainer evt_rec;
  CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
  rec->type = CodeEventRecord::CODE_ALIAS;
  rec->order = ++enqueue_order_;
  rec->start = alias;
  rec->entry = generator_->NewCodeEntry(security_token_id);
  rec->code_start = start;
  events_buffer_.Enqueue(evt_rec);

  // Lookup with insert==true adds the address to the map.
  known_functions_->Lookup(alias, AddressHash(alias), true);
}
165
166
// Forwards a function move as a code move, and keeps known_functions_
// in sync by rehoming the tracked address.
void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
  CodeMoveEvent(from, to);

  if (IsKnownFunction(from)) {
    known_functions_->Remove(from, AddressHash(from));
    known_functions_->Lookup(to, AddressHash(to), true);
  }
}
175
176
// Forwards a function deletion as a code deletion and forgets the
// address in known_functions_ (Remove on an absent key is harmless).
void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
  CodeDeleteEvent(from);

  known_functions_->Remove(from, AddressHash(from));
}
182
183
184bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
185 HashMap::Entry* entry =
186 known_functions_->Lookup(start, AddressHash(start), false);
187 return entry != NULL;
Steve Block6ded16b2010-05-10 14:33:55 +0100188}
189
190
// Enqueues a code-creation record for compiled RegExp code; the entry
// name is |prefix| + |name| (the regexp source).
void ProfilerEventsProcessor::RegExpCodeCreateEvent(
    Logger::LogEventsAndTags tag,
    const char* prefix,
    String* name,
    Address start,
    unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
207
208
// Captures the current JS stack (function object addresses) as a
// synthetic tick sample and enqueues it into the ticks-from-VM queue,
// stamped with the current enqueue_order_. Called when profiling
// starts so the profile begins with an initial stack trace.
void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record;
  TickSample* sample = &record.sample;
  sample->state = VMState::current_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  sample->frames_count = 0;
  for (StackTraceFrameIterator it;
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    sample->stack[sample->frames_count++] =
        reinterpret_cast<Address>(frame->function());
  }
  record.order = enqueue_order_;
  ticks_from_vm_buffer_.Enqueue(record);
}
225
226
// Dequeues one code event (if any) and applies it to the generator's
// code map, storing its sequence number in *dequeue_order. Returns
// true if a record was consumed (or skipped), false if the queue was
// empty.
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    switch (record.generic.type) {
// Expands to one case per code event type, dispatching to the
// corresponding record class's UpdateCodeMap.
#define PROFILER_TYPE_CASE(type, clss) \
      case CodeEventRecord::type: \
        record.clss##_.UpdateCodeMap(generator_->code_map()); \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}
247
248
// Processes tick samples whose order number matches dequeue_order,
// draining both the VM-generated queue and the sampler's lock-free
// buffer. Returns true when a tick with a later order remains (or the
// VM queue is non-empty), false when the sampler buffer is exhausted.
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible as the
    // sampler writes w/o any sync to the queue, so if the processor
    // will get far behind, a record may be modified right under its
    // feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count >= TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      // The sample belongs to a later code event; leave it in place.
      return true;
    }
  }
}
280
281
// Processor thread main loop: interleaves tick processing with code
// event processing so samples are always matched against the code map
// state that existed when they were taken. After running_ is cleared,
// drains any residual records before exiting.
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Process ticks until we have any.
    if (ProcessTicks(dequeue_order)) {
      // All ticks of the current dequeue_order are processed,
      // proceed to the next code event.
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
  // Perform processing until we have tick events, skip remaining code events.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
300
301
// Process-wide profiler instance; created in Setup(), destroyed in TearDown().
CpuProfiler* CpuProfiler::singleton_ = NULL;
303
// Static entry point: starts collecting a profile named |title|.
// Requires Setup() to have been called.
void CpuProfiler::StartProfiling(const char* title) {
  ASSERT(singleton_ != NULL);
  singleton_->StartCollectingProfile(title);
}
308
309
// Static entry point: same as above, but the title is a JS String.
void CpuProfiler::StartProfiling(String* title) {
  ASSERT(singleton_ != NULL);
  singleton_->StartCollectingProfile(title);
}
314
315
316CpuProfile* CpuProfiler::StopProfiling(const char* title) {
317 return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
318}
319
320
// Static entry point: stops the profile named |title|, filtered by the
// caller's security token; NULL when no profiling session is active.
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
  return is_profiling() ?
      singleton_->StopCollectingProfile(security_token, title) : NULL;
}
325
326
// Returns the number of collected profiles.
int CpuProfiler::GetProfilesCount() {
  ASSERT(singleton_ != NULL);
  // The count of profiles doesn't depend on a security token.
  return singleton_->profiles_->Profiles(
      TokenEnumerator::kNoSecurityToken)->length();
}
333
334
// Returns the profile at |index|, viewed through the caller's security
// token (nodes from other security contexts are filtered).
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
  ASSERT(singleton_ != NULL);
  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
  return singleton_->profiles_->Profiles(token)->at(index);
}
340
341
Leon Clarkef7060e22010-06-03 12:02:55 +0100342CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
Steve Block6ded16b2010-05-10 14:33:55 +0100343 ASSERT(singleton_ != NULL);
Leon Clarkef7060e22010-06-03 12:02:55 +0100344 const int token = singleton_->token_enumerator_->GetTokenId(security_token);
345 return singleton_->profiles_->GetProfile(token, uid);
Steve Block6ded16b2010-05-10 14:33:55 +0100346}
347
348
349TickSample* CpuProfiler::TickSampleEvent() {
350 if (CpuProfiler::is_profiling()) {
351 return singleton_->processor_->TickSampleEvent();
352 } else {
353 return NULL;
354 }
355}
356
357
// Logger hook: reports creation of a native callback at |entry_point|.
void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
362
363
// Logger hook: reports code creation named by a C string comment.
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, const char* comment) {
  singleton_->processor_->CodeCreateEvent(
      tag, comment, code->address(), code->ExecutableSize());
}
369
370
// Logger hook: reports code creation with only a name — no script or
// line info, so empty string / kNoLineNumberInfo are substituted.
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      name,
      Heap::empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize());
}
381
382
// Logger hook: reports code creation with full script position info.
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name,
                                  String* source, int line) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      name,
      source,
      line,
      code->address(),
      code->ExecutableSize());
}
394
395
// Logger hook: reports creation of anonymous code identified by its
// argument count.
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, int args_count) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      args_count,
      code->address(),
      code->ExecutableSize());
}
404
405
// Logger hook: reports that code moved from |from| to |to|.
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
  singleton_->processor_->CodeMoveEvent(from, to);
}
409
410
// Logger hook: reports that the code at |from| was deleted.
void CpuProfiler::CodeDeleteEvent(Address from) {
  singleton_->processor_->CodeDeleteEvent(from);
}
414
415
// Logger hook: reports creation of a JS function, resolving its
// security token from its context. The IsContext() check guards
// against functions whose context slot does not (yet) hold a real
// context — presumably a placeholder; confirm against
// JSFunction::unchecked_context().
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
  int security_token_id = TokenEnumerator::kNoSecurityToken;
  if (function->unchecked_context()->IsContext()) {
    security_token_id = singleton_->token_enumerator_->GetTokenId(
        function->context()->global_context()->security_token());
  }
  singleton_->processor_->FunctionCreateEvent(
      function->address(),
      function->code()->address(),
      security_token_id);
}
427
428
// Reports a function discovered while the GC moves heap objects;
// |source| is the function's pre-move location.
void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
                                              HeapObject* source) {
  // This function is called from GC iterators (during Scavenge,
  // MC, and MS), so marking bits can be set on objects. That's
  // why unchecked accessors are used here.

  // The same function can be reported several times.
  if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
      || singleton_->processor_->IsKnownFunction(function->address())) return;

  int security_token_id = TokenEnumerator::kNoSecurityToken;
  // In debug mode, assertions may fail for contexts,
  // and we can live without security tokens in debug mode.
#ifndef DEBUG
  if (function->unchecked_context()->IsContext()) {
    security_token_id = singleton_->token_enumerator_->GetTokenId(
        function->context()->global_context()->security_token());
  }
  // Security token may not be moved yet.
  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
    // Fall back to the pre-move copy of the function to find a context.
    JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
    if (old_function->unchecked_context()->IsContext()) {
      security_token_id = singleton_->token_enumerator_->GetTokenId(
          old_function->context()->global_context()->security_token());
    }
  }
#endif
  singleton_->processor_->FunctionCreateEvent(
      function->address(),
      function->unchecked_code()->address(),
      security_token_id);
}
461
462
// Logger hook: reports that a function object moved from |from| to |to|.
void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
  singleton_->processor_->FunctionMoveEvent(from, to);
}
466
467
// Logger hook: reports that the function object at |from| was deleted.
void CpuProfiler::FunctionDeleteEvent(Address from) {
  singleton_->processor_->FunctionDeleteEvent(from);
}
471
472
// Logger hook: reports a native getter callback; "get " becomes the
// entry's name prefix.
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "get ", name, entry_point);
}
477
478
// Logger hook: reports compiled RegExp code, named by its source text.
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
  singleton_->processor_->RegExpCodeCreateEvent(
      Logger::REG_EXP_TAG,
      "RegExp: ",
      source,
      code->address(),
      code->ExecutableSize());
}
487
488
// Logger hook: reports a native setter callback; "set " becomes the
// entry's name prefix.
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "set ", name, entry_point);
}
493
494
// Owns the profile collection and token enumerator for its lifetime.
// generator_/processor_ stay NULL until the first profiling session
// (see StartProcessorIfNotStarted).
CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL) {
}
502
503
// Releases owned members. generator_/processor_ are torn down earlier
// in StopProcessorIfLastProfile when the last profile stops.
CpuProfiler::~CpuProfiler() {
  delete token_enumerator_;
  delete profiles_;
}
508
509
510void CpuProfiler::StartCollectingProfile(const char* title) {
511 if (profiles_->StartProfiling(title, next_profile_uid_++)) {
512 StartProcessorIfNotStarted();
513 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100514 processor_->AddCurrentStack();
Steve Block6ded16b2010-05-10 14:33:55 +0100515}
516
517
// Begins collecting a profile whose title is a JS String; converts the
// title to a C string and delegates.
void CpuProfiler::StartCollectingProfile(String* title) {
  StartCollectingProfile(profiles_->GetName(title));
}
521
522
// Lazily creates and starts the profile generator and processor thread,
// then replays already-existing heap code/functions into the event
// stream and enables the stack sampler. The ordering matters: the
// processor must be running before code objects are enumerated, and
// sampling is enabled last.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    // Disable logging when using the new implementation.
    saved_logging_nesting_ = Logger::logging_nesting_;
    Logger::logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    processor_->Start();
    // Enumerate stuff we already have in the heap.
    if (Heap::HasBeenSetup()) {
      if (!FLAG_prof_browser_mode) {
        // Temporarily force code logging so code objects get reported.
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        Logger::LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      Logger::LogCompiledFunctions();
      Logger::LogFunctionObjects();
      Logger::LogAccessorCallbacks();
    }
    // Enable stack sampling.
    reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
  }
}
547
548
// Stops collection of the profile named |title| and returns it (NULL if
// no such profile). The sampling rate must be read before the processor
// is potentially torn down by StopProcessorIfLastProfile.
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}
561
562
// Stops collection of the profile titled by a JS String, filtered by
// the caller's security token. As above, the sampling rate is read
// before the processor may be stopped.
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}
571
572
// If |title| names the only remaining active profile, shuts profiling
// down: stop the sampler first so no more ticks arrive, then stop and
// join the processor thread before deleting it, and finally restore
// regular logging.
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) {
    reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
    processor_->Stop();
    processor_->Join();
    delete processor_;
    delete generator_;
    processor_ = NULL;
    generator_ = NULL;
    Logger::logging_nesting_ = saved_logging_nesting_;
  }
}
585
586} } // namespace v8::internal
587
588#endif // ENABLE_LOGGING_AND_PROFILING
589
590namespace v8 {
591namespace internal {
592
593void CpuProfiler::Setup() {
594#ifdef ENABLE_LOGGING_AND_PROFILING
595 if (singleton_ == NULL) {
596 singleton_ = new CpuProfiler();
597 }
598#endif
599}
600
601
602void CpuProfiler::TearDown() {
603#ifdef ENABLE_LOGGING_AND_PROFILING
604 if (singleton_ != NULL) {
605 delete singleton_;
606 }
607 singleton_ = NULL;
608#endif
609}
610
611} } // namespace v8::internal