blob: fcf539f3bdf4b213205053c8e603afaa28157a87 [file] [log] [blame]
Steve Block6ded16b2010-05-10 14:33:55 +01001// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "cpu-profiler-inl.h"
31
32#ifdef ENABLE_LOGGING_AND_PROFILING
33
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +010034#include "frames-inl.h"
Kristian Monsen0d5e1162010-09-30 15:31:59 +010035#include "hashmap.h"
Steve Block6ded16b2010-05-10 14:33:55 +010036#include "log-inl.h"
Ben Murdochb0fe1622011-05-05 13:52:32 +010037#include "vm-state-inl.h"
Steve Block6ded16b2010-05-10 14:33:55 +010038
39#include "../include/v8-profiler.h"
40
41namespace v8 {
42namespace internal {
43
// Sizing of the queues between the VM/sampler threads and the
// profiler-events processing thread.
static const int kEventsBufferSize = 256*KB;      // Code-event queue size.
static const int kTickSamplesBufferChunkSize = 64*KB;   // One chunk of the tick-sample ring.
static const int kTickSamplesBufferChunksCount = 16;    // Number of chunks in the ring.
48
// Background thread that turns code/tick event records into profile data.
// The generator is borrowed (not owned); known_functions_ is owned and
// freed in the destructor.
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : Thread("v8:ProfEvntProc"),
      generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0),
      known_functions_(new HashMap(AddressesMatch)) {
}


ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  // generator_ is not owned here; only the address set is released.
  delete known_functions_;
}
Steve Block6ded16b2010-05-10 14:33:55 +010064
65
66void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
67 const char* prefix,
68 String* name,
69 Address start) {
70 if (FilterOutCodeCreateEvent(tag)) return;
71 CodeEventsContainer evt_rec;
72 CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
73 rec->type = CodeEventRecord::CODE_CREATION;
74 rec->order = ++enqueue_order_;
75 rec->start = start;
76 rec->entry = generator_->NewCodeEntry(tag, prefix, name);
77 rec->size = 1;
78 events_buffer_.Enqueue(evt_rec);
79}
80
81
// Enqueues a CODE_CREATION record for code that has a script name and
// line number (e.g. a compiled function).
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              String* name,
                                              String* resource_name,
                                              int line_number,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;  // Serializes code events against ticks.
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}


// Overload for code identified only by a C-string comment.
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              const char* name,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}


// Overload for code identified by an argument count (e.g. arity-specific
// stubs/builtins).
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              int args_count,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, args_count);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
130
131
// Enqueues a CODE_MOVE record; applied to the code map on the processor
// thread.
void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
  CodeEventsContainer evt_rec;
  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
  rec->type = CodeEventRecord::CODE_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}


// Enqueues a CODE_DELETE record for code starting at |from|.
void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
  CodeEventsContainer evt_rec;
  CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
  rec->type = CodeEventRecord::CODE_DELETE;
  rec->order = ++enqueue_order_;
  rec->start = from;
  events_buffer_.Enqueue(evt_rec);
}
151
152
// Enqueues a CODE_ALIAS record mapping a JSFunction address onto its
// code object's address, tagged with a security token, and remembers the
// function address in known_functions_.
void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
                                                  Address start,
                                                  int security_token_id) {
  CodeEventsContainer evt_rec;
  CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
  rec->type = CodeEventRecord::CODE_ALIAS;
  rec->order = ++enqueue_order_;
  rec->start = alias;
  rec->entry = generator_->NewCodeEntry(security_token_id);
  rec->code_start = start;
  events_buffer_.Enqueue(evt_rec);

  // Lookup with insert=true registers the function address.
  known_functions_->Lookup(alias, AddressHash(alias), true);
}


// Moves a function alias: forwards the underlying code move and updates
// the known-functions set to track the new address.
void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
  CodeMoveEvent(from, to);

  if (IsKnownFunction(from)) {
    known_functions_->Remove(from, AddressHash(from));
    known_functions_->Lookup(to, AddressHash(to), true);
  }
}


// Deletes a function alias and drops it from the known-functions set.
void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
  CodeDeleteEvent(from);

  known_functions_->Remove(from, AddressHash(from));
}


// Returns true iff FunctionCreateEvent has been recorded for |start|.
bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
  HashMap::Entry* entry =
      known_functions_->Lookup(start, AddressHash(start), false);
  return entry != NULL;
}
191
192
// Replays create-events for functions remembered during GC moves (see
// CpuProfiler::FunctionCreateEventFromMove), then clears the list.
void ProfilerEventsProcessor::ProcessMovedFunctions() {
  for (int i = 0; i < moved_functions_.length(); ++i) {
    JSFunction* function = moved_functions_[i];
    CpuProfiler::FunctionCreateEvent(function);
  }
  moved_functions_.Clear();
}


// Defers reporting of a function discovered during GC; processed later
// by ProcessMovedFunctions when it is safe to touch heap objects.
void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
  moved_functions_.Add(function);
}
205
206
Steve Block6ded16b2010-05-10 14:33:55 +0100207void ProfilerEventsProcessor::RegExpCodeCreateEvent(
208 Logger::LogEventsAndTags tag,
209 const char* prefix,
210 String* name,
211 Address start,
212 unsigned size) {
213 if (FilterOutCodeCreateEvent(tag)) return;
214 CodeEventsContainer evt_rec;
215 CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
216 rec->type = CodeEventRecord::CODE_CREATION;
217 rec->order = ++enqueue_order_;
218 rec->start = start;
219 rec->entry = generator_->NewCodeEntry(tag, prefix, name);
220 rec->size = size;
221 events_buffer_.Enqueue(evt_rec);
222}
223
224
// Synthesizes a tick sample from the current VM stack and enqueues it on
// the VM-thread tick queue. Used when profiling starts so the profile
// begins with the stack that initiated it.
void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record;
  TickSample* sample = &record.sample;
  sample->state = Top::current_vm_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  sample->frames_count = 0;
  // Walk JS frames up to the sample capacity.
  for (StackTraceFrameIterator it;
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    sample->stack[sample->frames_count++] =
        reinterpret_cast<Address>(frame->function());
  }
  // Tag with the current order so ProcessTicks drains it at the right
  // point relative to code events; note: no increment here.
  record.order = enqueue_order_;
  ticks_from_vm_buffer_.Enqueue(record);
}
241
242
// Dequeues one code event (if any) and applies it to the code map.
// On success stores the event's order into *dequeue_order so ticks up to
// that order can be processed. Returns false only when the queue is empty.
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    // Dispatch on the record type; one case per CODE_EVENTS_TYPE_LIST entry.
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}
263
264
// Drains tick samples whose order matches |dequeue_order| from both the
// VM-thread queue and the sampler's ring buffer. Returns true when a
// newer tick (or a pending VM tick) remains, false when both are empty.
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    // First, a tick recorded directly on the VM thread, if it is due.
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible as the
    // sampler writes w/o any sync to the queue, so if the processor
    // will get far behind, a record may be modified right under its
    // feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count >= TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      // Tick belongs to a later code event; stop and let the caller
      // advance dequeue_order first.
      return true;
    }
  }
}
296
297
// Thread main loop: alternates between draining tick samples and
// advancing through code events until Stop() clears running_, then
// flushes whatever is left.
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Drain all ticks belonging to the current dequeue_order.
    if (ProcessTicks(dequeue_order)) {
      // All ticks of the current dequeue_order are processed,
      // proceed to the next code event.
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
  // Perform processing until we have tick events, skip remaining code events.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
316
317
// Process-wide singleton, created in Setup() and destroyed in TearDown().
CpuProfiler* CpuProfiler::singleton_ = NULL;
// Atomic flag read by the sampler path (TickSampleEvent) without a lock.
Atomic32 CpuProfiler::is_profiling_ = false;
Steve Block6ded16b2010-05-10 14:33:55 +0100320
// Public entry point: starts collecting a profile under |title|.
// Requires Setup() to have created the singleton.
void CpuProfiler::StartProfiling(const char* title) {
  ASSERT(singleton_ != NULL);
  singleton_->StartCollectingProfile(title);
}


void CpuProfiler::StartProfiling(String* title) {
  ASSERT(singleton_ != NULL);
  singleton_->StartCollectingProfile(title);
}


// Stops the named profile; returns NULL if profiling is not active.
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
}


// Token-filtered variant: the returned profile is filtered by the
// caller's security token.
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
  return is_profiling() ?
      singleton_->StopCollectingProfile(security_token, title) : NULL;
}
342
343
// Returns the number of collected profiles.
int CpuProfiler::GetProfilesCount() {
  ASSERT(singleton_ != NULL);
  // The count of profiles doesn't depend on a security token.
  return singleton_->profiles_->Profiles(
      TokenEnumerator::kNoSecurityToken)->length();
}


// Returns the profile at |index|, filtered by the caller's security token.
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
  ASSERT(singleton_ != NULL);
  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
  return singleton_->profiles_->Profiles(token)->at(index);
}


// Finds a profile by its unique id, filtered by the security token.
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
  ASSERT(singleton_ != NULL);
  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
  return singleton_->profiles_->GetProfile(token, uid);
}
364
365
366TickSample* CpuProfiler::TickSampleEvent() {
367 if (CpuProfiler::is_profiling()) {
368 return singleton_->processor_->TickSampleEvent();
369 } else {
370 return NULL;
371 }
372}
373
374
// The wrappers below forward logger notifications to the events
// processor; they are only invoked while profiling is active, so
// singleton_->processor_ is expected to be non-NULL.

void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, const char* comment) {
  singleton_->processor_->CodeCreateEvent(
      tag, comment, code->address(), code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name) {
  // No script info available: use an empty resource name and no line.
  singleton_->processor_->CodeCreateEvent(
      tag,
      name,
      Heap::empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name,
                                  String* source, int line) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      name,
      source,
      line,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, int args_count) {
  singleton_->processor_->CodeCreateEvent(
      tag,
      args_count,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeMoveEvent(Address from, Address to) {
  singleton_->processor_->CodeMoveEvent(from, to);
}


void CpuProfiler::CodeDeleteEvent(Address from) {
  singleton_->processor_->CodeDeleteEvent(from);
}
431
432
// Reports a JSFunction as an alias of its shared code, resolving the
// security token from the function's global context when available.
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
  int security_token_id = TokenEnumerator::kNoSecurityToken;
  // The context may not be fully set up yet; only dereference it when it
  // actually is a Context.
  if (function->unchecked_context()->IsContext()) {
    security_token_id = singleton_->token_enumerator_->GetTokenId(
        function->context()->global_context()->security_token());
  }
  singleton_->processor_->FunctionCreateEvent(
      function->address(),
      function->shared()->code()->address(),
      security_token_id);
}
444
445
// Replays function-create events that were deferred during GC.
void CpuProfiler::ProcessMovedFunctions() {
  singleton_->processor_->ProcessMovedFunctions();
}


// Defers reporting of a moved function until after GC.
void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
  // This function is called from GC iterators (during Scavenge,
  // MC, and MS), so marking bits can be set on objects. That's
  // why unchecked accessors are used here.

  // The same function can be reported several times.
  if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
      || singleton_->processor_->IsKnownFunction(function->address())) return;

  singleton_->processor_->RememberMovedFunction(function);
}
462
463
// Thin forwarding wrappers from logger notifications to the processor.

void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
  singleton_->processor_->FunctionMoveEvent(from, to);
}


void CpuProfiler::FunctionDeleteEvent(Address from) {
  singleton_->processor_->FunctionDeleteEvent(from);
}


// Property getter callbacks are reported with a "get " name prefix.
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "get ", name, entry_point);
}


void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
  singleton_->processor_->RegExpCodeCreateEvent(
      Logger::REG_EXP_TAG,
      "RegExp: ",
      source,
      code->address(),
      code->ExecutableSize());
}


// Property setter callbacks are reported with a "set " name prefix.
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
  singleton_->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "set ", name, entry_point);
}
494
495
// The profiles collection and token enumerator are owned for the
// profiler's lifetime; generator_/processor_ are created lazily when
// the first profile starts (see StartProcessorIfNotStarted).
CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL) {
}


CpuProfiler::~CpuProfiler() {
  // generator_/processor_ are torn down in StopProcessorIfLastProfile.
  delete token_enumerator_;
  delete profiles_;
}
509
510
511void CpuProfiler::StartCollectingProfile(const char* title) {
512 if (profiles_->StartProfiling(title, next_profile_uid_++)) {
513 StartProcessorIfNotStarted();
514 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100515 processor_->AddCurrentStack();
Steve Block6ded16b2010-05-10 14:33:55 +0100516}
517
518
519void CpuProfiler::StartCollectingProfile(String* title) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100520 StartCollectingProfile(profiles_->GetName(title));
Steve Block6ded16b2010-05-10 14:33:55 +0100521}
522
523
// Lazily spins up the profiling machinery: generator + processor thread,
// pre-existing heap code enumeration, and sampler activation. The order
// below is load-bearing: is_profiling_ is set and the processor started
// before code objects are logged, and the sampler is enabled last.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    // Disable logging when using the new implementation.
    saved_logging_nesting_ = Logger::logging_nesting_;
    Logger::logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    NoBarrier_Store(&is_profiling_, true);
    processor_->Start();
    // Enumerate stuff we already have in the heap.
    if (Heap::HasBeenSetup()) {
      if (!FLAG_prof_browser_mode) {
        // Temporarily force code logging so existing code objects are
        // reported, then restore the user's flag.
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        Logger::LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      Logger::LogCompiledFunctions();
      Logger::LogFunctionObjects();
      Logger::LogAccessorCallbacks();
    }
    // Enable stack sampling.
    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
    if (!sampler->IsActive()) sampler->Start();
    sampler->IncreaseProfilingDepth();
  }
}
551
552
// Stops collecting the named profile. The sampling rate must be read
// from the generator BEFORE StopProcessorIfLastProfile, which may
// delete generator_.
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}


// Token-filtered variant; returns the profile filtered by
// |security_token| without printing it.
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}
575
576
// Tears down the profiling machinery when the profile being stopped is
// the last active one. Teardown mirrors startup in reverse: sampler
// first, then the processor thread (Stop + Join before delete), then
// the profiling flag and logging nesting are restored.
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) {
    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
    sampler->DecreaseProfilingDepth();
    sampler->Stop();
    processor_->Stop();
    processor_->Join();  // Wait for the thread to drain its queues.
    delete processor_;
    delete generator_;
    processor_ = NULL;
    NoBarrier_Store(&is_profiling_, false);
    generator_ = NULL;
    Logger::logging_nesting_ = saved_logging_nesting_;
  }
}
592
593} } // namespace v8::internal
594
595#endif // ENABLE_LOGGING_AND_PROFILING
596
597namespace v8 {
598namespace internal {
599
600void CpuProfiler::Setup() {
601#ifdef ENABLE_LOGGING_AND_PROFILING
602 if (singleton_ == NULL) {
603 singleton_ = new CpuProfiler();
604 }
605#endif
606}
607
608
609void CpuProfiler::TearDown() {
610#ifdef ENABLE_LOGGING_AND_PROFILING
611 if (singleton_ != NULL) {
612 delete singleton_;
613 }
614 singleton_ = NULL;
615#endif
616}
617
618} } // namespace v8::internal