// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/profile-generator.h"

#include "src/ast/scopeinfo.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/sampler.h"
#include "src/splay-tree-inl.h"
#include "src/unicode.h"

namespace v8 {
namespace internal {


JITLineInfoTable::JITLineInfoTable() {}


JITLineInfoTable::~JITLineInfoTable() {}


void JITLineInfoTable::SetPosition(int pc_offset, int line) {
  DCHECK(pc_offset >= 0);
  DCHECK(line > 0);  // The 1-based number of the source line.
  if (GetSourceLineNumber(pc_offset) != line) {
    pc_offset_map_.insert(std::make_pair(pc_offset, line));
  }
}


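// Illustrative lookup example: with recorded positions
// {0 -> line 1, 10 -> line 3, 30 -> line 7}, lower_bound maps pc_offset 12 to
// line 7 (the first recorded offset at or above 12), while any pc_offset
// beyond 30 falls back to the last recorded line.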
int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
  PcOffsetMap::const_iterator it = pc_offset_map_.lower_bound(pc_offset);
  if (it == pc_offset_map_.end()) {
    if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
    return (--pc_offset_map_.end())->second;
  }
  return it->second;
}


const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";


CodeEntry::~CodeEntry() {
  delete line_info_;
  for (auto location : inline_locations_) {
    for (auto entry : location.second) {
      delete entry;
    }
  }
}


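// Note on hashing/equality: entries that carry a script id are keyed by
// (script_id, position); all other entries fall back to the raw name and
// resource-name pointers plus the line number, which is consistent with
// IsSameFunctionAs() below comparing those strings by pointer as well.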
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
                               v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}


bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
  if (this == entry) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return script_id_ == entry->script_id_ && position_ == entry->position_;
  }
  return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
         resource_name_ == entry->resource_name_ &&
         line_number_ == entry->line_number_;
}


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}


int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_ && !line_info_->empty()) {
    return line_info_->GetSourceLineNumber(pc_offset);
  }
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

void CodeEntry::AddInlineStack(int pc_offset,
                               std::vector<CodeEntry*>& inline_stack) {
  // It would be better to use std::move to place the vector into the map,
  // but that is not supported by the standard library currently used on
  // MacOS.
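  // A rough sketch of the move-based alternative (assuming full C++11
  // library support) would be:
  //   inline_locations_[pc_offset] = std::move(inline_stack);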
  inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
      .first->second.swap(inline_stack);
}

const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
  auto it = inline_locations_.find(pc_offset);
  return it != inline_locations_.end() ? &it->second : NULL;
}

void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}

CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  if (inlined_function_infos_.empty()) {
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, position_ + deopt_position_.position()}));
    return info;
  }
  // Copy the only branch from the inlining tree where the deopt happened.
  SourcePosition position = deopt_position_;
  int inlining_id = InlinedFunctionInfo::kNoParentId;
  for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
    InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
    if (std::binary_search(current_info.deopt_pc_offsets.begin(),
                           current_info.deopt_pc_offsets.end(), pc_offset_)) {
      inlining_id = static_cast<int>(i);
      break;
    }
  }
  while (inlining_id != InlinedFunctionInfo::kNoParentId) {
    InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
    info.stack.push_back(
        CpuProfileDeoptFrame({inlined_info.script_id,
                              inlined_info.start_position + position.raw()}));
    position = inlined_info.inline_position;
    inlining_id = inlined_info.parent_id;
  }
  return info;
}


void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}


ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}


ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.LookupOrInsert(entry, CodeEntryHash(entry));
  ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
  if (node == NULL) {
    // New node added.
    node = new ProfileNode(tree_, entry);
    map_entry->value = node;
    children_list_.Add(node);
  }
  return node;
}


void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment the hit counter for the given source line, adding a new
  // entry if the line has not been seen before.
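  // The tick count lives directly in the HashMap entry's value slot: the
  // void* value is reinterpreted as an integer counter and bumped in place.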
  HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}


bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == NULL || length == 0) return false;

  unsigned line_count = line_ticks_.occupancy();

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
       p = line_ticks_.Next(p), entry++) {
    entry->line =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
    entry->hit_count =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
  }

  return true;
}


void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    base::OS::Print(
        "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
        indent + 10, "", info.stack[0].script_id, info.stack[0].position,
        info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;; Inline point: script_id %d position: %d.\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};


ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)),
      isolate_(isolate),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}


ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
  CodeEntry* code_entry = node->entry();
  HashMap::Entry* entry =
      function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
  if (!entry->value) {
    entry->value = reinterpret_cast<void*>(next_function_id_++);
  }
  return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}

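// The sampled path arrives with the top-of-stack entry first; walking it in
// reverse builds (or finds) one child per frame starting from the root, so
// the returned node corresponds to the topmost sampled frame.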
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = NULL;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == NULL) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it);
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}


struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
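// For example, for a root with a single child A that itself has one child B,
// the callbacks fire as: BeforeTraversingChild(root, A),
// BeforeTraversingChild(A, B), AfterAllChildrenTraversed(B),
// AfterChildTraversed(A, B), AfterAllChildrenTraversed(A),
// AfterChildTraversed(root, A), AfterAllChildrenTraversed(root).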
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}


CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(isolate) {}

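// When sample recording is enabled, the node for the topmost frame and the
// sample's timestamp are appended to two parallel lists, keeping each
// recorded sample associated with its time of capture.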
void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const std::vector<CodeEntry*>& path, int src_line,
                         bool update_stats) {
  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats);
  if (record_samples_ && !timestamp.IsNull()) {
    timestamps_.Add(timestamp);
    samples_.Add(top_frame_node);
  }
}


void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}


void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}


CodeMap::~CodeMap() {}


const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;


void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}


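// Removes every previously registered code range that overlaps the half-open
// interval [start, end), scanning backwards from the end so that
// FindGreatestLessThan can enumerate the overlapping candidates one by one.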
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}


CodeEntry* CodeMap::FindEntry(Address addr) {
  CodeTree::Locator locator;
  if (tree_.FindGreatestLessThan(addr, &locator)) {
    // locator.key() <= addr. Need to check that addr is within entry.
    const CodeEntryInfo& entry = locator.value();
    if (addr < (locator.key() + entry.size)) {
      return entry.entry;
    }
  }
  return NULL;
}


void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  AddCode(to, entry.entry, entry.size);
}


void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}


void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}


CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      isolate_(heap->isolate()),
      current_profiles_semaphore_(1) {}


static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}


bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it to collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}


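// An empty title stops the most recently started profile; otherwise the
// newest profile with a matching title is stopped.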
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
    int src_line, bool update_stats) {
  // Since starting / stopping profiles is rare relative to this method,
  // we don't bother minimizing the duration of lock holding, e.g. by
  // copying the contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
  }
  current_profiles_semaphore_.Signal();
}


CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
    const char* resource_name, int line_number, int column_number,
    JITLineInfoTable* line_info, Address instruction_start) {
  CodeEntry* code_entry =
      new CodeEntry(tag, name, name_prefix, resource_name, line_number,
                    column_number, line_info, instruction_start);
  code_entries_.Add(code_entry);
  return code_entry;
}


const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";


ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}


void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  std::vector<CodeEntry*> entries;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  entries.reserve(sample.frames_count + 3);
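  // Entries are collected with the topmost frame first; AddPathFromEnd()
  // later walks the vector in reverse so the profile tree grows from the
  // root towards the sampled leaf.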

  // A ProfileNode knows nothing about the different versions of generated
  // code for the same JS function. The line number information associated
  // with the latest version of generated code is used to find the source
  // line for a JS function; the detected line is then passed to the
  // ProfileNode to bump the tick count for that line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use the PC when in external callback code, as it can point
      // inside the callback's code, and we would erroneously report that
      // the callback calls itself.
      entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
    } else {
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
      // If there is no pc_entry, we are likely in native code. Check whether
      // the top of stack was pointing inside a JS function, which would mean
      // that we have encountered a frameless invocation.
      if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
                        sample.top_frame_type == StackFrame::INTERPRETED ||
                        sample.top_frame_type == StackFrame::OPTIMIZED)) {
        pc_entry = code_map_.FindEntry(sample.tos);
      }
      // If the PC is in the function code before it has set up the stack
      // frame, or after the frame was destroyed, SafeStackFrameIterator
      // incorrectly thinks that ebp contains the return address of the
      // current function and skips the caller's frame. Check for this case
      // and just skip such samples.
      if (pc_entry) {
        int pc_offset =
            static_cast<int>(sample.pc - pc_entry->instruction_start());
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        entries.push_back(pc_entry);

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When the current function is either the Function.prototype.apply
          // or the Function.prototype.call builtin, the top frame is either
          // the frame of the calling JS function or an internal frame.
          // In the latter case we know the caller for sure, but in the
          // former case we don't, so we simply replace the frame with an
          // 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            entries.push_back(unresolved_entry_);
          }
        }
      }
    }

    for (const Address *stack_pos = sample.stack,
                       *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end; ++stack_pos) {
      CodeEntry* entry = code_map_.FindEntry(*stack_pos);

      if (entry) {
        // Find out whether the entry has an associated inlining stack.
        int pc_offset =
            static_cast<int>(*stack_pos - entry->instruction_start());
        const std::vector<CodeEntry*>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          entries.insert(entries.end(), inline_stack->rbegin(),
                         inline_stack->rend());
        }
        // Skip unresolved frames (e.g. internal frames) and get the source
        // line of the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
      }
      entries.push_back(entry);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : entries) {
      if (e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      entries.push_back(EntryForVMState(sample.state));
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
                                      sample.update_stats);
}


CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return gc_entry_;
    case JS:
    case COMPILER:
      // DOM event handlers are reported as OTHER / EXTERNAL entries.
      // To avoid confusing people, let's put all these entries into
      // one bucket.
    case OTHER:
    case EXTERNAL:
      return program_entry_;
    case IDLE:
      return idle_entry_;
    default: return NULL;
  }
}

}  // namespace internal
}  // namespace v8