blob: b07601f820fbecdb037744c5a845c8e7806be160 [file] [log] [blame]
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/profiler/profile-generator.h"
6
7#include "src/ast/scopeinfo.h"
Ben Murdochc5610432016-08-08 18:44:38 +01008#include "src/base/adapters.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00009#include "src/debug/debug.h"
10#include "src/deoptimizer.h"
11#include "src/global-handles.h"
12#include "src/profiler/profile-generator-inl.h"
Ben Murdochc5610432016-08-08 18:44:38 +010013#include "src/profiler/tick-sample.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000014#include "src/splay-tree-inl.h"
15#include "src/unicode.h"
16
17namespace v8 {
18namespace internal {
19
20
// The table is backed by the pc_offset_map_ member; there is nothing to
// set up or tear down beyond the member's own lifetime.
JITLineInfoTable::JITLineInfoTable() {}


JITLineInfoTable::~JITLineInfoTable() {}
25
26
// Records the 1-based source line for the instruction at |pc_offset|.
// NOTE(review): if |pc_offset| is already mapped to a different line,
// std::map::insert is a no-op, so the first recorded line wins —
// presumably intentional; confirm before changing.
void JITLineInfoTable::SetPosition(int pc_offset, int line) {
  DCHECK(pc_offset >= 0);
  DCHECK(line > 0);  // The 1-based number of the source line.
  if (GetSourceLineNumber(pc_offset) != line) {
    pc_offset_map_.insert(std::make_pair(pc_offset, line));
  }
}
34
35
36int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
37 PcOffsetMap::const_iterator it = pc_offset_map_.lower_bound(pc_offset);
38 if (it == pc_offset_map_.end()) {
39 if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
40 return (--pc_offset_map_.end())->second;
41 }
42 return it->second;
43}
44
45
// Shared default/sentinel strings for CodeEntry fields; using common
// instances lets the fields be compared by pointer (see IsSameFunctionAs).
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";
50
51
52CodeEntry::~CodeEntry() {
53 delete line_info_;
Ben Murdochda12d292016-06-02 14:46:10 +010054 for (auto location : inline_locations_) {
55 for (auto entry : location.second) {
56 delete entry;
57 }
58 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000059}
60
61
62uint32_t CodeEntry::GetHash() const {
63 uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
64 if (script_id_ != v8::UnboundScript::kNoScriptId) {
65 hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
66 v8::internal::kZeroHashSeed);
67 hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
68 v8::internal::kZeroHashSeed);
69 } else {
70 hash ^= ComputeIntegerHash(
71 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
72 v8::internal::kZeroHashSeed);
73 hash ^= ComputeIntegerHash(
74 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
75 v8::internal::kZeroHashSeed);
76 hash ^= ComputeIntegerHash(
77 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
78 v8::internal::kZeroHashSeed);
79 hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
80 }
81 return hash;
82}
83
84
85bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
86 if (this == entry) return true;
87 if (script_id_ != v8::UnboundScript::kNoScriptId) {
88 return script_id_ == entry->script_id_ && position_ == entry->position_;
89 }
90 return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
91 resource_name_ == entry->resource_name_ &&
92 line_number_ == entry->line_number_;
93}
94
95
96void CodeEntry::SetBuiltinId(Builtins::Name id) {
97 bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
98 bit_field_ = BuiltinIdField::update(bit_field_, id);
99}
100
101
102int CodeEntry::GetSourceLine(int pc_offset) const {
103 if (line_info_ && !line_info_->empty()) {
104 return line_info_->GetSourceLineNumber(pc_offset);
105 }
106 return v8::CpuProfileNode::kNoLineNumberInfo;
107}
108
// Records the stack of inlined CodeEntry* for |pc_offset|, taking
// ownership: the contents of |inline_stack| are swapped into the map
// (leaving the argument empty) and deleted in ~CodeEntry().
void CodeEntry::AddInlineStack(int pc_offset,
                               std::vector<CodeEntry*>& inline_stack) {
  // It's better to use std::move to place the vector into the map,
  // but it's not supported by the current stdlibc++ on MacOS.
  inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
      .first->second.swap(inline_stack);
}
116
117const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
118 auto it = inline_locations_.find(pc_offset);
119 return it != inline_locations_.end() ? &it->second : NULL;
120}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000121
// Records the inlined-frame stack for |deopt_id|, taking ownership: the
// contents of |inlined_frames| are swapped into the map, leaving the
// argument empty.
void CodeEntry::AddDeoptInlinedFrames(
    int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
  // It's better to use std::move to place the vector into the map,
  // but it's not supported by the current stdlibc++ on MacOS.
  deopt_inlined_frames_
      .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
      .first->second.swap(inlined_frames);
}
130
131bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
132 return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
133}
134
// Copies script id, start position and bailout reason from |shared| into
// this entry. No-op when the function is not backed by a real Script.
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}
142
// Builds the deopt description exposed via the public profiler API.
// Without recorded inline frames the stack is a single frame at the deopt
// position; otherwise the recorded frames are emitted in reverse order,
// with the deopt's own raw offset added only to the first emitted frame.
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  DCHECK_NE(Deoptimizer::DeoptInfo::kNoDeoptId, deopt_id_);
  if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
    // No inlining at the deopt site: one frame at this function's position.
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, position_ + deopt_position_.position()}));
  } else {
    size_t deopt_position = deopt_position_.raw();
    // Copy stack of inlined frames where the deopt happened.
    std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
    for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
      info.stack.push_back(CpuProfileDeoptFrame(
          {inlined_frame.script_id, deopt_position + inlined_frame.position}));
      deopt_position = 0;  // Done with innermost frame.
    }
  }
  return info;
}
164
165
// Records |entry|'s pending deopt info on this node and clears it on the
// entry, so the same deopt is not collected again.
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}
170
171
172ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
173 HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
174 return map_entry != NULL ?
175 reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
176}
177
178
179ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
180 HashMap::Entry* map_entry =
181 children_.LookupOrInsert(entry, CodeEntryHash(entry));
182 ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
183 if (node == NULL) {
184 // New node added.
185 node = new ProfileNode(tree_, entry);
186 map_entry->value = node;
187 children_list_.Add(node);
188 }
189 return node;
190}
191
192
// Bumps the hit counter for |src_line| (1-based); ticks without line info
// are ignored. Counters live in line_ticks_, keyed by the line number.
void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  // The counter is stored directly in the void* value slot.
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}
202
203
// Copies the per-line hit counts into |entries| (capacity |length|).
// Returns false when |entries| is unusable or too small; returns true
// without writing anything when no line ticks were recorded.
bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == NULL || length == 0) return false;

  unsigned line_count = line_ticks_.occupancy();

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  // Both line number (key) and hit count (value) are stored as void* in
  // the hash map; unpack them into the output records.
  for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
       p = line_ticks_.Next(p), entry++) {
    entry->line =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
    entry->hit_count =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
  }

  return true;
}
225
226
// Debug dump of this node and, recursively, its children indented by
// |indent| spaces: self ticks, name, script id, node id, then recorded
// deopts and the bailout reason when present.
void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  // One line per collected deopt, followed by its inline stack (if any).
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
                    " with reason '%s'.\n",
                    indent + 10, "", info.stack[0].script_id,
                    info.stack[0].position, info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;; Inline point: script_id %d position: %" PRIuS
                      ".\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  // Only print a bailout reason that is neither "no reason" nor empty.
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}
259
260
// TraverseDepthFirst() callback that deletes each node once all of its
// children have been traversed — i.e. post-order, so children are freed
// before their parent.
class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  // Post-order hook: safe to delete here, the children are already gone.
  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
271
272
// The tree starts with a synthetic "(root)" entry and node; node and
// function ids both start counting at 1. function_ids_ matches entries
// with ProfileNode::CodeEntriesMatch (see GetFunctionId).
ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)),
      isolate_(isolate),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}
280
281
// Frees every node (including root_) via a post-order traversal.
ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}
286
287
288unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
289 CodeEntry* code_entry = node->entry();
290 HashMap::Entry* entry =
291 function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
292 if (!entry->value) {
293 entry->value = reinterpret_cast<void*>(next_function_id_++);
294 }
295 return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
296}
297
// Adds |path| to the tree, iterating from the last element to the first
// and descending one child per entry; NULL entries (unsymbolized frames)
// are skipped. Returns the node reached for the first (innermost) entry.
// When |update_stats| is set, that node's self ticks — and line ticks if
// |src_line| is known — are incremented.
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = NULL;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == NULL) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it);
  }
  // Attach any pending deopt info of the innermost entry to its node.
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}
318
319
// Pairs a source and a destination ProfileNode.
// NOTE(review): appears unused in this translation unit as far as visible
// here — candidate for removal if no other code references it.
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};
326
327
// Cursor over one node's children; TraverseDepthFirst keeps a stack of
// these to walk the tree without recursion.
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;  // Index of the next child to visit.
};
344
345
// Non-recursive implementation of a depth-first post-order tree traversal.
// |callback| receives BeforeTraversingChild on descent, and
// AfterAllChildrenTraversed / AfterChildTraversed on the way back up.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      // Descend into the next unvisited child.
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      // All children done: fire the post-order hooks and pop.
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
368
369
// A profile stamps its start time immediately on creation; individual
// samples and timestamps are only retained when |record_samples| is set
// (see AddPath).
CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(isolate) {}
375
// Adds one sampled stack to the top-down tree; when sample recording is
// enabled and the timestamp is valid, the (timestamp, top frame node)
// pair is remembered as an individual sample.
void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const std::vector<CodeEntry*>& path, int src_line,
                         bool update_stats) {
  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats);
  if (record_samples_ && !timestamp.IsNull()) {
    timestamps_.Add(timestamp);
    samples_.Add(top_frame_node);
  }
}
386
387
// Finalizes the profile by stamping its end time. NOTE(review): despite
// the name, no tick/rate computation is visible here — only end_time_ is
// recorded in this version.
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}
391
392
// Debug dump of the whole top-down call tree.
void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}
397
398
CodeMap::~CodeMap() {}


// "No address" key sentinel required by the splay tree configuration.
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
403
404
// Registers |entry| as covering [addr, addr + size). Previously added
// ranges that overlap are removed first, keeping the tree overlap-free.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
411
412
// Removes every registered range that intersects [start, end), scanning
// from high addresses downward.
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    // [start2, end2) is the highest range starting at or below addr.
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;  // Continue below the range just examined.
  }
  // Deletion is deferred so the tree is not mutated while being searched.
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
425
426
427CodeEntry* CodeMap::FindEntry(Address addr) {
428 CodeTree::Locator locator;
429 if (tree_.FindGreatestLessThan(addr, &locator)) {
430 // locator.key() <= addr. Need to check that addr is within entry.
431 const CodeEntryInfo& entry = locator.value();
432 if (addr < (locator.key() + entry.size)) {
433 return entry.entry;
434 }
435 }
436 return NULL;
437}
438
439
440void CodeMap::MoveCode(Address from, Address to) {
441 if (from == to) return;
442 CodeTree::Locator locator;
443 if (!tree_.Find(from, &locator)) return;
444 CodeEntryInfo entry = locator.value();
445 tree_.Remove(from);
446 AddCode(to, entry.entry, entry.size);
447}
448
449
// Splay-tree visitor printing one "address size name" line per range.
void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
454
455
// Debug dump of all registered code ranges.
void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}
460
461
// The semaphore is initialized to 1 and used as a binary mutex guarding
// current_profiles_ (see AddPathToCurrentProfiles / StartProfiling).
CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      isolate_(heap->isolate()),
      current_profiles_semaphore_(1) {}
466
467
// Iterate() callbacks used by ~CpuProfilesCollection to free the pointers
// owned by its lists.
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}
476
477
// The collection owns every finished and in-flight profile as well as all
// CodeEntry objects handed out by NewCodeEntry().
CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}
483
484
// Starts a new in-flight profile named |title| under the collection's
// lock. Returns false only when the simultaneous-profile limit is hit;
// a duplicate title is treated as success so sampling still proceeds.
bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}
504
505
// Finishes the in-flight profile named |title| (an empty title matches the
// most recently started one), moves it to the finished list and returns
// it. Returns NULL when no matching profile is in flight.
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  // Scan newest-first so an empty title stops the latest profile.
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  // Stamp the end time outside the lock; the profile is no longer shared.
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}
523
524
525bool CpuProfilesCollection::IsLastProfile(const char* title) {
526 // Called from VM thread, and only it can mutate the list,
527 // so no locking is needed here.
528 if (current_profiles_.length() != 1) return false;
529 return StrLength(title) == 0
530 || strcmp(current_profiles_[0]->title(), title) == 0;
531}
532
533
534void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
535 // Called from VM thread for a completed profile.
536 for (int i = 0; i < finished_profiles_.length(); i++) {
537 if (profile == finished_profiles_[i]) {
538 finished_profiles_.Remove(i);
539 return;
540 }
541 }
542 UNREACHABLE();
543}
544
// Appends the sampled |path| to every in-flight profile, under the
// collection's semaphore since profiles can be started/stopped
// concurrently.
void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
    int src_line, bool update_stats) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
  }
  current_profiles_semaphore_.Signal();
}
557
558
// Creates a CodeEntry that is owned by this collection (freed in the
// destructor via code_entries_).
CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
    const char* resource_name, int line_number, int column_number,
    JITLineInfoTable* line_info, Address instruction_start) {
  CodeEntry* code_entry =
      new CodeEntry(tag, name, name_prefix, resource_name, line_number,
                    column_number, line_info, instruction_start);
  code_entries_.Add(code_entry);
  return code_entry;
}
569
570
// Display names for the synthetic entries representing VM-level activity
// rather than real JS functions (see EntryForVMState).
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";
579
580
// Pre-creates the shared synthetic entries; ownership stays with
// |profiles| (see CpuProfilesCollection::NewCodeEntry).
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}
594
595
// Symbolizes one tick: translates the sampled pc and stack addresses into
// CodeEntry pointers (NULL for unresolvable frames), expands inline
// stacks, determines the source line of the innermost symbolized frame,
// and feeds the resulting path into all in-flight profiles.
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  std::vector<CodeEntry*> entries;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  entries.reserve(sample.frames_count + 3);

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
    } else {
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
      // If there is no pc_entry we're likely in native code.
      // Find out, if top of stack was pointing inside a JS function
      // meaning that we have encountered a frameless invocation.
      if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
                        sample.top_frame_type == StackFrame::INTERPRETED ||
                        sample.top_frame_type == StackFrame::OPTIMIZED)) {
        pc_entry = code_map_.FindEntry(sample.tos);
      }
      // If pc is in the function code before it set up stack frame or after the
      // frame was destroyed SafeStackFrameIterator incorrectly thinks that
      // ebp contains return address of the current function and skips caller's
      // frame. Check for this case and just skip such samples.
      if (pc_entry) {
        int pc_offset =
            static_cast<int>(sample.pc - pc_entry->instruction_start());
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        entries.push_back(pc_entry);

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When current function is either the Function.prototype.apply or the
          // Function.prototype.call builtin the top frame is either frame of
          // the calling JS function or internal frame.
          // In the latter case we know the caller for sure but in the
          // former case we don't so we simply replace the frame with
          // 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            entries.push_back(unresolved_entry_);
          }
        }
      }
    }

    // Symbolize the captured stack addresses, innermost frame first.
    for (const Address *stack_pos = sample.stack,
                       *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end; ++stack_pos) {
      CodeEntry* entry = code_map_.FindEntry(*stack_pos);

      if (entry) {
        // Find out if the entry has an inlining stack associated.
        int pc_offset =
            static_cast<int>(*stack_pos - entry->instruction_start());
        const std::vector<CodeEntry*>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          entries.insert(entries.end(), inline_stack->rbegin(),
                         inline_stack->rend());
        }
        // Skip unresolved frames (e.g. internal frame) and get source line of
        // the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
      }
      // NULL entries are kept; AddPathFromEnd skips them later.
      entries.push_back(entry);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : entries) {
      if (e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      entries.push_back(EntryForVMState(sample.state));
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
                                      sample.update_stats);
}
702
703
704CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
705 switch (tag) {
706 case GC:
707 return gc_entry_;
708 case JS:
709 case COMPILER:
710 // DOM events handlers are reported as OTHER / EXTERNAL entries.
711 // To avoid confusing people, let's put all these entries into
712 // one bucket.
713 case OTHER:
714 case EXTERNAL:
715 return program_entry_;
716 case IDLE:
717 return idle_entry_;
718 default: return NULL;
719 }
720}
721
722} // namespace internal
723} // namespace v8