Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/profiler/allocation-tracker.cc b/src/profiler/allocation-tracker.cc
index 791cdf0..6acd191 100644
--- a/src/profiler/allocation-tracker.cc
+++ b/src/profiler/allocation-tracker.cc
@@ -149,8 +149,7 @@
void AddressToTraceMap::Print() {
- PrintF("[AddressToTraceMap (%" V8_SIZET_PREFIX V8PRIuPTR "): \n",
- ranges_.size());
+ PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
PrintF("[%p - %p] => %u\n", it->second.start, it->first,
it->second.trace_node_id);
@@ -231,7 +230,7 @@
Isolate* isolate = heap->isolate();
int length = 0;
- StackTraceFrameIterator it(isolate);
+ JavaScriptFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
@@ -307,9 +306,8 @@
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- &HandleWeakScript);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()), this,
+ &HandleWeakScript, v8::WeakCallbackType::kParameter);
}
@@ -327,9 +325,8 @@
info_->column = Script::GetColumnNumber(script_, start_position_);
}
-
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+ const v8::WeakCallbackInfo<void>& data) {
UnresolvedLocation* loc =
reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
diff --git a/src/profiler/allocation-tracker.h b/src/profiler/allocation-tracker.h
index 03802a5..dbcf4a7 100644
--- a/src/profiler/allocation-tracker.h
+++ b/src/profiler/allocation-tracker.h
@@ -129,8 +129,7 @@
void Resolve();
private:
- static void HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void HandleWeakScript(const v8::WeakCallbackInfo<void>& data);
Handle<Script> script_;
int start_position_;
diff --git a/src/profiler/cpu-profiler-inl.h b/src/profiler/cpu-profiler-inl.h
index 45e4ccf..d8c9c90 100644
--- a/src/profiler/cpu-profiler-inl.h
+++ b/src/profiler/cpu-profiler-inl.h
@@ -35,7 +35,7 @@
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) entry->set_deopt_info(deopt_reason, position, pc_offset);
+ if (entry != NULL) entry->set_deopt_info(deopt_reason, position, deopt_id);
}
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index 47585b7..5e4a444 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -242,7 +242,7 @@
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
AbstractCode* code,
SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name) {
+ Name* script_name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -253,9 +253,6 @@
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
NULL, code->instruction_start());
RecordInliningInfo(rec->entry, code);
- if (info) {
- rec->entry->set_inlined_function_infos(info->inlined_function_infos());
- }
rec->entry->FillFunctionInfo(shared);
rec->size = code->ExecutableSize();
processor_->Enqueue(evt_rec);
@@ -263,8 +260,7 @@
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
AbstractCode* abstract_code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name,
+ SharedFunctionInfo* shared, Name* script_name,
int line, int column) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -310,9 +306,7 @@
profiles_->GetName(InferScriptName(script_name, shared)), line, column,
line_table, abstract_code->instruction_start());
RecordInliningInfo(rec->entry, abstract_code);
- if (info) {
- rec->entry->set_inlined_function_infos(info->inlined_function_infos());
- }
+ RecordDeoptInlinedFrames(rec->entry, abstract_code);
rec->entry->FillFunctionInfo(shared);
rec->size = abstract_code->ExecutableSize();
processor_->Enqueue(evt_rec);
@@ -356,7 +350,7 @@
rec->start = code->address();
rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
rec->position = info.position;
- rec->pc_offset = pc - code->instruction_start();
+ rec->deopt_id = info.deopt_id;
processor_->Enqueue(evt_rec);
processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
}
@@ -453,6 +447,54 @@
}
}
+void CpuProfiler::RecordDeoptInlinedFrames(CodeEntry* entry,
+ AbstractCode* abstract_code) {
+ if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
+ Code* code = abstract_code->GetCode();
+ DeoptimizationInputData* deopt_input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
+ for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
+ RelocInfo* reloc_info = rit.rinfo();
+ DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
+ int deopt_id = static_cast<int>(reloc_info->data());
+ int translation_index =
+ deopt_input_data->TranslationIndex(deopt_id)->value();
+ TranslationIterator it(deopt_input_data->TranslationByteArray(),
+ translation_index);
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
+ while (it.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+ if (opcode != Translation::JS_FRAME &&
+ opcode != Translation::INTERPRETED_FRAME) {
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ continue;
+ }
+ BailoutId ast_id = BailoutId(it.Next());
+ int shared_info_id = it.Next();
+ it.Next(); // Skip height
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(
+ deopt_input_data->LiteralArray()->get(shared_info_id));
+ int source_position = Deoptimizer::ComputeSourcePosition(shared, ast_id);
+ int script_id = v8::UnboundScript::kNoScriptId;
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ script_id = script->id();
+ }
+ CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
+ inlined_frames.push_back(frame);
+ }
+ if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+ entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
+ DCHECK(inlined_frames.empty());
+ }
+ }
+}
+
CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
sampling_interval_(base::TimeDelta::FromMicroseconds(
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index a04ee3c..ed1e15f 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -6,13 +6,14 @@
#define V8_PROFILER_CPU_PROFILER_H_
#include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/sampler.h"
+#include "src/profiler/tick-sample.h"
namespace v8 {
namespace internal {
@@ -20,7 +21,6 @@
// Forward declarations.
class CodeEntry;
class CodeMap;
-class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
@@ -81,7 +81,7 @@
Address start;
const char* deopt_reason;
SourcePosition position;
- size_t pc_offset;
+ int deopt_id;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -176,7 +176,7 @@
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- AtomicNumber<unsigned> last_code_event_id_;
+ base::AtomicNumber<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
};
@@ -226,11 +226,10 @@
void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
Name* name) override;
void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* script_name) override;
+ SharedFunctionInfo* shared, Name* script_name) override;
void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* script_name, int line, int column) override;
+ SharedFunctionInfo* shared, Name* script_name, int line,
+ int column) override;
void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
int args_count) override;
void CodeMovingGCEvent() override {}
@@ -259,6 +258,7 @@
void ResetProfiles();
void LogBuiltins();
void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+ void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
Name* InferScriptName(Name* name, SharedFunctionInfo* info);
Isolate* isolate_;
diff --git a/src/profiler/heap-profiler.cc b/src/profiler/heap-profiler.cc
index 1305cae..e048fae 100644
--- a/src/profiler/heap-profiler.cc
+++ b/src/profiler/heap-profiler.cc
@@ -84,14 +84,14 @@
return result;
}
-
-bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
- int stack_depth) {
+bool HeapProfiler::StartSamplingHeapProfiler(
+ uint64_t sample_interval, int stack_depth,
+ v8::HeapProfiler::SamplingFlags flags) {
if (sampling_heap_profiler_.get()) {
return false;
}
sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
- heap(), names_.get(), sample_interval, stack_depth));
+ heap(), names_.get(), sample_interval, stack_depth, flags));
return true;
}
diff --git a/src/profiler/heap-profiler.h b/src/profiler/heap-profiler.h
index 32e143c..93cb57a 100644
--- a/src/profiler/heap-profiler.h
+++ b/src/profiler/heap-profiler.h
@@ -30,7 +30,8 @@
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
- bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+ bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth,
+ v8::HeapProfiler::SamplingFlags);
void StopSamplingHeapProfiler();
bool is_sampling_allocations() { return !sampling_heap_profiler_.is_empty(); }
AllocationProfile* GetAllocationProfile();
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 748f307..e67acef 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -80,7 +80,7 @@
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
- base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent,
+ base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent,
' ', prefix, edge_name);
if (type() != kString) {
base::OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -1058,8 +1058,6 @@
ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
} else if (obj->IsAccessorPair()) {
ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
- } else if (obj->IsCodeCache()) {
- ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
} else if (obj->IsBox()) {
@@ -1444,19 +1442,6 @@
}
-void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(code_cache, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(code_cache, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
-}
-
-
void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
TagObject(code, names_->GetFormatted("(%s builtin)", name));
}
@@ -1598,14 +1583,8 @@
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
- if (k != heap_->hidden_properties_symbol()) {
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
- value, NULL, field_offset);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value,
- field_offset);
- }
+ SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
+ value, NULL, field_offset);
break;
}
case kDescriptor:
@@ -1625,11 +1604,6 @@
DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
Object* value = cell->value();
- if (k == heap_->hidden_properties_symbol()) {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- continue;
- }
PropertyDetails details = cell->property_details();
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
Name::cast(k), value);
@@ -1642,11 +1616,6 @@
Object* k = dictionary->KeyAt(i);
if (dictionary->IsKey(k)) {
Object* value = dictionary->ValueAt(i);
- if (k == heap_->hidden_properties_symbol()) {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- continue;
- }
PropertyDetails details = dictionary->DetailsAt(i);
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
Name::cast(k), value);
@@ -2262,9 +2231,9 @@
intptr_t elements = info->GetElementCount();
intptr_t size = info->GetSizeInBytes();
const char* name = elements != -1
- ? names_->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : names_->GetCopy(info->GetLabel());
+ ? names_->GetFormatted("%s / %" V8PRIdPTR " entries",
+ info->GetLabel(), elements)
+ : names_->GetCopy(info->GetLabel());
return snapshot_->AddEntry(
entries_type_,
name,
diff --git a/src/profiler/heap-snapshot-generator.h b/src/profiler/heap-snapshot-generator.h
index 857f240..255f61d 100644
--- a/src/profiler/heap-snapshot-generator.h
+++ b/src/profiler/heap-snapshot-generator.h
@@ -382,7 +382,6 @@
void ExtractScriptReferences(int entry, Script* script);
void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
void ExtractBoxReferences(int entry, Box* box);
void ExtractCellReferences(int entry, Cell* cell);
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index abcd9e5..b07601f 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -5,11 +5,12 @@
#include "src/profiler/profile-generator.h"
#include "src/ast/scopeinfo.h"
+#include "src/base/adapters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/profiler/profile-generator-inl.h"
-#include "src/profiler/sampler.h"
+#include "src/profiler/tick-sample.h"
#include "src/splay-tree-inl.h"
#include "src/unicode.h"
@@ -118,6 +119,19 @@
return it != inline_locations_.end() ? &it->second : NULL;
}
+void CodeEntry::AddDeoptInlinedFrames(
+ int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
+ // It's better to use std::move to place the vector into the map,
+ // but it's not supported by the current stdlibc++ on MacOS.
+ deopt_inlined_frames_
+ .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
+ .first->second.swap(inlined_frames);
+}
+
+bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
+ return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
+}
+
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
if (!shared->script()->IsScript()) return;
Script* script = Script::cast(shared->script());
@@ -131,30 +145,20 @@
CpuProfileDeoptInfo info;
info.deopt_reason = deopt_reason_;
- if (inlined_function_infos_.empty()) {
+ DCHECK_NE(Deoptimizer::DeoptInfo::kNoDeoptId, deopt_id_);
+ if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
info.stack.push_back(CpuProfileDeoptFrame(
{script_id_, position_ + deopt_position_.position()}));
- return info;
- }
- // Copy the only branch from the inlining tree where the deopt happened.
- SourcePosition position = deopt_position_;
- int inlining_id = InlinedFunctionInfo::kNoParentId;
- for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
- InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
- if (std::binary_search(current_info.deopt_pc_offsets.begin(),
- current_info.deopt_pc_offsets.end(), pc_offset_)) {
- inlining_id = static_cast<int>(i);
- break;
+ } else {
+ size_t deopt_position = deopt_position_.raw();
+ // Copy stack of inlined frames where the deopt happened.
+ std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
+ for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
+ info.stack.push_back(CpuProfileDeoptFrame(
+ {inlined_frame.script_id, deopt_position + inlined_frame.position}));
+ deopt_position = 0; // Done with innermost frame.
}
}
- while (inlining_id != InlinedFunctionInfo::kNoParentId) {
- InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
- info.stack.push_back(
- CpuProfileDeoptFrame({inlined_info.script_id,
- inlined_info.start_position + position.raw()}));
- position = inlined_info.inline_position;
- inlining_id = inlined_info.parent_id;
- }
return info;
}
@@ -229,12 +233,13 @@
base::OS::Print("\n");
for (size_t i = 0; i < deopt_infos_.size(); ++i) {
CpuProfileDeoptInfo& info = deopt_infos_[i];
- base::OS::Print(
- "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
- indent + 10, "", info.stack[0].script_id, info.stack[0].position,
- info.deopt_reason);
+ base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
+ " with reason '%s'.\n",
+ indent + 10, "", info.stack[0].script_id,
+ info.stack[0].position, info.deopt_reason);
for (size_t index = 1; index < info.stack.size(); ++index) {
- base::OS::Print("%*s;;; Inline point: script_id %d position: %d.\n",
+ base::OS::Print("%*s;;; Inline point: script_id %d position: %" PRIuS
+ ".\n",
indent + 10, "", info.stack[index].script_id,
info.stack[index].position);
}
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 194b490..5c017e1 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -47,6 +47,13 @@
Address instruction_start = NULL);
~CodeEntry();
+ // Container describing inlined frames at eager deopt points. Is eventually
+ // being translated into v8::CpuProfileDeoptFrame by the profiler.
+ struct DeoptInlinedFrame {
+ int position;
+ int script_id;
+ };
+
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
const char* name() const { return name_; }
@@ -64,11 +71,11 @@
const char* bailout_reason() const { return bailout_reason_; }
void set_deopt_info(const char* deopt_reason, SourcePosition position,
- size_t pc_offset) {
+ int deopt_id) {
DCHECK(deopt_position_.IsUnknown());
deopt_reason_ = deopt_reason;
deopt_position_ = position;
- pc_offset_ = pc_offset;
+ deopt_id_ = deopt_id;
}
CpuProfileDeoptInfo GetDeoptInfo();
const char* deopt_reason() const { return deopt_reason_; }
@@ -81,14 +88,6 @@
void FillFunctionInfo(SharedFunctionInfo* shared);
- void set_inlined_function_infos(
- const std::vector<InlinedFunctionInfo>& infos) {
- inlined_function_infos_ = infos;
- }
- const std::vector<InlinedFunctionInfo> inlined_function_infos() {
- return inlined_function_infos_;
- }
-
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const {
return BuiltinIdField::decode(bit_field_);
@@ -102,6 +101,9 @@
void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+ void AddDeoptInlinedFrames(int deopt_id, std::vector<DeoptInlinedFrame>&);
+ bool HasDeoptInlinedFramesFor(int deopt_id) const;
+
Address instruction_start() const { return instruction_start_; }
Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
@@ -125,13 +127,12 @@
const char* bailout_reason_;
const char* deopt_reason_;
SourcePosition deopt_position_;
- size_t pc_offset_;
+ int deopt_id_;
JITLineInfoTable* line_info_;
Address instruction_start_;
// Should be an unordered_map, but it doesn't currently work on Win & MacOS.
std::map<int, std::vector<CodeEntry*>> inline_locations_;
-
- std::vector<InlinedFunctionInfo> inlined_function_infos_;
+ std::map<int, std::vector<DeoptInlinedFrame>> deopt_inlined_frames_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
diff --git a/src/profiler/sampler.cc b/src/profiler/sampler.cc
index a340424..ae47dca 100644
--- a/src/profiler/sampler.cc
+++ b/src/profiler/sampler.cc
@@ -42,14 +42,12 @@
#endif
+#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/frames-inl.h"
-#include "src/log.h"
#include "src/profiler/cpu-profiler-inl.h"
+#include "src/profiler/tick-sample.h"
#include "src/simulator.h"
#include "src/v8threads.h"
-#include "src/vm-state-inl.h"
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -174,68 +172,52 @@
};
-bool IsSamePage(byte* ptr1, byte* ptr2) {
- const uint32_t kPageSize = 4096;
- uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
- return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
- (reinterpret_cast<uintptr_t>(ptr2) & mask);
-}
+typedef List<Sampler*> SamplerList;
-
-// Check if the code at specified address could potentially be a
-// frame setup code.
-bool IsNoFrameRegion(Address address) {
- struct Pattern {
- int bytes_count;
- byte bytes[8];
- int offsets[4];
- };
- byte* pc = reinterpret_cast<byte*>(address);
- static Pattern patterns[] = {
-#if V8_HOST_ARCH_IA32
- // push %ebp
- // mov %esp,%ebp
- {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
- // pop %ebp
- // ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
- // pop %ebp
- // ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
-#elif V8_HOST_ARCH_X64
- // pushq %rbp
- // movq %rsp,%rbp
- {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
- // popq %rbp
- // ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
- // popq %rbp
- // ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
-#endif
- {0, {}, {}}
- };
- for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
- for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
- int offset = *offset_ptr;
- if (!offset || IsSamePage(pc, pc - offset)) {
- MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
- if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
- return true;
- } else {
- // It is not safe to examine bytes on another page as it might not be
- // allocated thus causing a SEGFAULT.
- // Check the pattern part that's on the same page and
- // pessimistically assume it could be the entire pattern match.
- MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
- if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
- return true;
- }
- }
+#if defined(USE_SIGNALS)
+class AtomicGuard {
+ public:
+ explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true)
+ : atomic_(atomic),
+ is_success_(false) {
+ do {
+ // Use Acquire_Load to gain mutual exclusion.
+ USE(atomic_->Value());
+ is_success_ = atomic_->TrySetValue(0, 1);
+ } while (is_block && !is_success_);
}
- return false;
+
+ bool is_success() { return is_success_; }
+
+ ~AtomicGuard() {
+ if (is_success_) {
+ atomic_->SetValue(0);
+ }
+ atomic_ = NULL;
+ }
+
+ private:
+ base::AtomicValue<int>* atomic_;
+ bool is_success_;
+};
+
+
+// Returns key for hash map.
+void* ThreadKey(pthread_t thread_id) {
+ return reinterpret_cast<void*>(thread_id);
}
+
+// Returns hash value for hash map.
+uint32_t ThreadHash(pthread_t thread_id) {
+#if V8_OS_MACOSX
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
+#else
+ return static_cast<uint32_t>(thread_id);
+#endif
+}
+#endif // USE_SIGNALS
+
} // namespace
#if defined(USE_SIGNALS)
@@ -284,75 +266,6 @@
#endif
-#if defined(USE_SIMULATOR)
-class SimulatorHelper {
- public:
- inline bool Init(Isolate* isolate) {
- simulator_ = isolate->thread_local_top()->simulator_;
- // Check if there is active simulator.
- return simulator_ != NULL;
- }
-
- inline void FillRegisters(v8::RegisterState* state) {
-#if V8_TARGET_ARCH_ARM
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
- state->fp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::r11));
-#elif V8_TARGET_ARCH_ARM64
- if (simulator_->sp() == 0 || simulator_->fp() == 0) {
- // It's possible that the simulator is interrupted while it is updating
- // the sp or fp register. ARM64 simulator does this in two steps:
- // first setting it to zero and then setting it to a new value.
- // Bailout if sp/fp doesn't contain the new value.
- //
- // FIXME: The above doesn't really solve the issue.
- // If a 64-bit target is executed on a 32-bit host even the final
- // write is non-atomic, so it might obtain a half of the result.
- // Moreover as long as the register set code uses memcpy (as of now),
- // it is not guaranteed to be atomic even when both host and target
- // are of same bitness.
- return;
- }
- state->pc = reinterpret_cast<Address>(simulator_->pc());
- state->sp = reinterpret_cast<Address>(simulator_->sp());
- state->fp = reinterpret_cast<Address>(simulator_->fp());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
- state->fp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::fp));
-#elif V8_TARGET_ARCH_PPC
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
- state->fp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#elif V8_TARGET_ARCH_S390
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
- state->fp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#endif
- }
-
- private:
- Simulator* simulator_;
-};
-#endif // USE_SIMULATOR
-
-
#if defined(USE_SIGNALS)
class SignalHandler : public AllStatic {
@@ -374,6 +287,10 @@
return signal_handler_installed_;
}
+#if !V8_OS_NACL
+ static void CollectSample(void* context, Sampler* sampler);
+#endif
+
private:
static void Install() {
#if !V8_OS_NACL
@@ -418,34 +335,25 @@
// As Native Client does not support signal handling, profiling is disabled.
#if !V8_OS_NACL
-void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
- void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UnsafeCurrent();
- if (isolate == NULL || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
+void SignalHandler::CollectSample(void* context, Sampler* sampler) {
+ if (sampler == NULL || (!sampler->IsProfiling() &&
+ !sampler->IsRegistered())) {
return;
}
+ Isolate* isolate = sampler->isolate();
+
+ // We require a fully initialized and entered isolate.
+ if (isolate == NULL || !isolate->IsInUse()) return;
+
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL) return;
-
v8::RegisterState state;
#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(isolate)) return;
- helper.FillRegisters(&state);
- // It possible that the simulator is interrupted while it is updating
- // the sp or fp register. ARM64 simulator does this in two steps:
- // first setting it to zero and then setting it to the new value.
- // Bailout if sp/fp doesn't contain the new value.
- if (state.sp == 0 || state.fp == 0) return;
+ if (!SimulatorHelper::FillRegisters(isolate, &state)) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -582,7 +490,7 @@
}
#endif // V8_OS_NACL
-#endif
+#endif // USE_SIGNALS
class SamplerThread : public base::Thread {
@@ -607,19 +515,46 @@
}
DCHECK(sampler->IsActive());
- DCHECK(!instance_->active_samplers_.Contains(sampler));
DCHECK(instance_->interval_ == sampler->interval());
+
+#if defined(USE_SIGNALS)
+ AddSampler(sampler);
+#else
+ DCHECK(!instance_->active_samplers_.Contains(sampler));
instance_->active_samplers_.Add(sampler);
+#endif // USE_SIGNALS
if (need_to_start) instance_->StartSynchronously();
}
- static void RemoveActiveSampler(Sampler* sampler) {
+ static void RemoveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
base::LockGuard<base::Mutex> lock_guard(mutex_);
- DCHECK(sampler->IsActive());
+ DCHECK(sampler->IsActive() || sampler->IsRegistered());
+#if defined(USE_SIGNALS)
+ {
+ AtomicGuard atomic_guard(&sampler_list_access_counter_);
+ // Remove sampler from map.
+ pthread_t thread_id = sampler->platform_data()->vm_tid();
+ void* thread_key = ThreadKey(thread_id);
+ uint32_t thread_hash = ThreadHash(thread_id);
+ HashMap::Entry* entry =
+ thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
+ DCHECK(entry != NULL);
+ SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+ samplers->RemoveElement(sampler);
+ if (samplers->is_empty()) {
+ thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
+ delete samplers;
+ }
+ if (thread_id_to_samplers_.Get().occupancy() == 0) {
+ instance_to_remove = instance_;
+ instance_ = NULL;
+ }
+ }
+#else
bool removed = instance_->active_samplers_.RemoveElement(sampler);
DCHECK(removed);
USE(removed);
@@ -630,6 +565,7 @@
instance_to_remove = instance_;
instance_ = NULL;
}
+#endif // USE_SIGNALS
}
if (!instance_to_remove) return;
@@ -637,11 +573,36 @@
delete instance_to_remove;
}
+ // Unlike AddActiveSampler, this method only adds a sampler,
+ // but won't start the sampler thread.
+ static void RegisterSampler(Sampler* sampler) {
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
+#if defined(USE_SIGNALS)
+ AddSampler(sampler);
+#endif // USE_SIGNALS
+ }
+
// Implement Thread::Run().
virtual void Run() {
while (true) {
{
base::LockGuard<base::Mutex> lock_guard(mutex_);
+#if defined(USE_SIGNALS)
+ if (thread_id_to_samplers_.Get().occupancy() == 0) break;
+ if (SignalHandler::Installed()) {
+ for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start();
+ p != NULL; p = thread_id_to_samplers_.Get().Next(p)) {
+#if V8_OS_AIX && V8_TARGET_ARCH_PPC64
+ // on AIX64, cannot cast (void *) to pthread_t which is
+ // of type unsigned int (4bytes)
+ pthread_t thread_id = reinterpret_cast<intptr_t>(p->key);
+#else
+ pthread_t thread_id = reinterpret_cast<pthread_t>(p->key);
+#endif
+ pthread_kill(thread_id, SIGPROF);
+ }
+ }
+#else
if (active_samplers_.is_empty()) break;
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
@@ -650,6 +611,7 @@
if (!sampler->IsProfiling()) continue;
sampler->DoSample();
}
+#endif // USE_SIGNALS
}
base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
}
@@ -661,7 +623,38 @@
static SamplerThread* instance_;
const int interval_;
- List<Sampler*> active_samplers_;
+
+#if defined(USE_SIGNALS)
+ struct HashMapCreateTrait {
+ static void Construct(HashMap* allocated_ptr) {
+ new (allocated_ptr) HashMap(HashMap::PointersMatch);
+ }
+ };
+ friend class SignalHandler;
+ static base::LazyInstance<HashMap, HashMapCreateTrait>::type
+ thread_id_to_samplers_;
+ static base::AtomicValue<int> sampler_list_access_counter_;
+ static void AddSampler(Sampler* sampler) {
+ AtomicGuard atomic_guard(&sampler_list_access_counter_);
+ // Add sampler into map if needed.
+ pthread_t thread_id = sampler->platform_data()->vm_tid();
+ HashMap::Entry *entry =
+ thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
+ ThreadHash(thread_id));
+ if (entry->value == NULL) {
+ SamplerList* samplers = new SamplerList();
+ samplers->Add(sampler);
+ entry->value = samplers;
+ } else {
+ SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+ if (!samplers->Contains(sampler)) {
+ samplers->Add(sampler);
+ }
+ }
+ }
+#else
+ SamplerList active_samplers_;
+#endif // USE_SIGNALS
DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
@@ -669,103 +662,33 @@
base::Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
+#if defined(USE_SIGNALS)
+base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
+ SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
+base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
-
-//
-// StackTracer implementation
-//
-DISABLE_ASAN void TickSample::Init(Isolate* isolate,
- const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame,
- bool update_stats) {
- timestamp = base::TimeTicks::HighResolutionNow();
- pc = reinterpret_cast<Address>(regs.pc);
- state = isolate->current_vm_state();
- this->update_stats = update_stats;
-
- // Avoid collecting traces while doing GC.
- if (state == GC) return;
-
- Address js_entry_sp = isolate->js_entry_sp();
- if (js_entry_sp == 0) return; // Not executing JS now.
-
- if (pc && IsNoFrameRegion(pc)) {
- // Can't collect stack. Mark the sample as spoiled.
- timestamp = base::TimeTicks();
- pc = 0;
+// As Native Client does not support signal handling, profiling is disabled.
+#if !V8_OS_NACL
+void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
+ void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false);
+ if (!atomic_guard.is_success()) return;
+ pthread_t thread_id = pthread_self();
+ HashMap::Entry* entry =
+ SamplerThread::thread_id_to_samplers_.Pointer()->Lookup(
+ ThreadKey(thread_id), ThreadHash(thread_id));
+ if (entry == NULL)
return;
- }
-
- ExternalCallbackScope* scope = isolate->external_callback_scope();
- Address handler = Isolate::handler(isolate->thread_local_top());
- // If there is a handler on top of the external callback scope then
- // we have already entrered JavaScript again and the external callback
- // is not the top function.
- if (scope && scope->scope_address() < handler) {
- external_callback_entry = *scope->callback_entrypoint_address();
- has_external_callback = true;
- } else {
- // sp register may point at an arbitrary place in memory, make
- // sure MSAN doesn't complain about it.
- MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
- has_external_callback = false;
- }
-
- SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
- reinterpret_cast<Address>(regs.sp), js_entry_sp);
- top_frame_type = it.top_frame_type();
-
- SampleInfo info;
- GetStackSample(isolate, regs, record_c_entry_frame,
- reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
- frames_count = static_cast<unsigned>(info.frames_count);
- if (!frames_count) {
- // It is executing JS but failed to collect a stack trace.
- // Mark the sample as spoiled.
- timestamp = base::TimeTicks();
- pc = 0;
+ SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+ for (int i = 0; i < samplers->length(); ++i) {
+ Sampler* sampler = samplers->at(i);
+ CollectSample(context, sampler);
}
}
-
-
-void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame,
- void** frames, size_t frames_limit,
- v8::SampleInfo* sample_info) {
- sample_info->frames_count = 0;
- sample_info->vm_state = isolate->current_vm_state();
- if (sample_info->vm_state == GC) return;
-
- Address js_entry_sp = isolate->js_entry_sp();
- if (js_entry_sp == 0) return; // Not executing JS now.
-
- SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
- reinterpret_cast<Address>(regs.sp), js_entry_sp);
- size_t i = 0;
- if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
- it.top_frame_type() == StackFrame::EXIT) {
- frames[i++] = isolate->c_function();
- }
- while (!it.done() && i < frames_limit) {
- if (it.frame()->is_interpreted()) {
- // For interpreted frames use the bytecode array pointer as the pc.
- InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
- // Since the sampler can interrupt execution at any point the
- // bytecode_array might be garbage, so don't dereference it.
- Address bytecode_array =
- reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
- frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
- frame->GetBytecodeOffset();
- } else {
- frames[i++] = it.frame()->pc();
- }
- it.Advance();
- }
- sample_info->frames_count = i;
-}
+#endif // !V8_OS_NACL
+#endif // USE_SIGNALS
void Sampler::SetUp() {
@@ -789,6 +712,7 @@
profiling_(false),
has_processing_thread_(false),
active_(false),
+ registered_(false),
is_counting_samples_(false),
js_sample_count_(0),
external_sample_count_(0) {
@@ -797,6 +721,9 @@
Sampler::~Sampler() {
DCHECK(!IsActive());
+ if (IsRegistered()) {
+ SamplerThread::RemoveSampler(this);
+ }
delete data_;
}
@@ -809,8 +736,9 @@
void Sampler::Stop() {
DCHECK(IsActive());
- SamplerThread::RemoveActiveSampler(this);
+ SamplerThread::RemoveSampler(this);
SetActive(false);
+ SetRegistered(false);
}
@@ -850,6 +778,10 @@
void Sampler::DoSample() {
if (!SignalHandler::Installed()) return;
+ if (!IsActive() && !IsRegistered()) {
+ SamplerThread::RegisterSampler(this);
+ SetRegistered(true);
+ }
pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
@@ -859,11 +791,6 @@
HANDLE profiled_thread = platform_data()->profiled_thread();
if (profiled_thread == NULL) return;
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(isolate())) return;
-#endif
-
const DWORD kSuspendFailed = static_cast<DWORD>(-1);
if (SuspendThread(profiled_thread) == kSuspendFailed) return;
@@ -874,7 +801,10 @@
if (GetThreadContext(profiled_thread, &context) != 0) {
v8::RegisterState state;
#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
+ if (!SimulatorHelper::FillRegisters(isolate(), &state)) {
+ ResumeThread(profiled_thread);
+ return;
+ }
#else
#if V8_HOST_ARCH_X64
state.pc = reinterpret_cast<Address>(context.Rip);
diff --git a/src/profiler/sampler.h b/src/profiler/sampler.h
index dcd1255..3d3a6e9 100644
--- a/src/profiler/sampler.h
+++ b/src/profiler/sampler.h
@@ -8,14 +8,13 @@
#include "include/v8.h"
#include "src/base/atomicops.h"
-#include "src/base/platform/time.h"
-#include "src/frames.h"
-#include "src/globals.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
class Isolate;
+struct TickSample;
// ----------------------------------------------------------------------------
// Sampler
@@ -24,43 +23,6 @@
// (if used for profiling) the program counter and stack pointer for
// the thread that created it.
-// TickSample captures the information collected for each sample.
-struct TickSample {
- // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
- // include the runtime function we're calling. Externally exposed tick
- // samples don't care.
- enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
-
- TickSample()
- : state(OTHER),
- pc(NULL),
- external_callback_entry(NULL),
- frames_count(0),
- has_external_callback(false),
- update_stats(true),
- top_frame_type(StackFrame::NONE) {}
- void Init(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame, bool update_stats);
- static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame,
- void** frames, size_t frames_limit,
- v8::SampleInfo* sample_info);
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback_entry;
- };
- static const unsigned kMaxFramesCountLog2 = 8;
- static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
- Address stack[kMaxFramesCount]; // Call stack.
- base::TimeTicks timestamp;
- unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
- bool has_external_callback : 1;
- bool update_stats : 1; // Whether the sample should update aggregated stats.
- StackFrame::Type top_frame_type : 5;
-};
-
class Sampler {
public:
// Initializes the Sampler support. Called once at VM startup.
@@ -92,6 +54,11 @@
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return base::NoBarrier_Load(&active_); }
+ // CpuProfiler collects samples by calling DoSample directly
+ // without calling Start. To keep it working, we register the sampler
+ // with the CpuProfiler.
+ bool IsRegistered() const { return base::NoBarrier_Load(&registered_); }
+
void DoSample();
// If true next sample must be initiated on the profiler event processor
// thread right after latest sample is processed.
@@ -119,11 +86,14 @@
private:
void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
+ void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
+
Isolate* isolate_;
const int interval_;
base::Atomic32 profiling_;
base::Atomic32 has_processing_thread_;
base::Atomic32 active_;
+ base::Atomic32 registered_;
PlatformData* data_; // Platform specific data.
// Counts stack samples taken in various VM states.
bool is_counting_samples_;
@@ -132,7 +102,6 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
-
} // namespace internal
} // namespace v8
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index a32cae3..db9214d 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -47,8 +47,9 @@
return {size, static_cast<unsigned int>(count * scale + 0.5)};
}
-SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
- uint64_t rate, int stack_depth)
+SamplingHeapProfiler::SamplingHeapProfiler(
+ Heap* heap, StringsStorage* names, uint64_t rate, int stack_depth,
+ v8::HeapProfiler::SamplingFlags flags)
: isolate_(heap->isolate()),
heap_(heap),
new_space_observer_(new SamplingAllocationObserver(
@@ -58,14 +59,15 @@
heap_, static_cast<intptr_t>(rate), rate, this,
heap->isolate()->random_number_generator())),
names_(names),
- profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+ profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0),
samples_(),
stack_depth_(stack_depth),
- rate_(rate) {
+ rate_(rate),
+ flags_(flags) {
CHECK_GT(rate_, 0);
heap->new_space()->AddAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
if (space != heap->new_space()) {
space->AddAllocationObserver(other_spaces_observer_.get());
}
@@ -76,7 +78,7 @@
SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
if (space != heap_->new_space()) {
space->RemoveAllocationObserver(other_spaces_observer_.get());
}
@@ -109,6 +111,7 @@
Sample* sample = new Sample(size, node, loc, this);
samples_.insert(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+ sample->global.MarkIndependent();
}
void SamplingHeapProfiler::OnWeakCallback(
@@ -117,22 +120,34 @@
AllocationNode* node = sample->owner;
DCHECK(node->allocations_[sample->size] > 0);
node->allocations_[sample->size]--;
+ if (node->allocations_[sample->size] == 0) {
+ node->allocations_.erase(sample->size);
+ while (node->allocations_.empty() && node->children_.empty() &&
+ node->parent_ && !node->parent_->pinned_) {
+ AllocationNode* parent = node->parent_;
+ AllocationNode::FunctionId id = AllocationNode::function_id(
+ node->script_id_, node->script_position_, node->name_);
+ parent->children_.erase(id);
+ delete node;
+ node = parent;
+ }
+ }
sample->profiler->samples_.erase(sample);
delete sample;
}
-SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
- AllocationNode* parent, const char* name, int script_id,
- int start_position) {
- for (AllocationNode* child : parent->children_) {
- if (child->script_id_ == script_id &&
- child->script_position_ == start_position &&
- strcmp(child->name_, name) == 0) {
- return child;
- }
+SamplingHeapProfiler::AllocationNode*
+SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
+ int script_id,
+ int start_position) {
+ FunctionId id = function_id(script_id, start_position, name);
+ auto it = children_.find(id);
+ if (it != children_.end()) {
+ DCHECK(strcmp(it->second->name_, name) == 0);
+ return it->second;
}
- AllocationNode* child = new AllocationNode(name, script_id, start_position);
- parent->children_.push_back(child);
+ auto child = new AllocationNode(this, name, script_id, start_position);
+ children_.insert(std::make_pair(id, child));
return child;
}
@@ -140,7 +155,7 @@
AllocationNode* node = &profile_root_;
std::vector<SharedFunctionInfo*> stack;
- StackTraceFrameIterator it(isolate_);
+ JavaScriptFrameIterator it(isolate_);
int frames_captured = 0;
while (!it.done() && frames_captured < stack_depth_) {
JavaScriptFrame* frame = it.frame();
@@ -173,7 +188,7 @@
name = "(JS)";
break;
}
- return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+ return node->FindOrAddChildNode(name, v8::UnboundScript::kNoScriptId, 0);
}
// We need to process the stack in reverse order as the top of the stack is
@@ -186,14 +201,17 @@
Script* script = Script::cast(shared->script());
script_id = script->id();
}
- node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+ node = node->FindOrAddChildNode(name, script_id, shared->start_position());
}
return node;
}
v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
- const std::map<int, Script*>& scripts) {
+ const std::map<int, Handle<Script>>& scripts) {
+ // By pinning the node we make sure its children won't get disposed if
+ // a GC kicks in during the tree retrieval.
+ node->pinned_ = true;
Local<v8::String> script_name =
ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
int line = v8::AllocationProfile::kNoLineNumberInfo;
@@ -203,22 +221,21 @@
if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
scripts.find(node->script_id_) != scripts.end()) {
// Cannot use std::map<T>::at because it is not available on android.
- auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
- Script* script = non_const_scripts[node->script_id_];
- if (script) {
+ auto non_const_scripts =
+ const_cast<std::map<int, Handle<Script>>&>(scripts);
+ Handle<Script> script = non_const_scripts[node->script_id_];
+ if (!script.is_null()) {
if (script->name()->IsName()) {
Name* name = Name::cast(script->name());
script_name = ToApiHandle<v8::String>(
isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
}
- Handle<Script> script_handle(script);
- line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
- column =
- 1 + Script::GetColumnNumber(script_handle, node->script_position_);
+ line = 1 + Script::GetLineNumber(script, node->script_position_);
+ column = 1 + Script::GetColumnNumber(script, node->script_position_);
}
- for (auto alloc : node->allocations_) {
- allocations.push_back(ScaleSample(alloc.first, alloc.second));
- }
+ }
+ for (auto alloc : node->allocations_) {
+ allocations.push_back(ScaleSample(alloc.first, alloc.second));
}
profile->nodes().push_back(v8::AllocationProfile::Node(
@@ -227,35 +244,34 @@
script_name, node->script_id_, node->script_position_, line, column,
std::vector<v8::AllocationProfile::Node*>(), allocations}));
v8::AllocationProfile::Node* current = &profile->nodes().back();
- size_t child_len = node->children_.size();
- // The children vector may have nodes appended to it during translation
+ // The children map may have nodes inserted into it during translation
// because the translation may allocate strings on the JS heap that have
- // the potential to be sampled. We cache the length of the vector before
- // iteration so that nodes appended to the vector during iteration are
- // not processed.
- for (size_t i = 0; i < child_len; i++) {
+ // the potential to be sampled. That's ok since map iterators are not
+ // invalidated upon std::map insertion.
+ for (auto it : node->children_) {
current->children.push_back(
- TranslateAllocationNode(profile, node->children_[i], scripts));
+ TranslateAllocationNode(profile, it.second, scripts));
}
+ node->pinned_ = false;
return current;
}
v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+ if (flags_ & v8::HeapProfiler::kSamplingForceGC) {
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ "SamplingHeapProfiler");
+ }
// To resolve positions to line/column numbers, we will need to look up
// scripts. Build a map to allow fast mapping from script id to script.
- std::map<int, Script*> scripts;
+ std::map<int, Handle<Script>> scripts;
{
Script::Iterator iterator(isolate_);
- Script* script;
- while ((script = iterator.Next())) {
- scripts[script->id()] = script;
+ while (Script* script = iterator.Next()) {
+ scripts[script->id()] = handle(script);
}
}
-
auto profile = new v8::internal::AllocationProfile();
-
TranslateAllocationNode(profile, &profile_root_, scripts);
-
return profile;
}
diff --git a/src/profiler/sampling-heap-profiler.h b/src/profiler/sampling-heap-profiler.h
index 0b538b0..4b7c366 100644
--- a/src/profiler/sampling-heap-profiler.h
+++ b/src/profiler/sampling-heap-profiler.h
@@ -41,7 +41,7 @@
class SamplingHeapProfiler {
public:
SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
- int stack_depth);
+ int stack_depth, v8::HeapProfiler::SamplingFlags flags);
~SamplingHeapProfiler();
v8::AllocationProfile* GetAllocationProfile();
@@ -71,23 +71,47 @@
class AllocationNode {
public:
- AllocationNode(const char* const name, int script_id,
- const int start_position)
- : script_id_(script_id),
+ AllocationNode(AllocationNode* parent, const char* name, int script_id,
+ int start_position)
+ : parent_(parent),
+ script_id_(script_id),
script_position_(start_position),
- name_(name) {}
+ name_(name),
+ pinned_(false) {}
~AllocationNode() {
for (auto child : children_) {
- delete child;
+ delete child.second;
}
}
private:
+ typedef uint64_t FunctionId;
+ static FunctionId function_id(int script_id, int start_position,
+ const char* name) {
+ // script_id == kNoScriptId case:
+ // Use function name pointer as an id. Names derived from VM state
+ // must not collide with the builtin names. The least significant bit
+ // of the id is set to 1.
+ if (script_id == v8::UnboundScript::kNoScriptId) {
+ return reinterpret_cast<intptr_t>(name) | 1;
+ }
+ // script_id != kNoScriptId case:
+ // Use script_id, start_position pair to uniquely identify the node.
+ // The least significant bit of the id is set to 0.
+ DCHECK(static_cast<unsigned>(start_position) < (1u << 31));
+ return (static_cast<uint64_t>(script_id) << 32) + (start_position << 1);
+ }
+ AllocationNode* FindOrAddChildNode(const char* name, int script_id,
+ int start_position);
+ // TODO(alph): make use of unordered_map's here. Pay attention to
+ // iterator invalidation during TranslateAllocationNode.
std::map<size_t, unsigned int> allocations_;
- std::vector<AllocationNode*> children_;
+ std::map<FunctionId, AllocationNode*> children_;
+ AllocationNode* const parent_;
const int script_id_;
const int script_position_;
const char* const name_;
+ bool pinned_;
friend class SamplingHeapProfiler;
@@ -110,12 +134,10 @@
// loaded scripts keyed by their script id.
v8::AllocationProfile::Node* TranslateAllocationNode(
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
- const std::map<int, Script*>& scripts);
+ const std::map<int, Handle<Script>>& scripts);
v8::AllocationProfile::Allocation ScaleSample(size_t size,
unsigned int count);
AllocationNode* AddStack();
- AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
- int script_id, int start_position);
Isolate* const isolate_;
Heap* const heap_;
@@ -126,6 +148,7 @@
std::set<Sample*> samples_;
const int stack_depth_;
const uint64_t rate_;
+ v8::HeapProfiler::SamplingFlags flags_;
friend class SamplingAllocationObserver;
};
diff --git a/src/profiler/strings-storage.h b/src/profiler/strings-storage.h
index 7164cae..0849d63 100644
--- a/src/profiler/strings-storage.h
+++ b/src/profiler/strings-storage.h
@@ -6,6 +6,7 @@
#define V8_PROFILER_STRINGS_STORAGE_H_
#include "src/allocation.h"
+#include "src/base/compiler-specific.h"
#include "src/hashmap.h"
namespace v8 {
@@ -19,7 +20,8 @@
~StringsStorage();
const char* GetCopy(const char* src);
- const char* GetFormatted(const char* format, ...);
+ PRINTF_FORMAT(2, 3) const char* GetFormatted(const char* format, ...);
+ PRINTF_FORMAT(2, 0)
const char* GetVFormatted(const char* format, va_list args);
const char* GetName(Name* name);
const char* GetName(int index);
diff --git a/src/profiler/tick-sample.cc b/src/profiler/tick-sample.cc
new file mode 100644
index 0000000..3edd964
--- /dev/null
+++ b/src/profiler/tick-sample.cc
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/tick-sample.h"
+
+#include "src/frames-inl.h"
+#include "src/vm-state-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool IsSamePage(byte* ptr1, byte* ptr2) {
+ const uint32_t kPageSize = 4096;
+ uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
+ return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
+ (reinterpret_cast<uintptr_t>(ptr2) & mask);
+}
+
+
+// Check if the code at specified address could potentially be a
+// frame setup code.
+bool IsNoFrameRegion(Address address) {
+ struct Pattern {
+ int bytes_count;
+ byte bytes[8];
+ int offsets[4];
+ };
+ byte* pc = reinterpret_cast<byte*>(address);
+ static Pattern patterns[] = {
+#if V8_HOST_ARCH_IA32
+ // push %ebp
+ // mov %esp,%ebp
+ {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+ // pop %ebp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // pop %ebp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#elif V8_HOST_ARCH_X64
+ // pushq %rbp
+ // movq %rsp,%rbp
+ {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+ // popq %rbp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // popq %rbp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#endif
+ {0, {}, {}}
+ };
+ for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
+ for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
+ int offset = *offset_ptr;
+ if (!offset || IsSamePage(pc, pc - offset)) {
+ MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
+ if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
+ return true;
+ } else {
+ // It is not safe to examine bytes on another page as it might not be
+ // allocated thus causing a SEGFAULT.
+ // Check the pattern part that's on the same page and
+ // pessimistically assume it could be the entire pattern match.
+ MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
+ if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+
+//
+// StackTracer implementation
+//
+DISABLE_ASAN void TickSample::Init(Isolate* isolate,
+ const v8::RegisterState& regs,
+ RecordCEntryFrame record_c_entry_frame,
+ bool update_stats) {
+ timestamp = base::TimeTicks::HighResolutionNow();
+ pc = reinterpret_cast<Address>(regs.pc);
+ state = isolate->current_vm_state();
+ this->update_stats = update_stats;
+
+ // Avoid collecting traces while doing GC.
+ if (state == GC) return;
+
+ Address js_entry_sp = isolate->js_entry_sp();
+ if (js_entry_sp == 0) return; // Not executing JS now.
+
+ if (pc && IsNoFrameRegion(pc)) {
+ // Can't collect stack. Mark the sample as spoiled.
+ timestamp = base::TimeTicks();
+ pc = 0;
+ return;
+ }
+
+ ExternalCallbackScope* scope = isolate->external_callback_scope();
+ Address handler = Isolate::handler(isolate->thread_local_top());
+ // If there is a handler on top of the external callback scope then
+ // we have already entered JavaScript again and the external callback
+ // is not the top function.
+ if (scope && scope->scope_address() < handler) {
+ external_callback_entry = *scope->callback_entrypoint_address();
+ has_external_callback = true;
+ } else {
+ // sp register may point at an arbitrary place in memory, make
+ // sure MSAN doesn't complain about it.
+ MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
+ // Sample potential return address value for frameless invocation of
+ // stubs (we'll figure out later, if this value makes sense).
+ tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
+ has_external_callback = false;
+ }
+
+ SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
+ reinterpret_cast<Address>(regs.sp), js_entry_sp);
+ top_frame_type = it.top_frame_type();
+
+ SampleInfo info;
+ GetStackSample(isolate, regs, record_c_entry_frame,
+ reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
+ frames_count = static_cast<unsigned>(info.frames_count);
+ if (!frames_count) {
+ // It is executing JS but failed to collect a stack trace.
+ // Mark the sample as spoiled.
+ timestamp = base::TimeTicks();
+ pc = 0;
+ }
+}
+
+
+void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
+ RecordCEntryFrame record_c_entry_frame,
+ void** frames, size_t frames_limit,
+ v8::SampleInfo* sample_info) {
+ sample_info->frames_count = 0;
+ sample_info->vm_state = isolate->current_vm_state();
+ if (sample_info->vm_state == GC) return;
+
+ Address js_entry_sp = isolate->js_entry_sp();
+ if (js_entry_sp == 0) return; // Not executing JS now.
+
+ SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
+ reinterpret_cast<Address>(regs.sp), js_entry_sp);
+ size_t i = 0;
+ if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
+ it.top_frame_type() == StackFrame::EXIT) {
+ frames[i++] = isolate->c_function();
+ }
+ while (!it.done() && i < frames_limit) {
+ if (it.frame()->is_interpreted()) {
+ // For interpreted frames use the bytecode array pointer as the pc.
+ InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
+ // Since the sampler can interrupt execution at any point the
+ // bytecode_array might be garbage, so don't dereference it.
+ Address bytecode_array =
+ reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
+ frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
+ frame->GetBytecodeOffset();
+ } else {
+ frames[i++] = it.frame()->pc();
+ }
+ it.Advance();
+ }
+ sample_info->frames_count = i;
+}
+
+
+#if defined(USE_SIMULATOR)
+bool SimulatorHelper::FillRegisters(Isolate* isolate,
+ v8::RegisterState* state) {
+ Simulator *simulator = isolate->thread_local_top()->simulator_;
+ // Check if there is active simulator.
+ if (simulator == NULL) return false;
+#if V8_TARGET_ARCH_ARM
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(
+ Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+ state->pc = reinterpret_cast<Address>(simulator->pc());
+ state->sp = reinterpret_cast<Address>(simulator->sp());
+ state->fp = reinterpret_cast<Address>(simulator->fp());
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_PPC
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_S390
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#endif
+ if (state->sp == 0 || state->fp == 0) {
+ // It is possible that the simulator is interrupted while it is updating
+ // the sp or fp register. ARM64 simulator does this in two steps:
+ // first setting it to zero and then setting it to the new value.
+ // Bailout if sp/fp doesn't contain the new value.
+ //
+ // FIXME: The above doesn't really solve the issue.
+ // If a 64-bit target is executed on a 32-bit host even the final
+ // write is non-atomic, so it might obtain a half of the result.
+ // Moreover as long as the register set code uses memcpy (as of now),
+ // it is not guaranteed to be atomic even when both host and target
+ // are of same bitness.
+ return false;
+ }
+ return true;
+}
+#endif // USE_SIMULATOR
+
+} // namespace internal
+} // namespace v8
diff --git a/src/profiler/tick-sample.h b/src/profiler/tick-sample.h
new file mode 100644
index 0000000..fa2cf21
--- /dev/null
+++ b/src/profiler/tick-sample.h
@@ -0,0 +1,76 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_TICK_SAMPLE_H_
+#define V8_PROFILER_TICK_SAMPLE_H_
+
+#include "include/v8.h"
+
+#include "src/base/platform/time.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+// TickSample captures the information collected for each sample.
+struct TickSample {
+ // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
+ // include the runtime function we're calling. Externally exposed tick
+ // samples don't care.
+ enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
+
+ TickSample()
+ : state(OTHER),
+ pc(NULL),
+ external_callback_entry(NULL),
+ frames_count(0),
+ has_external_callback(false),
+ update_stats(true),
+ top_frame_type(StackFrame::NONE) {}
+ void Init(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame, bool update_stats);
+ static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame,
+ void** frames, size_t frames_limit,
+ v8::SampleInfo* sample_info);
+ StateTag state; // The state of the VM.
+ Address pc; // Instruction pointer.
+ union {
+ Address tos; // Top stack value (*sp).
+ Address external_callback_entry;
+ };
+ static const unsigned kMaxFramesCountLog2 = 8;
+ static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
+ Address stack[kMaxFramesCount]; // Call stack.
+ base::TimeTicks timestamp;
+ unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
+ bool has_external_callback : 1;
+ bool update_stats : 1; // Whether the sample should update aggregated stats.
+ StackFrame::Type top_frame_type : 5;
+};
+
+
+#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+ // Returns true if register values were successfully retrieved
+ // from the simulator, otherwise returns false.
+ static bool FillRegisters(Isolate* isolate, v8::RegisterState* state);
+};
+#endif // USE_SIMULATOR
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_TICK_SAMPLE_H_