Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE
This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile so that the x64 build does not
break.
FPIIM-449
Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index bbddc87..b6c7945 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -49,12 +49,12 @@
regs.sp = fp - fp_to_sp_delta;
regs.fp = fp;
regs.pc = from;
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false);
ticks_from_vm_buffer_.Enqueue(record);
}
-
-void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
+void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
+ bool update_stats) {
TickSampleEventRecord record(last_code_event_id_.Value());
RegisterState regs;
StackFrameIterator it(isolate);
@@ -64,7 +64,7 @@
regs.fp = frame->fp();
regs.pc = frame->pc();
}
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -429,6 +429,11 @@
profiles_ = new CpuProfilesCollection(isolate()->heap());
}
+void CpuProfiler::CollectSample() {
+ if (processor_ != NULL) {
+ processor_->AddCurrentStack(isolate_);
+ }
+}
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
if (profiles_->StartProfiling(title, record_samples)) {
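The new CollectSample() entry point above takes a stack sample on demand: it forwards to AddCurrentStack(), whose update_stats parameter defaults to false, so the forced sample is merged into the profile tree without inflating self or line tick counts. A minimal sketch of the contrast, using only signatures that appear in this patch (the wrapper function itself is hypothetical):

// Forced sample: stack is recorded, hit counters stay untouched (update_stats = false).
void TakeOnDemandSample(v8::internal::CpuProfiler* profiler) {
  profiler->CollectSample();  // -> processor_->AddCurrentStack(isolate_)
}
// Timer-driven samples, by contrast, pass update_stats = true (see sampler.cc below):
//   sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
// so only timer ticks contribute to ProfileNode self/line tick counts.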
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index e5ef0ac..1a1249c 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -139,7 +139,7 @@
void Enqueue(const CodeEventsContainer& event);
// Puts current stack into tick sample events buffer.
- void AddCurrentStack(Isolate* isolate);
+ void AddCurrentStack(Isolate* isolate, bool update_stats = false);
void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
// Tick sample events are filled directly in the buffer of the circular
@@ -168,8 +168,7 @@
ProfileGenerator* generator_;
Sampler* sampler_;
base::Atomic32 running_;
- // Sampling period in microseconds.
- const base::TimeDelta period_;
+ const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
@@ -205,6 +204,7 @@
virtual ~CpuProfiler();
void set_sampling_interval(base::TimeDelta value);
+ void CollectSample();
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
diff --git a/src/profiler/heap-profiler.cc b/src/profiler/heap-profiler.cc
index 4403e5d..1305cae 100644
--- a/src/profiler/heap-profiler.cc
+++ b/src/profiler/heap-profiler.cc
@@ -8,6 +8,7 @@
#include "src/debug/debug.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/profiler/sampling-heap-profiler.h"
namespace v8 {
namespace internal {
@@ -84,6 +85,31 @@
}
+bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
+ int stack_depth) {
+ if (sampling_heap_profiler_.get()) {
+ return false;
+ }
+ sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
+ heap(), names_.get(), sample_interval, stack_depth));
+ return true;
+}
+
+
+void HeapProfiler::StopSamplingHeapProfiler() {
+ sampling_heap_profiler_.Reset(nullptr);
+}
+
+
+v8::AllocationProfile* HeapProfiler::GetAllocationProfile() {
+ if (sampling_heap_profiler_.get()) {
+ return sampling_heap_profiler_->GetAllocationProfile();
+ } else {
+ return nullptr;
+ }
+}
+
+
void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
ids_->UpdateHeapObjectsMap();
is_tracking_object_moves_ = true;
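The hunk above defines the sampling heap profiler lifecycle: StartSamplingHeapProfiler() refuses to start a second session, StopSamplingHeapProfiler() drops the profiler, and GetAllocationProfile() returns nullptr when no session is active. A rough usage sketch against these internal declarations (interval and depth values are illustrative; as written in this patch, the returned profile is heap-allocated and owned by the caller):

#include "src/profiler/heap-profiler.h"

bool CollectAllocationProfile(v8::internal::HeapProfiler* heap_profiler) {
  // 512 KB average sampling interval, 16-frame stack depth (example values).
  if (!heap_profiler->StartSamplingHeapProfiler(512 * 1024, 16)) {
    return false;  // A sampling session is already running.
  }
  // ... run the allocation-heavy workload to be profiled ...
  v8::AllocationProfile* profile = heap_profiler->GetAllocationProfile();
  heap_profiler->StopSamplingHeapProfiler();
  if (profile == nullptr) return false;
  // ... walk profile->GetRootNode() ...
  delete profile;  // GetAllocationProfile() news a fresh profile on every call.
  return true;
}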
diff --git a/src/profiler/heap-profiler.h b/src/profiler/heap-profiler.h
index 9a04e83..32e143c 100644
--- a/src/profiler/heap-profiler.h
+++ b/src/profiler/heap-profiler.h
@@ -16,6 +16,7 @@
class AllocationTracker;
class HeapObjectsMap;
class HeapSnapshot;
+class SamplingHeapProfiler;
class StringsStorage;
class HeapProfiler {
@@ -29,6 +30,11 @@
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
+ bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+ void StopSamplingHeapProfiler();
+ bool is_sampling_allocations() { return !sampling_heap_profiler_.is_empty(); }
+ AllocationProfile* GetAllocationProfile();
+
void StartHeapObjectsTracking(bool track_allocations);
void StopHeapObjectsTracking();
AllocationTracker* allocation_tracker() const {
@@ -79,6 +85,7 @@
base::SmartPointer<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
+ base::SmartPointer<SamplingHeapProfiler> sampling_heap_profiler_;
};
} // namespace internal
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 69ed5e6..fc43f9f 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -11,7 +11,6 @@
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -1109,10 +1108,6 @@
TagObject(js_fun->bound_arguments(), "(bound arguments)");
SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
- TagObject(js_fun->creation_context(), "(creation context)");
- SetInternalReference(js_fun, entry, "creation_context",
- js_fun->creation_context(),
- JSBoundFunction::kCreationContextOffset);
SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
SetNativeBindReference(js_obj, entry, "bound_function",
js_fun->bound_target_function());
@@ -1425,18 +1420,17 @@
SetInternalReference(accessor_info, entry, "expected_receiver_type",
accessor_info->expected_receiver_type(),
AccessorInfo::kExpectedReceiverTypeOffset);
- if (accessor_info->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* executable_accessor_info =
- ExecutableAccessorInfo::cast(accessor_info);
+ if (accessor_info->IsAccessorInfo()) {
+ AccessorInfo* executable_accessor_info = AccessorInfo::cast(accessor_info);
SetInternalReference(executable_accessor_info, entry, "getter",
executable_accessor_info->getter(),
- ExecutableAccessorInfo::kGetterOffset);
+ AccessorInfo::kGetterOffset);
SetInternalReference(executable_accessor_info, entry, "setter",
executable_accessor_info->setter(),
- ExecutableAccessorInfo::kSetterOffset);
+ AccessorInfo::kSetterOffset);
SetInternalReference(executable_accessor_info, entry, "data",
executable_accessor_info->data(),
- ExecutableAccessorInfo::kDataOffset);
+ AccessorInfo::kDataOffset);
}
}
@@ -1538,7 +1532,7 @@
// Do not visit weak_next as it is not visited by the StaticVisitor,
// and we're not very interested in weak_next field here.
STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
- AllocationSite::BodyDescriptor::kEndOffset);
+ AllocationSite::kPointerFieldsEndOffset);
}
@@ -1604,7 +1598,7 @@
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
- if (k != heap_->hidden_string()) {
+ if (k != heap_->hidden_properties_symbol()) {
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
value, NULL, field_offset);
} else {
@@ -1631,7 +1625,7 @@
DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
Object* value = cell->value();
- if (k == heap_->hidden_string()) {
+ if (k == heap_->hidden_properties_symbol()) {
TagObject(value, "(hidden properties)");
SetInternalReference(js_obj, entry, "hidden_properties", value);
continue;
@@ -1648,7 +1642,7 @@
Object* k = dictionary->KeyAt(i);
if (dictionary->IsKey(k)) {
Object* value = dictionary->ValueAt(i);
- if (k == heap_->hidden_string()) {
+ if (k == heap_->hidden_properties_symbol()) {
TagObject(value, "(hidden properties)");
SetInternalReference(js_obj, entry, "hidden_properties", value);
continue;
@@ -1873,7 +1867,6 @@
bool V8HeapExplorer::IsEssentialObject(Object* object) {
return object->IsHeapObject() && !object->IsOddball() &&
object != heap_->empty_byte_array() &&
- object != heap_->empty_bytecode_array() &&
object != heap_->empty_fixed_array() &&
object != heap_->empty_descriptor_array() &&
object != heap_->fixed_array_map() && object != heap_->cell_map() &&
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index 890f341..58d06c9 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -274,9 +274,8 @@
return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}
-
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
- int src_line) {
+ int src_line, bool update_stats) {
ProfileNode* node = root_;
CodeEntry* last_entry = NULL;
for (CodeEntry** entry = path.start() + path.length() - 1;
@@ -290,9 +289,11 @@
if (last_entry && last_entry->has_deopt_info()) {
node->CollectDeoptInfo(last_entry);
}
- node->IncrementSelfTicks();
- if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
- node->IncrementLineTicks(src_line);
+ if (update_stats) {
+ node->IncrementSelfTicks();
+ if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
+ node->IncrementLineTicks(src_line);
+ }
}
return node;
}
@@ -354,11 +355,12 @@
start_time_(base::TimeTicks::HighResolutionNow()),
top_down_(isolate) {}
-
void CpuProfile::AddPath(base::TimeTicks timestamp,
- const Vector<CodeEntry*>& path, int src_line) {
- ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path, src_line);
- if (record_samples_) {
+ const Vector<CodeEntry*>& path, int src_line,
+ bool update_stats) {
+ ProfileNode* top_frame_node =
+ top_down_.AddPathFromEnd(path, src_line, update_stats);
+ if (record_samples_ && !timestamp.IsNull()) {
timestamps_.Add(timestamp);
samples_.Add(top_frame_node);
}
@@ -522,15 +524,15 @@
UNREACHABLE();
}
-
void CpuProfilesCollection::AddPathToCurrentProfiles(
- base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line) {
+ base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line,
+ bool update_stats) {
// As starting / stopping profiles is rare relative to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
- current_profiles_[i]->AddPath(timestamp, path, src_line);
+ current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
}
current_profiles_semaphore_.Signal();
}
@@ -595,7 +597,7 @@
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- *entry++ = code_map_.FindEntry(sample.external_callback);
+ *entry++ = code_map_.FindEntry(sample.external_callback_entry);
} else {
CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
// If there is no pc_entry we're likely in native code.
@@ -634,10 +636,9 @@
}
}
- for (const Address* stack_pos = sample.stack,
- *stack_end = stack_pos + sample.frames_count;
- stack_pos != stack_end;
- ++stack_pos) {
+ for (const Address *stack_pos = sample.stack,
+ *stack_end = stack_pos + sample.frames_count;
+ stack_pos != stack_end; ++stack_pos) {
*entry = code_map_.FindEntry(*stack_pos);
// Skip unresolved frames (e.g. internal frame) and get source line of
@@ -670,7 +671,8 @@
}
}
- profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line);
+ profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
+ sample.update_stats);
}
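These changes thread update_stats from the tick sample down into ProfileTree::AddPathFromEnd(), where it now gates the self and line tick increments, and CpuProfile::AddPath() additionally drops samples whose timestamp is null (the spoiled-sample marker set in sampler.cc). The net effect, as a hedged sketch (the helper below is illustrative, not part of the patch):

void IllustrateUpdateStats(v8::internal::ProfileTree* tree,
                           const v8::internal::Vector<v8::internal::CodeEntry*>& path,
                           int line) {
  ProfileNode* counted   = tree->AddPathFromEnd(path, line, /* update_stats */ true);
  ProfileNode* uncounted = tree->AddPathFromEnd(path, line, /* update_stats */ false);
  // Both calls walk/create the same chain of nodes in the tree...
  DCHECK_EQ(counted, uncounted);
  // ...but only the first call incremented the node's self ticks and line ticks.
}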
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 47a73f1..3c976d6 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -192,7 +192,8 @@
ProfileNode* AddPathFromEnd(
const Vector<CodeEntry*>& path,
- int src_line = v8::CpuProfileNode::kNoLineNumberInfo);
+ int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
+ bool update_stats = true);
ProfileNode* root() const { return root_; }
unsigned next_node_id() { return next_node_id_++; }
unsigned GetFunctionId(const ProfileNode* node);
@@ -225,7 +226,7 @@
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
- int src_line);
+ int src_line, bool update_stats);
void CalculateTotalTicksAndSamplingRate();
const char* title() const { return title_; }
@@ -333,7 +334,8 @@
// Called from profile generator thread.
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
- const Vector<CodeEntry*>& path, int src_line);
+ const Vector<CodeEntry*>& path, int src_line,
+ bool update_stats);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
diff --git a/src/profiler/sampler.cc b/src/profiler/sampler.cc
index dc4c4c4..e331db9 100644
--- a/src/profiler/sampler.cc
+++ b/src/profiler/sampler.cc
@@ -657,10 +657,12 @@
//
DISABLE_ASAN void TickSample::Init(Isolate* isolate,
const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame) {
+ RecordCEntryFrame record_c_entry_frame,
+ bool update_stats) {
timestamp = base::TimeTicks::HighResolutionNow();
pc = reinterpret_cast<Address>(regs.pc);
state = isolate->current_vm_state();
+ this->update_stats = update_stats;
// Avoid collecting traces while doing GC.
if (state == GC) return;
@@ -669,6 +671,8 @@
if (js_entry_sp == 0) return; // Not executing JS now.
if (pc && IsNoFrameRegion(pc)) {
+ // Can't collect stack. Mark the sample as spoiled.
+ timestamp = base::TimeTicks();
pc = 0;
return;
}
@@ -679,7 +683,7 @@
// we have already entered JavaScript again and the external callback
// is not the top function.
if (scope && scope->scope_address() < handler) {
- external_callback = scope->callback();
+ external_callback_entry = *scope->callback_entrypoint_address();
has_external_callback = true;
} else {
// sp register may point at an arbitrary place in memory, make
@@ -699,6 +703,12 @@
GetStackSample(isolate, regs, record_c_entry_frame,
reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
frames_count = static_cast<unsigned>(info.frames_count);
+ if (!frames_count) {
+ // It is executing JS but failed to collect a stack trace.
+ // Mark the sample as spoiled.
+ timestamp = base::TimeTicks();
+ pc = 0;
+ }
}
@@ -743,7 +753,6 @@
#endif
}
-
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
@@ -751,17 +760,16 @@
has_processing_thread_(false),
active_(false),
is_counting_samples_(false),
- js_and_external_sample_count_(0) {
+ js_sample_count_(0),
+ external_sample_count_(0) {
data_ = new PlatformData;
}
-
Sampler::~Sampler() {
DCHECK(!IsActive());
delete data_;
}
-
void Sampler::Start() {
DCHECK(!IsActive());
SetActive(true);
@@ -796,11 +804,10 @@
TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
TickSample sample_obj;
if (sample == NULL) sample = &sample_obj;
- sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame);
- if (is_counting_samples_) {
- if (sample->state == JS || sample->state == EXTERNAL) {
- ++js_and_external_sample_count_;
- }
+ sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
+ if (is_counting_samples_ && !sample->timestamp.IsNull()) {
+ if (sample->state == JS) ++js_sample_count_;
+ if (sample->state == EXTERNAL) ++external_sample_count_;
}
Tick(sample);
if (sample != &sample_obj) {
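The convention introduced here is that a sample whose stack could not be collected is "spoiled" by resetting its timestamp to a default-constructed base::TimeTicks; downstream code (the sample counting above and CpuProfile::AddPath()) then checks timestamp.IsNull() and ignores it. A one-line restatement of the sentinel, as a hypothetical helper:

// True when TickSample::Init() gave up on the stack and cleared the timestamp.
bool IsSpoiledSample(const v8::internal::TickSample& sample) {
  return sample.timestamp.IsNull();  // set via timestamp = base::TimeTicks();
}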
diff --git a/src/profiler/sampler.h b/src/profiler/sampler.h
index 354e935..8e8ef1c 100644
--- a/src/profiler/sampler.h
+++ b/src/profiler/sampler.h
@@ -34,12 +34,13 @@
TickSample()
: state(OTHER),
pc(NULL),
- external_callback(NULL),
+ external_callback_entry(NULL),
frames_count(0),
has_external_callback(false),
+ update_stats(true),
top_frame_type(StackFrame::NONE) {}
void Init(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame);
+ RecordCEntryFrame record_c_entry_frame, bool update_stats);
static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
RecordCEntryFrame record_c_entry_frame,
void** frames, size_t frames_limit,
@@ -48,7 +49,7 @@
Address pc; // Instruction pointer.
union {
Address tos; // Top stack value (*sp).
- Address external_callback;
+ Address external_callback_entry;
};
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
@@ -56,6 +57,7 @@
base::TimeTicks timestamp;
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
+ bool update_stats : 1; // Whether the sample should update aggregated stats.
StackFrame::Type top_frame_type : 4;
};
@@ -98,12 +100,12 @@
}
// Used in tests to make sure that stack sampling is performed.
- unsigned js_and_external_sample_count() const {
- return js_and_external_sample_count_;
- }
+ unsigned js_sample_count() const { return js_sample_count_; }
+ unsigned external_sample_count() const { return external_sample_count_; }
void StartCountingSamples() {
- is_counting_samples_ = true;
- js_and_external_sample_count_ = 0;
+ js_sample_count_ = 0;
+ external_sample_count_ = 0;
+ is_counting_samples_ = true;
}
class PlatformData;
@@ -123,9 +125,10 @@
base::Atomic32 has_processing_thread_;
base::Atomic32 active_;
PlatformData* data_; // Platform specific data.
+ // Counts stack samples taken in various VM states.
bool is_counting_samples_;
- // Counts stack samples taken in JS VM state.
- unsigned js_and_external_sample_count_;
+ unsigned js_sample_count_;
+ unsigned external_sample_count_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
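With the old combined js_and_external_sample_count_ split into per-state counters, a test that previously waited for "any JS or external sample" can now distinguish the two. A minimal sketch of such a wait loop (illustrative; a real test would pump the message loop or keep running script between checks):

void WaitForFirstSample(v8::internal::Sampler* sampler) {
  sampler->StartCountingSamples();
  while (sampler->js_sample_count() == 0 &&
         sampler->external_sample_count() == 0) {
    // Keep the profiled workload running until at least one sample lands.
  }
}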
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
new file mode 100644
index 0000000..c13538c
--- /dev/null
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -0,0 +1,260 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/sampling-heap-profiler.h"
+
+#include <stdint.h>
+#include <memory>
+#include "src/api.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/frames-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/profiler/strings-storage.h"
+
+namespace v8 {
+namespace internal {
+
+// We sample with a Poisson process, with constant average sampling interval.
+// This follows the exponential probability distribution with parameter
+// λ = 1/rate where rate is the average number of bytes between samples.
+//
+// Let u be a uniformly distributed random number between 0 and 1, then
+// next_sample = (- ln u) / λ
+intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
+ if (FLAG_sampling_heap_profiler_suppress_randomness) {
+ return static_cast<intptr_t>(rate);
+ }
+ double u = random_->NextDouble();
+ double next = (-std::log(u)) * rate;
+ return next < kPointerSize
+ ? kPointerSize
+ : (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
+}
+
+// Samples were collected according to a Poisson process. Since we have not
+// recorded all allocations, we must approximate the shape of the underlying
+// space of allocations based on the samples we have collected. Given that
+// we sample at rate R, the probability that an allocation of size S will be
+// sampled is 1-exp(-S/R). This function uses the above probability to
+// approximate the true number of allocations with size *size* given that
+// *count* samples were observed.
+v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
+ size_t size, unsigned int count) {
+ double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate_));
+ // Round count instead of truncating.
+ return {size, static_cast<unsigned int>(count * scale + 0.5)};
+}
+
+SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
+ uint64_t rate, int stack_depth)
+ : isolate_(heap->isolate()),
+ heap_(heap),
+ new_space_observer_(new SamplingAllocationObserver(
+ heap_, static_cast<intptr_t>(rate), rate, this,
+ heap->isolate()->random_number_generator())),
+ other_spaces_observer_(new SamplingAllocationObserver(
+ heap_, static_cast<intptr_t>(rate), rate, this,
+ heap->isolate()->random_number_generator())),
+ names_(names),
+ profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+ samples_(),
+ stack_depth_(stack_depth),
+ rate_(rate) {
+ CHECK_GT(rate_, 0);
+ heap->new_space()->AddAllocationObserver(new_space_observer_.get());
+ AllSpaces spaces(heap);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ if (space != heap->new_space()) {
+ space->AddAllocationObserver(other_spaces_observer_.get());
+ }
+ }
+}
+
+
+SamplingHeapProfiler::~SamplingHeapProfiler() {
+ heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
+ AllSpaces spaces(heap_);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ if (space != heap_->new_space()) {
+ space->RemoveAllocationObserver(other_spaces_observer_.get());
+ }
+ }
+
+ for (auto sample : samples_) {
+ delete sample;
+ }
+ std::set<Sample*> empty;
+ samples_.swap(empty);
+}
+
+
+void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
+ DisallowHeapAllocation no_allocation;
+
+ HandleScope scope(isolate_);
+ HeapObject* heap_object = HeapObject::FromAddress(soon_object);
+ Handle<Object> obj(heap_object, isolate_);
+
+ // Mark the new block as FreeSpace to make sure the heap is iterable while we
+ // are taking the sample.
+ heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size));
+
+ Local<v8::Value> loc = v8::Utils::ToLocal(obj);
+
+ AllocationNode* node = AddStack();
+ node->allocations_[size]++;
+ Sample* sample = new Sample(size, node, loc, this);
+ samples_.insert(sample);
+ sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+}
+
+void SamplingHeapProfiler::OnWeakCallback(
+ const WeakCallbackInfo<Sample>& data) {
+ Sample* sample = data.GetParameter();
+ AllocationNode* node = sample->owner;
+ DCHECK(node->allocations_[sample->size] > 0);
+ node->allocations_[sample->size]--;
+ sample->profiler->samples_.erase(sample);
+ delete sample;
+}
+
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
+ AllocationNode* parent, const char* name, int script_id,
+ int start_position) {
+ for (AllocationNode* child : parent->children_) {
+ if (child->script_id_ == script_id &&
+ child->script_position_ == start_position &&
+ strcmp(child->name_, name) == 0) {
+ return child;
+ }
+ }
+ AllocationNode* child = new AllocationNode(name, script_id, start_position);
+ parent->children_.push_back(child);
+ return child;
+}
+
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
+ AllocationNode* node = &profile_root_;
+
+ std::vector<SharedFunctionInfo*> stack;
+ StackTraceFrameIterator it(isolate_);
+ int frames_captured = 0;
+ while (!it.done() && frames_captured < stack_depth_) {
+ JavaScriptFrame* frame = it.frame();
+ SharedFunctionInfo* shared = frame->function()->shared();
+ stack.push_back(shared);
+
+ frames_captured++;
+ it.Advance();
+ }
+
+ if (frames_captured == 0) {
+ const char* name = nullptr;
+ switch (isolate_->current_vm_state()) {
+ case GC:
+ name = "(GC)";
+ break;
+ case COMPILER:
+ name = "(COMPILER)";
+ break;
+ case OTHER:
+ name = "(V8 API)";
+ break;
+ case EXTERNAL:
+ name = "(EXTERNAL)";
+ break;
+ case IDLE:
+ name = "(IDLE)";
+ break;
+ case JS:
+ name = "(JS)";
+ break;
+ }
+ return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+ }
+
+ // We need to process the stack in reverse order as the top of the stack is
+ // the first element in the list.
+ for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
+ SharedFunctionInfo* shared = *it;
+ const char* name = this->names()->GetFunctionName(shared->DebugName());
+ int script_id = v8::UnboundScript::kNoScriptId;
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ script_id = script->id();
+ }
+ node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+ }
+ return node;
+}
+
+v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
+ AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
+ const std::map<int, Script*>& scripts) {
+ Local<v8::String> script_name =
+ ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
+ int line = v8::AllocationProfile::kNoLineNumberInfo;
+ int column = v8::AllocationProfile::kNoColumnNumberInfo;
+ std::vector<v8::AllocationProfile::Allocation> allocations;
+ allocations.reserve(node->allocations_.size());
+ if (node->script_id_ != v8::UnboundScript::kNoScriptId) {
+    // Cannot use std::map<T>::at because it is not available on Android.
+ auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
+ Script* script = non_const_scripts[node->script_id_];
+ if (script->name()->IsName()) {
+ Name* name = Name::cast(script->name());
+ script_name = ToApiHandle<v8::String>(
+ isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+ }
+ Handle<Script> script_handle(script);
+
+ line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
+ column = 1 + Script::GetColumnNumber(script_handle, node->script_position_);
+ for (auto alloc : node->allocations_) {
+ allocations.push_back(ScaleSample(alloc.first, alloc.second));
+ }
+ }
+
+ profile->nodes().push_back(v8::AllocationProfile::Node(
+ {ToApiHandle<v8::String>(
+ isolate_->factory()->InternalizeUtf8String(node->name_)),
+ script_name, node->script_id_, node->script_position_, line, column,
+ std::vector<v8::AllocationProfile::Node*>(), allocations}));
+ v8::AllocationProfile::Node* current = &profile->nodes().back();
+ size_t child_len = node->children_.size();
+ // The children vector may have nodes appended to it during translation
+ // because the translation may allocate strings on the JS heap that have
+ // the potential to be sampled. We cache the length of the vector before
+ // iteration so that nodes appended to the vector during iteration are
+ // not processed.
+ for (size_t i = 0; i < child_len; i++) {
+ current->children.push_back(
+ TranslateAllocationNode(profile, node->children_[i], scripts));
+ }
+ return current;
+}
+
+v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+ // To resolve positions to line/column numbers, we will need to look up
+ // scripts. Build a map to allow fast mapping from script id to script.
+ std::map<int, Script*> scripts;
+ {
+ Script::Iterator iterator(isolate_);
+ Script* script;
+ while ((script = iterator.Next())) {
+ scripts[script->id()] = script;
+ }
+ }
+
+ auto profile = new v8::internal::AllocationProfile();
+
+ TranslateAllocationNode(profile, &profile_root_, scripts);
+
+ return profile;
+}
+
+
+} // namespace internal
+} // namespace v8
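The two comments in this new file carry the sampler's math: the byte offset to the next sample is drawn from an exponential distribution with mean rate (next = -ln(u) * rate), and an observed count for size S is scaled up by 1 / (1 - exp(-S/rate)) because larger allocations are more likely to be sampled at least once. A worked numeric example plus a standalone restatement of the estimator (it mirrors ScaleSample above; the free function is illustrative):

#include <cmath>
#include <cstddef>
#include <cstdint>

// rate R = 512 KB, allocation size S = 64 KB:
//   P(sampled) = 1 - exp(-S/R) = 1 - exp(-0.125) ≈ 0.1175, so scale ≈ 8.51,
//   and 3 observed 64 KB samples are reported as round(3 * 8.51) = 26 allocations.
// Next-sample offset for a uniform draw u = 0.5: -ln(0.5) * R ≈ 0.693 * R ≈ 355 KB.
unsigned EstimateTrueCount(size_t size, unsigned count, uint64_t rate) {
  double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate));
  return static_cast<unsigned>(count * scale + 0.5);  // round instead of truncating
}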
diff --git a/src/profiler/sampling-heap-profiler.h b/src/profiler/sampling-heap-profiler.h
new file mode 100644
index 0000000..0b538b0
--- /dev/null
+++ b/src/profiler/sampling-heap-profiler.h
@@ -0,0 +1,166 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
+#define V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
+
+#include <deque>
+#include <map>
+#include <set>
+#include "include/v8-profiler.h"
+#include "src/heap/heap.h"
+#include "src/profiler/strings-storage.h"
+
+namespace v8 {
+
+namespace base {
+class RandomNumberGenerator;
+}
+
+namespace internal {
+
+class SamplingAllocationObserver;
+
+class AllocationProfile : public v8::AllocationProfile {
+ public:
+ AllocationProfile() : nodes_() {}
+
+ v8::AllocationProfile::Node* GetRootNode() override {
+ return nodes_.size() == 0 ? nullptr : &nodes_.front();
+ }
+
+ std::deque<v8::AllocationProfile::Node>& nodes() { return nodes_; }
+
+ private:
+ std::deque<v8::AllocationProfile::Node> nodes_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationProfile);
+};
+
+class SamplingHeapProfiler {
+ public:
+ SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
+ int stack_depth);
+ ~SamplingHeapProfiler();
+
+ v8::AllocationProfile* GetAllocationProfile();
+
+ StringsStorage* names() const { return names_; }
+
+ class AllocationNode;
+
+ struct Sample {
+ public:
+ Sample(size_t size_, AllocationNode* owner_, Local<Value> local_,
+ SamplingHeapProfiler* profiler_)
+ : size(size_),
+ owner(owner_),
+ global(Global<Value>(
+ reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_)),
+ profiler(profiler_) {}
+ ~Sample() { global.Reset(); }
+ const size_t size;
+ AllocationNode* const owner;
+ Global<Value> global;
+ SamplingHeapProfiler* const profiler;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Sample);
+ };
+
+ class AllocationNode {
+ public:
+ AllocationNode(const char* const name, int script_id,
+ const int start_position)
+ : script_id_(script_id),
+ script_position_(start_position),
+ name_(name) {}
+ ~AllocationNode() {
+ for (auto child : children_) {
+ delete child;
+ }
+ }
+
+ private:
+ std::map<size_t, unsigned int> allocations_;
+ std::vector<AllocationNode*> children_;
+ const int script_id_;
+ const int script_position_;
+ const char* const name_;
+
+ friend class SamplingHeapProfiler;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationNode);
+ };
+
+ private:
+ Heap* heap() const { return heap_; }
+
+ void SampleObject(Address soon_object, size_t size);
+
+ static void OnWeakCallback(const WeakCallbackInfo<Sample>& data);
+
+ // Methods that construct v8::AllocationProfile.
+
+ // Translates the provided AllocationNode *node* returning an equivalent
+ // AllocationProfile::Node. The newly created AllocationProfile::Node is added
+ // to the provided AllocationProfile *profile*. Line numbers, column numbers,
+ // and script names are resolved using *scripts* which maps all currently
+ // loaded scripts keyed by their script id.
+ v8::AllocationProfile::Node* TranslateAllocationNode(
+ AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
+ const std::map<int, Script*>& scripts);
+ v8::AllocationProfile::Allocation ScaleSample(size_t size,
+ unsigned int count);
+ AllocationNode* AddStack();
+ AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
+ int script_id, int start_position);
+
+ Isolate* const isolate_;
+ Heap* const heap_;
+ base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
+ base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
+ StringsStorage* const names_;
+ AllocationNode profile_root_;
+ std::set<Sample*> samples_;
+ const int stack_depth_;
+ const uint64_t rate_;
+
+ friend class SamplingAllocationObserver;
+};
+
+class SamplingAllocationObserver : public AllocationObserver {
+ public:
+ SamplingAllocationObserver(Heap* heap, intptr_t step_size, uint64_t rate,
+ SamplingHeapProfiler* profiler,
+ base::RandomNumberGenerator* random)
+ : AllocationObserver(step_size),
+ profiler_(profiler),
+ heap_(heap),
+ random_(random),
+ rate_(rate) {}
+ virtual ~SamplingAllocationObserver() {}
+
+ protected:
+ void Step(int bytes_allocated, Address soon_object, size_t size) override {
+ USE(heap_);
+ DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+ DCHECK(soon_object);
+ profiler_->SampleObject(soon_object, size);
+ }
+
+ intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
+
+ private:
+ intptr_t GetNextSampleInterval(uint64_t rate);
+ SamplingHeapProfiler* const profiler_;
+ Heap* const heap_;
+ base::RandomNumberGenerator* const random_;
+ uint64_t const rate_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
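TranslateAllocationNode() converts the internal tree into v8::AllocationProfile::Node records whose fields (name, script_name, script id, start position, line/column, children, allocations) are exactly the ones brace-initialized in sampling-heap-profiler.cc above. A consumer-side sketch that walks such a profile and totals the estimated bytes (hypothetical helper; field names as constructed in this patch):

#include <cstddef>
#include "include/v8-profiler.h"

// Recursively sum the estimated allocated bytes in a sampling heap profile.
size_t TotalEstimatedBytes(v8::AllocationProfile::Node* node) {
  size_t total = 0;
  for (const auto& alloc : node->allocations) {
    total += alloc.size * alloc.count;  // count is already scaled by ScaleSample().
  }
  for (v8::AllocationProfile::Node* child : node->children) {
    total += TotalEstimatedBytes(child);
  }
  return total;
}
// Typical use: TotalEstimatedBytes(profile->GetRootNode()) on the profile
// returned by HeapProfiler::GetAllocationProfile().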