Version 3.15.8
Enforced stack allocation of TryCatch blocks. (issue 2166, chromium:152389)
Fixed external exceptions in external try-catch handlers. (issue 2166)
Activated incremental code flushing by default.
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@13133 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/builtins.cc b/src/builtins.cc
index d9f8d15..d62713d 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1252,12 +1252,28 @@
//
+// Searches the hidden prototype chain of the given object for the first
+// object that is an instance of the given type. If no such object can
+// be found then Heap::null_value() is returned.
+static inline Object* FindHidden(Heap* heap,
+ Object* object,
+ FunctionTemplateInfo* type) {
+ if (object->IsInstanceOf(type)) return object;
+ Object* proto = object->GetPrototype();
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype()) {
+ return FindHidden(heap, proto, type);
+ }
+ return heap->null_value();
+}
+
+
// Returns the holder JSObject if the function can legally be called
// with this receiver. Returns Heap::null_value() if the call is
// illegal. Any arguments that don't fit the expected type is
-// overwritten with undefined. Arguments that do fit the expected
-// type is overwritten with the object in the prototype chain that
-// actually has that type.
+// overwritten with undefined. Note that holder and the arguments are
+// implicitly rewritten with the first object in the hidden prototype
+// chain that actually has the expected type.
static inline Object* TypeCheck(Heap* heap,
int argc,
Object** argv,
@@ -1270,15 +1286,10 @@
SignatureInfo* sig = SignatureInfo::cast(sig_obj);
// If necessary, check the receiver
Object* recv_type = sig->receiver();
-
Object* holder = recv;
if (!recv_type->IsUndefined()) {
- for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
- if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
- break;
- }
- }
- if (holder == heap->null_value()) return holder;
+ holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
+ if (holder == heap->null_value()) return heap->null_value();
}
Object* args_obj = sig->args();
// If there is no argument signature we're done
@@ -1291,13 +1302,9 @@
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
- for (; current != heap->null_value(); current = current->GetPrototype()) {
- if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
- *arg = current;
- break;
- }
- }
- if (current == heap->null_value()) *arg = heap->undefined_value();
+ current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
+ if (current == heap->null_value()) current = heap->undefined_value();
+ *arg = current;
}
return holder;
}
diff --git a/src/compiler.cc b/src/compiler.cc
index ff6e05d..5779aae 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -618,7 +618,6 @@
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
}
- result->code()->MakeYoung();
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -680,7 +679,6 @@
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
}
- result->code()->MakeYoung();
}
return result;
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index 1133b20..4982197 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -31,6 +31,7 @@
#include "cpu-profiler.h"
#include <new>
+#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
@@ -55,18 +56,11 @@
}
-TickSample* ProfilerEventsProcessor::StartTickSampleEvent() {
- if (!ticks_buffer_is_empty_ || ticks_buffer_is_initialized_) return NULL;
- ticks_buffer_is_initialized_ = true;
+TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
- ticks_buffer_ = TickSampleEventRecord(enqueue_order_);
- return &ticks_buffer_.sample;
-}
-
-
-void ProfilerEventsProcessor::FinishTickSampleEvent() {
- ASSERT(ticks_buffer_is_initialized_ && ticks_buffer_is_empty_);
- ticks_buffer_is_empty_ = false;
+ TickSampleEventRecord* evt =
+ new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
+ return &evt->sample;
}
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 08c82db..3cbac77 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -39,19 +39,19 @@
namespace v8 {
namespace internal {
+static const int kEventsBufferSize = 256 * KB;
+static const int kTickSamplesBufferChunkSize = 64 * KB;
+static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
- int period_in_useconds)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
- sampler_(sampler),
running_(true),
- period_in_useconds_(period_in_useconds),
- ticks_buffer_is_empty_(true),
- ticks_buffer_is_initialized_(false),
+ ticks_buffer_(sizeof(TickSampleEventRecord),
+ kTickSamplesBufferChunkSize,
+ kTickSamplesBufferChunksCount),
enqueue_order_(0) {
}
@@ -215,17 +215,23 @@
generator_->RecordTickSample(record.sample);
}
- if (ticks_buffer_is_empty_) return !ticks_from_vm_buffer_.IsEmpty();
- if (ticks_buffer_.order == dequeue_order) {
+ const TickSampleEventRecord* rec =
+ TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
+ if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
+ // Make a local copy of tick sample record to ensure that it won't
+ // be modified as we are processing it. This is possible as the
+ // sampler writes w/o any sync to the queue, so if the processor
+ // gets far behind, a record may be modified right under its
+ // feet.
+ TickSampleEventRecord record = *rec;
+ if (record.order == dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
- if (ticks_buffer_.sample.frames_count < 0
- || ticks_buffer_.sample.frames_count > TickSample::kMaxFramesCount) {
- ticks_buffer_.sample.frames_count = 0;
- }
- generator_->RecordTickSample(ticks_buffer_.sample);
- ticks_buffer_is_empty_ = true;
- ticks_buffer_is_initialized_ = false;
+ if (record.sample.frames_count < 0
+ || record.sample.frames_count > TickSample::kMaxFramesCount)
+ record.sample.frames_count = 0;
+ generator_->RecordTickSample(record.sample);
+ ticks_buffer_.FinishDequeue();
} else {
return true;
}
@@ -233,29 +239,22 @@
}
-void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time,
- unsigned* dequeue_order) {
- while (OS::Ticks() < stop_time) {
- if (ProcessTicks(*dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(dequeue_order);
- }
- }
-}
-
-
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
- int64_t stop_time = OS::Ticks() + period_in_useconds_;
- if (sampler_ != NULL) {
- sampler_->DoSample();
+ // Process ticks until we have any.
+ if (ProcessTicks(dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(&dequeue_order);
}
- ProcessEventsQueue(stop_time, &dequeue_order);
+ YieldCPU();
}
+ // Process remaining tick events.
+ ticks_buffer_.FlushResidualRecords();
+ // Perform processing until we have tick events, skip remaining code events.
while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
@@ -311,22 +310,15 @@
}
-TickSample* CpuProfiler::StartTickSampleEvent(Isolate* isolate) {
+TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
if (CpuProfiler::is_profiling(isolate)) {
- return isolate->cpu_profiler()->processor_->StartTickSampleEvent();
+ return isolate->cpu_profiler()->processor_->TickSampleEvent();
} else {
return NULL;
}
}
-void CpuProfiler::FinishTickSampleEvent(Isolate* isolate) {
- if (CpuProfiler::is_profiling(isolate)) {
- isolate->cpu_profiler()->processor_->FinishTickSampleEvent();
- }
-}
-
-
void CpuProfiler::DeleteAllProfiles() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->cpu_profiler() != NULL);
@@ -494,15 +486,13 @@
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
- Sampler* sampler = isolate->logger()->sampler();
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_,
- sampler,
- FLAG_cpu_profiler_sampling_period);
+ processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
+ processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@@ -515,13 +505,12 @@
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
- sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
- processor_->Start();
}
}
@@ -556,17 +545,16 @@
void CpuProfiler::StopProcessor() {
- NoBarrier_Store(&is_profiling_, false);
- processor_->Stop();
- processor_->Join();
Logger* logger = Isolate::Current()->logger();
- Sampler* sampler = logger->sampler();
+ Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
- sampler->SetHasProcessingThread(false);
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
+ NoBarrier_Store(&is_profiling_, false);
+ processor_->Stop();
+ processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index f4bc0c7..9cd4484 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -124,9 +124,7 @@
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
- int period_in_useconds);
+ explicit ProfilerEventsProcessor(ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -158,12 +156,11 @@
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
- // StartTickSampleEvent returns a pointer only if the ticks_buffer_ is empty,
- // FinishTickSampleEvent marks the ticks_buffer_ as filled.
- // Finish should be called only after successful Start (returning non-NULL
- // pointer).
- INLINE(TickSample* StartTickSampleEvent());
- INLINE(void FinishTickSampleEvent());
+ // Tick sample events are filled directly in the buffer of the circular
+ // queue (because the structure is of fixed width, but usually not all
+ // stack frame entries are filled.) This method returns a pointer to the
+ // next record of the buffer.
+ INLINE(TickSample* TickSampleEvent());
private:
union CodeEventsContainer {
@@ -176,19 +173,13 @@
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
- void ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
- Sampler* sampler_;
bool running_;
- // Sampling period in microseconds.
- const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- TickSampleEventRecord ticks_buffer_;
- bool ticks_buffer_is_empty_;
- bool ticks_buffer_is_initialized_;
+ SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
@@ -227,10 +218,7 @@
static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
- // Finish should be called only after successful Start (returning non-NULL
- // pointer).
- static TickSample* StartTickSampleEvent(Isolate* isolate);
- static void FinishTickSampleEvent(Isolate* isolate);
+ static TickSample* TickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 163a0bd..ea1a17d 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1306,9 +1306,12 @@
};
-ProtocolMessage.prototype.failed = function(message) {
+ProtocolMessage.prototype.failed = function(message, opt_details) {
this.success = false;
this.message = message;
+ if (IS_OBJECT(opt_details)) {
+ this.error_details = opt_details;
+ }
};
@@ -1355,6 +1358,9 @@
if (this.message) {
json.message = this.message;
}
+ if (this.error_details) {
+ json.error_details = this.error_details;
+ }
json.running = this.running;
return JSON.stringify(json);
};
@@ -1427,6 +1433,8 @@
this.scopesRequest_(request, response);
} else if (request.command == 'scope') {
this.scopeRequest_(request, response);
+ } else if (request.command == 'setVariableValue') {
+ this.setVariableValueRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
} else if (lol_is_enabled && request.command == 'getobj') {
@@ -1953,11 +1961,12 @@
};
-DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
+DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
+ function(scope_description) {
// Get the frame for which the scope or scopes are requested.
// With no frameNumber argument use the currently selected frame.
- if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
- frame_index = request.arguments.frameNumber;
+ if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
+ frame_index = scope_description.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
throw new Error('Invalid frame number');
}
@@ -1971,13 +1980,13 @@
// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
- function(request) {
- if (request.arguments && "functionHandle" in request.arguments) {
- if (!IS_NUMBER(request.arguments.functionHandle)) {
+DebugCommandProcessor.prototype.resolveScopeHolder_ =
+ function(scope_description) {
+ if (scope_description && "functionHandle" in scope_description) {
+ if (!IS_NUMBER(scope_description.functionHandle)) {
throw new Error('Function handle must be a number');
}
- var function_mirror = LookupMirror(request.arguments.functionHandle);
+ var function_mirror = LookupMirror(scope_description.functionHandle);
if (!function_mirror) {
throw new Error('Failed to find function object by handle');
}
@@ -1992,14 +2001,14 @@
}
// Get the frame for which the scopes are requested.
- var frame = this.frameForScopeRequest_(request);
+ var frame = this.resolveFrameFromScopeDescription_(scope_description);
return frame;
}
}
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
- var scope_holder = this.scopeHolderForScopeRequest_(request);
+ var scope_holder = this.resolveScopeHolder_(request.arguments);
// Fill all scopes for this frame or function.
var total_scopes = scope_holder.scopeCount();
@@ -2018,7 +2027,7 @@
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// Get the frame or function for which the scope is requested.
- var scope_holder = this.scopeHolderForScopeRequest_(request);
+ var scope_holder = this.resolveScopeHolder_(request.arguments);
// With no scope argument just return top scope.
var scope_index = 0;
@@ -2033,6 +2042,77 @@
};
+// Reads a value from a protocol description. The description may be a type
+// (for singletons), raw value (primitive types supported in JSON),
+// string value description plus type (for primitive values) or handle id.
+// Returns raw value or throws exception.
+DebugCommandProcessor.resolveValue_ = function(value_description) {
+ if ("handle" in value_description) {
+ var value_mirror = LookupMirror(value_description.handle);
+ if (!value_mirror) {
+ throw new Error("Failed to resolve value by handle, ' #" +
+ mapping.handle + "# not found");
+ }
+ return value_mirror.value();
+ } else if ("stringDescription" in value_description) {
+ if (value_description.type == BOOLEAN_TYPE) {
+ return Boolean(value_description.stringDescription);
+ } else if (value_description.type == NUMBER_TYPE) {
+ return Number(value_description.stringDescription);
+ } if (value_description.type == STRING_TYPE) {
+ return String(value_description.stringDescription);
+ } else {
+ throw new Error("Unknown type");
+ }
+ } else if ("value" in value_description) {
+ return value_description.value;
+ } else if (value_description.type == UNDEFINED_TYPE) {
+ return void 0;
+ } else if (value_description.type == NULL_TYPE) {
+ return null;
+ } else {
+ throw new Error("Failed to parse value description");
+ }
+};
+
+
+DebugCommandProcessor.prototype.setVariableValueRequest_ =
+ function(request, response) {
+ if (!request.arguments) {
+ response.failed('Missing arguments');
+ return;
+ }
+
+ if (IS_UNDEFINED(request.arguments.name)) {
+ response.failed('Missing variable name');
+ }
+ var variable_name = request.arguments.name;
+
+ var scope_description = request.arguments.scope;
+
+ // Get the frame or function for which the scope is requested.
+ var scope_holder = this.resolveScopeHolder_(scope_description);
+
+ if (IS_UNDEFINED(scope_description.number)) {
+ response.failed('Missing scope number');
+ }
+ var scope_index = %ToNumber(scope_description.number);
+
+ var scope = scope_holder.scope(scope_index);
+
+ var new_value =
+ DebugCommandProcessor.resolveValue_(request.arguments.newValue);
+
+ scope.setVariableValue(variable_name, new_value);
+
+ var new_value_mirror = MakeMirror(new_value);
+
+ response.body = {
+ newValue: new_value_mirror
+ };
+};
+
+
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@@ -2387,8 +2467,17 @@
var new_source = request.arguments.new_source;
- var result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
+ var result_description;
+ try {
+ result_description = Debug.LiveEdit.SetScriptSource(the_script,
+ new_source, preview_only, change_log);
+ } catch (e) {
+ if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
+ response.failed(e.message, e.details);
+ return;
+ }
+ throw e;
+ }
response.body = {change_log: change_log, result: result_description};
if (!preview_only && !this.running_ && result_description.stack_modified) {
@@ -2663,3 +2752,7 @@
}
return json;
}
+
+Debug.TestApi = {
+ CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
+};
diff --git a/src/debug.cc b/src/debug.cc
index ec25acc..ea1c084 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -786,9 +786,11 @@
"error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
- isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
- isolate->clear_pending_exception();
+ if (!exception.is_null()) {
+ isolate->set_pending_exception(*exception);
+ MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+ isolate->clear_pending_exception();
+ }
return false;
}
diff --git a/src/execution.cc b/src/execution.cc
index 8942fb3..67f67e8 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -211,6 +211,9 @@
Isolate* isolate = Isolate::Current();
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
+ if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
+ V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
+ }
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 61cd864..207fcee 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -350,10 +350,6 @@
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
-// cpu-profiler.cc
-DEFINE_int(cpu_profiler_sampling_period, 1000,
- "CPU profiler sampling period in microseconds")
-
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
@@ -402,7 +398,7 @@
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
-DEFINE_bool(flush_code_incrementally, false,
+DEFINE_bool(flush_code_incrementally, true,
"flush code that we expect not to use again (incrementally)")
DEFINE_bool(age_code, true,
"track un-executed functions to age code and flush only "
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 0006f8e..085f1d4 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -551,6 +551,51 @@
}
+bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
+ WeakSlotCallbackWithHeap can_skip) {
+ int last = 0;
+ bool any_group_was_visited = false;
+ for (int i = 0; i < object_groups_.length(); i++) {
+ ObjectGroup* entry = object_groups_.at(i);
+ ASSERT(entry != NULL);
+
+ Object*** objects = entry->objects_;
+ bool group_should_be_visited = false;
+ for (size_t j = 0; j < entry->length_; j++) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ if (!can_skip(isolate_->heap(), &object)) {
+ group_should_be_visited = true;
+ break;
+ }
+ }
+ }
+
+ if (!group_should_be_visited) {
+ object_groups_[last++] = entry;
+ continue;
+ }
+
+ // An object in the group requires visiting, so iterate over all
+ // objects in the group.
+ for (size_t j = 0; j < entry->length_; ++j) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ v->VisitPointer(&object);
+ any_group_was_visited = true;
+ }
+ }
+
+ // Once the entire group has been iterated over, set the object
+ // group to NULL so it won't be processed again.
+ entry->Dispose();
+ object_groups_.at(i) = NULL;
+ }
+ object_groups_.Rewind(last);
+ return any_group_was_visited;
+}
+
+
bool GlobalHandles::PostGarbageCollectionProcessing(
GarbageCollector collector) {
// Process weak global handle callbacks. This must be done after the
diff --git a/src/global-handles.h b/src/global-handles.h
index 482baef..904c5b5 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -206,6 +206,11 @@
// See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+ // Iterate over objects in object groups that have at least one object
+ // which requires visiting. The callback has to return true if objects
+ // can be skipped and false otherwise.
+ bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
+
// Add an object group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a garbage collection.
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 33bc4f9..de47c94 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -460,7 +460,7 @@
intptr_t change_in_bytes) {
ASSERT(HasBeenSetUp());
intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes >= 0) {
+ if (change_in_bytes > 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
diff --git a/src/heap.cc b/src/heap.cc
index ff791d8..dac28f3 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1338,7 +1338,8 @@
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
- while (IterateObjectGroups(&scavenge_visitor)) {
+ while (isolate()->global_handles()->IterateObjectGroups(
+ &scavenge_visitor, &IsUnscavengedHeapObject)) {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
isolate()->global_handles()->RemoveObjectGroups();
@@ -1383,51 +1384,6 @@
}
-// TODO(mstarzinger): Unify this method with
-// MarkCompactCollector::MarkObjectGroups().
-bool Heap::IterateObjectGroups(ObjectVisitor* scavenge_visitor) {
- List<ObjectGroup*>* object_groups =
- isolate()->global_handles()->object_groups();
-
- int last = 0;
- bool changed = false;
- for (int i = 0; i < object_groups->length(); i++) {
- ObjectGroup* entry = object_groups->at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_marked = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- if (!IsUnscavengedHeapObject(this, &object)) {
- group_marked = true;
- break;
- }
- }
- }
-
- if (!group_marked) {
- (*object_groups)[last++] = entry;
- continue;
- }
-
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- scavenge_visitor->VisitPointer(&object);
- changed = true;
- }
- }
-
- entry->Dispose();
- object_groups->at(i) = NULL;
- }
- object_groups->Rewind(last);
- return changed;
-}
-
-
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
diff --git a/src/heap.h b/src/heap.h
index 28b082a..606f787 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1901,8 +1901,6 @@
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
- bool IterateObjectGroups(ObjectVisitor* scavenge_visitor);
-
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
diff --git a/src/isolate.cc b/src/isolate.cc
index ef4e0af..57809ce 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1062,9 +1062,12 @@
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
+ PropagatePendingExceptionToExternalTryCatch();
+ if (has_pending_exception()) {
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ }
}
@@ -1366,6 +1369,24 @@
}
+MessageLocation Isolate::GetMessageLocation() {
+ ASSERT(has_pending_exception());
+
+ if (thread_local_top_.pending_exception_ != Failure::OutOfMemoryException() &&
+ thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ thread_local_top_.has_pending_message_ &&
+ !thread_local_top_.pending_message_obj_->IsTheHole() &&
+ thread_local_top_.pending_message_script_ != NULL) {
+ Handle<Script> script(thread_local_top_.pending_message_script_);
+ int start_pos = thread_local_top_.pending_message_start_pos_;
+ int end_pos = thread_local_top_.pending_message_end_pos_;
+ return MessageLocation(script, start_pos, end_pos);
+ }
+
+ return MessageLocation();
+}
+
+
void Isolate::TraceException(bool flag) {
FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
}
diff --git a/src/isolate.h b/src/isolate.h
index 2faee75..921add3 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -743,6 +743,8 @@
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
+ // Returns the pending message location, if any, or an unfilled structure.
+ MessageLocation GetMessageLocation();
Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 80ef395..09014f0 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -136,8 +136,7 @@
int length));
template <bool is_ascii, typename Char>
- INLINE(void SerializeString_(Vector<const Char> vector,
- Handle<String> string));
+ INLINE(void SerializeString_(Handle<String> string));
template <typename Char>
INLINE(bool DoNotEscape(Char c));
@@ -675,9 +674,8 @@
template <bool is_ascii, typename Char>
-void BasicJsonStringifier::SerializeString_(Vector<const Char> vector,
- Handle<String> string) {
- int length = vector.length();
+void BasicJsonStringifier::SerializeString_(Handle<String> string) {
+ int length = string->length();
Append_<is_ascii, char>('"');
// We make a rough estimate to find out if the current string can be
// serialized without allocating a new string part. The worst case length of
@@ -685,6 +683,8 @@
// is a more pessimistic estimate, but faster to calculate.
if (((part_length_ - current_index_) >> 3) > length) {
+ AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
if (is_ascii) {
SerializeStringUnchecked_(
vector.start(),
@@ -698,6 +698,7 @@
}
} else {
String* string_location = *string;
+ Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
Char c = vector[i];
if (DoNotEscape(c)) {
@@ -751,16 +752,16 @@
String::FlatContent flat = object->GetFlatContent();
if (is_ascii_) {
if (flat.IsAscii()) {
- SerializeString_<true, char>(flat.ToAsciiVector(), object);
+ SerializeString_<true, char>(object);
} else {
ChangeEncoding();
SerializeString(object);
}
} else {
if (flat.IsAscii()) {
- SerializeString_<false, char>(flat.ToAsciiVector(), object);
+ SerializeString_<false, char>(object);
} else {
- SerializeString_<false, uc16>(flat.ToUC16Vector(), object);
+ SerializeString_<false, uc16>(object);
}
}
}
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index cfcdb81..451b146 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -76,7 +76,17 @@
try {
new_compile_info = GatherCompileInfo(new_source, script);
} catch (e) {
- throw new Failure("Failed to compile new version of script: " + e);
+ var failure =
+ new Failure("Failed to compile new version of script: " + e);
+ if (e instanceof SyntaxError) {
+ var details = {
+ type: "liveedit_compile_error",
+ syntaxErrorMessage: e.message
+ };
+ CopyErrorPositionToDetails(e, details);
+ failure.details = details;
+ }
+ throw failure;
}
var root_new_node = BuildCodeInfoTree(new_compile_info);
@@ -978,6 +988,31 @@
return "LiveEdit Failure: " + this.message;
};
+ function CopyErrorPositionToDetails(e, details) {
+ function createPositionStruct(script, position) {
+ if (position == -1) return;
+ var location = script.locationFromPosition(position, true);
+ if (location == null) return;
+ return {
+ line: location.line + 1,
+ column: location.column + 1,
+ position: position
+ };
+ }
+
+ if (!("scriptObject" in e) || !("startPosition" in e)) {
+ return;
+ }
+
+ var script = e.scriptObject;
+
+ var position_struct = {
+ start: createPositionStruct(script, e.startPosition),
+ end: createPositionStruct(script, e.endPosition)
+ };
+ details.position = position_struct;
+ }
+
// A testing entry.
function GetPcFromSourcePos(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 574a376..f491e37 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -36,6 +36,7 @@
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
+#include "messages.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
@@ -925,11 +926,59 @@
Handle<Object> original_source = Handle<Object>(script->source());
script->set_source(*source);
isolate->set_active_function_info_listener(&listener);
- CompileScriptForTracker(isolate, script);
+
+ {
+ // Creating verbose TryCatch from public API is currently the only way to
+ // force code save location. We do not use the object directly.
+ v8::TryCatch try_catch;
+ try_catch.SetVerbose(true);
+
+ // A logical 'try' section.
+ CompileScriptForTracker(isolate, script);
+ }
+
+ // A logical 'catch' section.
+ Handle<JSObject> rethrow_exception;
+ if (isolate->has_pending_exception()) {
+ Handle<Object> exception(isolate->pending_exception()->ToObjectChecked());
+ MessageLocation message_location = isolate->GetMessageLocation();
+
+ isolate->clear_pending_message();
+ isolate->clear_pending_exception();
+
+ // If possible, copy positions from message object to exception object.
+ if (exception->IsJSObject() && !message_location.script().is_null()) {
+ rethrow_exception = Handle<JSObject>::cast(exception);
+
+ Factory* factory = isolate->factory();
+ Handle<String> start_pos_key =
+ factory->LookupAsciiSymbol("startPosition");
+ Handle<String> end_pos_key =
+ factory->LookupAsciiSymbol("endPosition");
+ Handle<String> script_obj_key =
+ factory->LookupAsciiSymbol("scriptObject");
+ Handle<Smi> start_pos(Smi::FromInt(message_location.start_pos()));
+ Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()));
+ Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
+ JSReceiver::SetProperty(
+ rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(
+ rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(
+ rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
+ }
+ }
+
+ // A logical 'finally' section.
isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
- return *(listener.GetResult());
+ if (rethrow_exception.is_null()) {
+ return *(listener.GetResult());
+ } else {
+ isolate->Throw(*rethrow_exception);
+ return 0;
+ }
}
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index aa1900b..a92bcca 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1745,6 +1745,16 @@
}
+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
+ Object** p) {
+ Object* o = *p;
+ ASSERT(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ return !mark.Get();
+}
+
+
void MarkCompactCollector::MarkSymbolTable() {
SymbolTable* symbol_table = heap()->symbol_table();
// Mark the symbol table itself.
@@ -1773,54 +1783,6 @@
}
-void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups =
- heap()->isolate()->global_handles()->object_groups();
-
- int last = 0;
- for (int i = 0; i < object_groups->length(); i++) {
- ObjectGroup* entry = object_groups->at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_marked = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- if (mark.Get()) {
- group_marked = true;
- break;
- }
- }
- }
-
- if (!group_marked) {
- (*object_groups)[last++] = entry;
- continue;
- }
-
- // An object in the group is marked, so mark as grey all white heap
- // objects in the group.
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- MarkObject(heap_object, mark);
- }
- }
-
- // Once the entire group has been colored grey, set the object group
- // to NULL so it won't be processed again.
- entry->Dispose();
- object_groups->at(i) = NULL;
- }
- object_groups->Rewind(last);
-}
-
-
void MarkCompactCollector::MarkImplicitRefGroups() {
List<ImplicitRefGroup*>* ref_groups =
heap()->isolate()->global_handles()->implicit_ref_groups();
@@ -1939,11 +1901,12 @@
}
-void MarkCompactCollector::ProcessExternalMarking() {
+void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
bool work_to_do = true;
ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
- MarkObjectGroups();
+ heap()->isolate()->global_handles()->IterateObjectGroups(
+ visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups();
work_to_do = !marking_deque_.IsEmpty();
ProcessMarkingDeque();
@@ -2022,7 +1985,7 @@
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic.
- ProcessExternalMarking();
+ ProcessExternalMarking(&root_visitor);
// The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable
@@ -2041,7 +2004,7 @@
// Repeat host application specific marking to mark unmarked objects
// reachable from the weak roots.
- ProcessExternalMarking();
+ ProcessExternalMarking(&root_visitor);
AfterMarking();
}
diff --git a/src/mark-compact.h b/src/mark-compact.h
index b652e22..9a0b014 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -756,17 +756,13 @@
// symbol table are weak.
void MarkSymbolTable();
- // Mark objects in object groups that have at least one object in the
- // group marked.
- void MarkObjectGroups();
-
// Mark objects in implicit references groups if their parent object
// is marked.
void MarkImplicitRefGroups();
// Mark all objects which are reachable due to host application
// logic like object groups or implicit references' groups.
- void ProcessExternalMarking();
+ void ProcessExternalMarking(RootMarkingVisitor* visitor);
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
@@ -790,6 +786,7 @@
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
+ static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index a5331a0..7f1a05a 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1844,10 +1844,14 @@
frame.details_.frameId(),
frame.details_.inlinedFrameIndex(),
index);
+ this.frame_id_ = frame.details_.frameId();
+ this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
} else {
this.details_ = %GetFunctionScopeDetails(fun.value(), index);
+ this.fun_value_ = fun.value();
this.break_id_ = undefined;
}
+ this.index_ = index;
}
@@ -1867,6 +1871,22 @@
};
+ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
+ var raw_res;
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_,
+ this.inlined_frame_id_, this.index_, name, new_value);
+ } else {
+ raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
+ name, new_value);
+ }
+ if (!raw_res) {
+ throw new Error("Failed to set variable value");
+ }
+};
+
+
/**
* Mirror object for scope of frame or function. Either frame or function must
* be specified.
@@ -1914,6 +1934,11 @@
};
+ScopeMirror.prototype.setVariableValue = function(name, new_value) {
+ this.details_.setVariableValueImpl(name, new_value);
+};
+
+
/**
* Mirror object for script source.
* @param {Script} script The script object
diff --git a/src/objects.cc b/src/objects.cc
index 324b10c..c5ab315 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -7972,7 +7972,6 @@
ASSERT(code != NULL);
ASSERT(function->context()->native_context() == code_map->get(index - 1));
function->ReplaceCode(code);
- code->MakeYoung();
}
@@ -8841,14 +8840,6 @@
}
-void Code::MakeYoung() {
- byte* sequence = FindCodeAgeSequence();
- if (sequence != NULL) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
- }
-}
-
-
void Code::MakeOlder(MarkingParity current_parity) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
@@ -9439,8 +9430,10 @@
// A non-configurable property will cause the truncation operation to
// stop at this index.
if (attributes == DONT_DELETE) break;
- // TODO(adamk): Don't fetch the old value if it's an accessor.
- old_values.Add(Object::GetElement(self, i));
+ old_values.Add(
+ self->GetLocalElementAccessorPair(i) == NULL
+ ? Object::GetElement(self, i)
+ : Handle<Object>::cast(isolate->factory()->the_hole_value()));
indices.Add(isolate->factory()->Uint32ToString(i));
}
diff --git a/src/objects.h b/src/objects.h
index c476692..9737e7f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -4573,7 +4573,6 @@
// Code aging
static void MakeCodeAgeSequenceYoung(byte* sequence);
- void MakeYoung();
void MakeOlder(MarkingParity);
static bool IsYoungSequence(byte* sequence);
bool IsOld();
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 028aae3..24e256a 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -699,7 +699,7 @@
memset(&context, 0, sizeof(context));
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
@@ -720,7 +720,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
- CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
@@ -775,11 +774,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -794,12 +788,4 @@
}
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 3ec02b7..1da4605 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -685,7 +685,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -707,7 +707,6 @@
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
@@ -891,11 +890,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -910,12 +904,4 @@
}
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 5609af0..ec48d63 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1025,7 +1025,6 @@
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
-
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
@@ -1040,7 +1039,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -1076,74 +1075,16 @@
#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
-class CpuProfilerSignalHandler {
- public:
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- ScopedLock lock(mutex_);
- if (signal_handler_installed_counter_ > 0) {
- signal_handler_installed_counter_++;
- return;
- }
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0) {
- signal_handler_installed_counter_++;
- }
- }
-
- static void RestoreSignalHandler() {
- ScopedLock lock(mutex_);
- if (signal_handler_installed_counter_ == 0)
- return;
- if (signal_handler_installed_counter_ == 1) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- }
- signal_handler_installed_counter_--;
- }
-
- static bool signal_handler_installed() {
- return signal_handler_installed_counter_ > 0;
- }
-
- private:
- static int signal_handler_installed_counter_;
- static struct sigaction old_signal_handler_;
- static Mutex* mutex_;
-};
-
-
-int CpuProfilerSignalHandler::signal_handler_installed_counter_ = 0;
-struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
-Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
-
-
class Sampler::PlatformData : public Malloced {
public:
- PlatformData()
- : vm_tgid_(getpid()),
- vm_tid_(GetThreadID()) {}
+ PlatformData() : vm_tid_(GetThreadID()) {}
- void SendProfilingSignal() {
- if (!CpuProfilerSignalHandler::signal_handler_installed()) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-#endif
- }
+ int vm_tid() const { return vm_tid_; }
private:
- const int vm_tgid_;
const int vm_tid_;
};
@@ -1159,11 +1100,28 @@
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+ vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
@@ -1184,6 +1142,7 @@
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
+ RestoreSignalHandler();
}
}
@@ -1195,13 +1154,18 @@
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
return;
}
Sleep(HALF_INTERVAL);
@@ -1211,7 +1175,8 @@
Sleep(HALF_INTERVAL);
} else {
if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
return;
}
}
@@ -1226,9 +1191,10 @@
}
}
- static void DoCpuProfile(Sampler* sampler, void*) {
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
- sampler->platform_data()->SendProfilingSignal();
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
@@ -1236,6 +1202,16 @@
sampler->isolate()->runtime_profiler()->NotifyTick();
}
+ void SendProfilingSignal(int tid) {
+ if (!signal_handler_installed_) return;
+ // Glibc doesn't provide a wrapper for tgkill(2).
+#if defined(ANDROID)
+ syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
+#else
+ syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
+#endif
+ }
+
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
@@ -1258,12 +1234,15 @@
#endif // ANDROID
}
+ const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
@@ -1272,6 +1251,8 @@
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
@@ -1299,13 +1280,11 @@
}
#endif
SignalSender::SetUp();
- CpuProfilerSignalHandler::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
- CpuProfilerSignalHandler::TearDown();
delete limit_mutex;
}
@@ -1315,7 +1294,6 @@
interval_(interval),
profiling_(false),
active_(false),
- has_processing_thread_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
@@ -1327,11 +1305,6 @@
}
-void Sampler::DoSample() {
- platform_data()->SendProfilingSignal();
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -1346,14 +1319,4 @@
}
-void Sampler::StartSampling() {
- CpuProfilerSignalHandler::InstallSignalHandler();
-}
-
-
-void Sampler::StopSampling() {
- CpuProfilerSignalHandler::RestoreSignalHandler();
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 01dcb1d..22d2bcf 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -825,7 +825,7 @@
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
@@ -863,7 +863,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
- CpuProfiler::FinishTickSampleEvent(sampler->isolate());
thread_resume(profiled_thread);
}
@@ -916,11 +915,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -935,12 +929,4 @@
}
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index a42c5d4..ccd2123 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -514,12 +514,4 @@
}
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index b9f133e..292927b 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -738,7 +738,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -768,7 +768,6 @@
#endif // __NetBSD__
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
@@ -971,11 +970,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -990,12 +984,4 @@
}
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 7431e80..5652741 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -672,7 +672,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -686,7 +686,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
class Sampler::PlatformData : public Malloced {
@@ -890,11 +889,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -908,13 +902,4 @@
SetActive(false);
}
-
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 261a946..b3ca402 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -2054,7 +2054,7 @@
memset(&context, 0, sizeof(context));
TickSample sample_obj;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
@@ -2075,7 +2075,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
- CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
@@ -2130,11 +2129,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -2149,12 +2143,4 @@
}
-void Sampler::StartSampling() {
-}
-
-
-void Sampler::StopSampling() {
-}
-
-
} } // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
index 67f6792..6f75ca8 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -753,9 +753,6 @@
IncSamplesTaken();
}
- // Performs platform-specific stack sampling.
- void DoSample();
-
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
@@ -764,28 +761,10 @@
void Start();
void Stop();
- // Whether the sampling thread should use this Sampler for CPU profiling?
- bool IsProfiling() const {
- return NoBarrier_Load(&profiling_) > 0 &&
- !NoBarrier_Load(&has_processing_thread_);
- }
- // Perform platform-specific initialization before DoSample() may be invoked.
- void StartSampling();
- // Perform platform-specific cleanup after samping.
- void StopSampling();
- void IncreaseProfilingDepth() {
- if (NoBarrier_AtomicIncrement(&profiling_, 1) == 1) {
- StartSampling();
- }
- }
- void DecreaseProfilingDepth() {
- if (!NoBarrier_AtomicIncrement(&profiling_, -1)) {
- StopSampling();
- }
- }
- void SetHasProcessingThread(bool value) {
- NoBarrier_Store(&has_processing_thread_, value);
- }
+ // Is the sampler used for profiling?
+ bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
+ void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
+ void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
@@ -812,7 +791,6 @@
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
- Atomic32 has_processing_thread_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
diff --git a/src/runtime.cc b/src/runtime.cc
index 5106be8..5cf2d44 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2762,6 +2762,23 @@
}
+void FindTwoByteStringIndices(const Vector<const uc16> subject,
+ uc16 pattern,
+ ZoneList<int>* indices,
+ unsigned int limit,
+ Zone* zone) {
+ ASSERT(limit > 0);
+ const uc16* subject_start = subject.start();
+ const uc16* subject_end = subject_start + subject.length();
+ for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
+ if (*pos == pattern) {
+ indices->Add(static_cast<int>(pos - subject_start), zone);
+ limit--;
+ }
+ }
+}
+
+
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@@ -2826,19 +2843,37 @@
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
if (pattern_content.IsAscii()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- indices,
- limit,
- zone);
+ Vector<const char> pattern_vector = pattern_content.ToAsciiVector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector,
+ pattern_vector[0],
+ indices,
+ limit,
+ zone);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ indices,
+ limit,
+ zone);
+ }
} else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- indices,
- limit,
- zone);
+ Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector,
+ pattern_vector[0],
+ indices,
+ limit,
+ zone);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ indices,
+ limit,
+ zone);
+ }
}
}
}
@@ -10867,6 +10902,52 @@
}
+// This method copies the structure of the MaterializeClosure method above.
+static bool SetClosureVariableValue(Isolate* isolate,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ ASSERT(context->IsFunctionContext());
+
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ // Context locals to the context extension.
+ for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+ Handle<String> next_name(scope_info->ContextLocalName(i));
+ if (variable_name->Equals(*next_name)) {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ int context_index =
+ scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
+ if (context_index < 0) {
+ return false;
+ }
+ context->set(context_index, *new_value);
+ return true;
+ }
+ }
+
+ // Properties from the function context extension. This will
+ // be variables introduced by eval.
+ if (context->has_extension()) {
+ Handle<JSObject> ext(JSObject::cast(context->extension()));
+ if (ext->HasProperty(*variable_name)) {
+ // We don't expect this to do anything except replacing property value.
+ SetProperty(isolate,
+ ext,
+ variable_name,
+ new_value,
+ NONE,
+ kNonStrictMode);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
// Create a plain JSObject which materializes the scope for the specified
// catch context.
static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
@@ -11147,6 +11228,33 @@
return Handle<JSObject>();
}
+ bool SetVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ ASSERT(!failed_);
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ break;
+ case ScopeIterator::ScopeTypeLocal:
+ // TODO(2399): implement.
+ break;
+ case ScopeIterator::ScopeTypeWith:
+ break;
+ case ScopeIterator::ScopeTypeCatch:
+ // TODO(2399): implement.
+ break;
+ case ScopeIterator::ScopeTypeClosure:
+ return SetClosureVariableValue(isolate_, CurrentContext(),
+ variable_name, new_value);
+ case ScopeIterator::ScopeTypeBlock:
+ // TODO(2399): should we implement it?
+ break;
+ case ScopeIterator::ScopeTypeModule:
+ // TODO(2399): should we implement it?
+ break;
+ }
+ return false;
+ }
+
Handle<ScopeInfo> CurrentScopeInfo() {
ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
@@ -11386,6 +11494,64 @@
}
+static bool SetScopeVariableValue(ScopeIterator* it, int index,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ for (int n = 0; !it->Done() && n < index; it->Next()) {
+ n++;
+ }
+ if (it->Done()) {
+ return false;
+ }
+ return it->SetVariableValue(variable_name, new_value);
+}
+
+
+// Change variable value in closure or local scope
+// args[0]: number or JsFunction: break id or function
+// args[1]: number: frame index (when arg[0] is break id)
+// args[2]: number: inlined frame index (when arg[0] is break id)
+// args[3]: number: scope index
+// args[4]: string: variable name
+// args[5]: object: new value
+//
+// Return true if success and false otherwise
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 6);
+
+ // Check arguments.
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
+ CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4);
+ Handle<Object> new_value = args.at<Object>(5);
+
+ bool res;
+ if (args[0]->IsNumber()) {
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ ScopeIterator it(isolate, frame, inlined_jsframe_index);
+ res = SetScopeVariableValue(&it, index, variable_name, new_value);
+ } else {
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ ScopeIterator it(isolate, fun);
+ res = SetScopeVariableValue(&it, index, variable_name, new_value);
+ }
+
+ return isolate->heap()->ToBoolean(res);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
diff --git a/src/runtime.h b/src/runtime.h
index 19ff62d..9d53c35 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -433,6 +433,7 @@
F(GetScopeDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
+ F(SetScopeVariableValue, 6, 1) \
F(DebugPrintScopes, 0, 1) \
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 1f708b3..bfed6bb 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1562,6 +1562,7 @@
while (!object.is_identical_to(holder)) {
if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
+ if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
++depth;
}
if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
diff --git a/src/version.cc b/src/version.cc
index c54ba27..5b25237 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 15
-#define BUILD_NUMBER 7
-#define PATCH_LEVEL 2
+#define BUILD_NUMBER 8
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0