Version 2.5.1
Fixed bug causing spurious out-of-memory exceptions (issue http://crbug.com/54580).
Fixed compilation error on Solaris platform (issue 901).
Fixed error in strtod (string to floating-point number conversion) due to glibc's use of 80-bit floats in the FPU on 32-bit Linux.
Adjusted randomized allocations of executable memory to have 64k granularity (issue http://crbug.com/56036).
Supported profiling using kernel perf_events on Linux. Added ll_prof script to tools and --ll-prof flag to V8.
git-svn-id: http://v8.googlecode.com/svn/trunk@5675 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index e22ed11..8995d48 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -116,7 +116,6 @@
variables.cc
version.cc
virtual-frame.cc
- vm-state.cc
zone.cc
"""),
'arch:arm': Split("""
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index c9ef29a..b3b0766 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -935,11 +935,8 @@
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &not_two_smis);
- __ sub(r0, r1, r0, SetCC);
- __ b(vc, &smi_done);
- // Correct the sign in case of overflow.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ bind(&smi_done);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
__ Ret();
__ bind(&not_two_smis);
} else if (FLAG_debug_code) {
@@ -2300,13 +2297,7 @@
void StackCheckStub::Generate(MacroAssembler* masm) {
- // Do tail-call to runtime routine. Runtime routines expect at least one
- // argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-
- __ Ret();
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
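
A note on the first hunk above: the old sequence subtracted the tagged smis directly (sub with SetCC) and patched up the sign with rsb when the subtraction overflowed. The new sequence untags both operands with an arithmetic shift right before subtracting, which cannot overflow. A minimal C++ sketch of the argument, assuming 32-bit smis carrying a signed 31-bit payload above a low zero tag bit (an illustration, not the V8 source):

    #include <cstdint>

    int32_t SmiCompareResult(int32_t lhs_tagged, int32_t rhs_tagged) {
      // Arithmetic shift right by one (ASR) recovers the 31-bit payload.
      int32_t lhs = lhs_tagged >> 1;
      int32_t rhs = rhs_tagged >> 1;
      // Each value fits in 31 bits, so the difference always fits in 32 bits
      // with the correct sign -- no overflow fix-up is needed.
      return lhs - rhs;
    }
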
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 8f45886..37bb1f0 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -142,7 +142,6 @@
void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
if (by != 0) {
- Label inside_string;
__ add(current_input_offset(),
current_input_offset(), Operand(by * char_size()));
}
@@ -927,6 +926,19 @@
}
+void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ cmp(current_input_offset(), Operand(-by * char_size()));
+ __ b(ge, &after_position);
+ __ mov(current_input_offset(), Operand(-by * char_size()));
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(r0, Operand(to));
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 93a74d7..4e09f67 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -100,6 +100,7 @@
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/src/ast.cc b/src/ast.cc
index f47dffd..92f1496 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -398,39 +398,70 @@
}
-bool RegExpAssertion::IsAnchored() {
+bool RegExpAssertion::IsAnchoredAtStart() {
return type() == RegExpAssertion::START_OF_INPUT;
}
-bool RegExpAlternative::IsAnchored() {
+bool RegExpAssertion::IsAnchoredAtEnd() {
+ return type() == RegExpAssertion::END_OF_INPUT;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtStart() {
ZoneList<RegExpTree*>* nodes = this->nodes();
for (int i = 0; i < nodes->length(); i++) {
RegExpTree* node = nodes->at(i);
- if (node->IsAnchored()) { return true; }
+ if (node->IsAnchoredAtStart()) { return true; }
if (node->max_match() > 0) { return false; }
}
return false;
}
-bool RegExpDisjunction::IsAnchored() {
+bool RegExpAlternative::IsAnchoredAtEnd() {
+ ZoneList<RegExpTree*>* nodes = this->nodes();
+ for (int i = nodes->length() - 1; i >= 0; i--) {
+ RegExpTree* node = nodes->at(i);
+ if (node->IsAnchoredAtEnd()) { return true; }
+ if (node->max_match() > 0) { return false; }
+ }
+ return false;
+}
+
+
+bool RegExpDisjunction::IsAnchoredAtStart() {
ZoneList<RegExpTree*>* alternatives = this->alternatives();
for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchored())
+ if (!alternatives->at(i)->IsAnchoredAtStart())
return false;
}
return true;
}
-bool RegExpLookahead::IsAnchored() {
- return is_positive() && body()->IsAnchored();
+bool RegExpDisjunction::IsAnchoredAtEnd() {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ for (int i = 0; i < alternatives->length(); i++) {
+ if (!alternatives->at(i)->IsAnchoredAtEnd())
+ return false;
+ }
+ return true;
}
-bool RegExpCapture::IsAnchored() {
- return body()->IsAnchored();
+bool RegExpLookahead::IsAnchoredAtStart() {
+ return is_positive() && body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtStart() {
+ return body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtEnd() {
+ return body()->IsAnchoredAtEnd();
}
diff --git a/src/ast.h b/src/ast.h
index e8d54e4..a01e48d 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1523,7 +1523,8 @@
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) = 0;
virtual bool IsTextElement() { return false; }
- virtual bool IsAnchored() { return false; }
+ virtual bool IsAnchoredAtStart() { return false; }
+ virtual bool IsAnchoredAtEnd() { return false; }
virtual int min_match() = 0;
virtual int max_match() = 0;
// Returns the interval of registers used for captures within this
@@ -1548,7 +1549,8 @@
virtual RegExpDisjunction* AsDisjunction();
virtual Interval CaptureRegisters();
virtual bool IsDisjunction();
- virtual bool IsAnchored();
+ virtual bool IsAnchoredAtStart();
+ virtual bool IsAnchoredAtEnd();
virtual int min_match() { return min_match_; }
virtual int max_match() { return max_match_; }
ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
@@ -1568,7 +1570,8 @@
virtual RegExpAlternative* AsAlternative();
virtual Interval CaptureRegisters();
virtual bool IsAlternative();
- virtual bool IsAnchored();
+ virtual bool IsAnchoredAtStart();
+ virtual bool IsAnchoredAtEnd();
virtual int min_match() { return min_match_; }
virtual int max_match() { return max_match_; }
ZoneList<RegExpTree*>* nodes() { return nodes_; }
@@ -1595,7 +1598,8 @@
RegExpNode* on_success);
virtual RegExpAssertion* AsAssertion();
virtual bool IsAssertion();
- virtual bool IsAnchored();
+ virtual bool IsAnchoredAtStart();
+ virtual bool IsAnchoredAtEnd();
virtual int min_match() { return 0; }
virtual int max_match() { return 0; }
Type type() { return type_; }
@@ -1768,7 +1772,8 @@
RegExpCompiler* compiler,
RegExpNode* on_success);
virtual RegExpCapture* AsCapture();
- virtual bool IsAnchored();
+ virtual bool IsAnchoredAtStart();
+ virtual bool IsAnchoredAtEnd();
virtual Interval CaptureRegisters();
virtual bool IsCapture();
virtual int min_match() { return body_->min_match(); }
@@ -1800,7 +1805,7 @@
virtual RegExpLookahead* AsLookahead();
virtual Interval CaptureRegisters();
virtual bool IsLookahead();
- virtual bool IsAnchored();
+ virtual bool IsAnchoredAtStart();
virtual int min_match() { return 0; }
virtual int max_match() { return 0; }
RegExpTree* body() { return body_; }
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index aa8d8e5..d7491e1 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1814,6 +1814,11 @@
i::Counters::contexts_created_from_scratch.Increment();
}
+ // Add this context to the weak list of global contexts.
+ (*global_context_)->set(Context::NEXT_CONTEXT_LINK,
+ Heap::global_contexts_list());
+ Heap::set_global_contexts_list(*global_context_);
+
result_ = global_context_;
}
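
The hunk above threads every freshly bootstrapped global context onto a weak list headed in the heap. A sketch of the intrusive head insertion, with hypothetical plain-C++ stand-ins for the context slot and the heap root (the real code goes through the NEXT_CONTEXT_LINK slot and Heap::set_global_contexts_list):

    struct Context {
      Context* next_context_link;  // stand-in for the NEXT_CONTEXT_LINK slot
    };

    Context* global_contexts_list = nullptr;  // undefined_value() in the heap

    void AddToGlobalContextsList(Context* context) {
      // Push the new context at the head of the list, as the hunk above does.
      context->next_context_link = global_contexts_list;
      global_contexts_list = context;
    }
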
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index bcb34c8..93218ea 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -88,7 +88,8 @@
V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */
+V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \
+V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */
#define DECLARE_BYTECODES(name, code, length) \
static const int BC_##name = code;
diff --git a/src/contexts.h b/src/contexts.h
index 78dda6a..9722a93 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -225,7 +225,15 @@
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
- GLOBAL_CONTEXT_SLOTS
+
+ // Properties from here are treated as weak references by the full GC.
+ // Scavenge treats them as strong references.
+ NEXT_CONTEXT_LINK,
+
+ // Total number of slots.
+ GLOBAL_CONTEXT_SLOTS,
+
+ FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK
};
// Direct slot access.
@@ -333,6 +341,17 @@
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
+ static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
+
+ // GC support.
+ typedef FixedBodyDescriptor<
+ kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
+
+ typedef FixedBodyDescriptor<
+ kHeaderSize,
+ kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
+ kSize> MarkCompactBodyDescriptor;
+
private:
// Unchecked access to the slots.
Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
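
The two body descriptors encode the strong/weak split declared in the enum: scavenges visit every slot of a global context, while mark-compact stops before FIRST_WEAK_SLOT, so marking never traverses NEXT_CONTEXT_LINK and dead contexts can be unlinked later by ProcessWeakReferences. A compilable sketch of the two visit ranges, using stand-in types rather than the V8 visitors:

    struct Object {};
    static void VisitSlot(Object** slot) { (void)slot; }  // stand-in visitor

    // Scavenge: every slot, including NEXT_CONTEXT_LINK, is strong.
    void VisitContextForScavenge(Object** slots, int global_context_slots) {
      for (int i = 0; i < global_context_slots; i++) VisitSlot(&slots[i]);
    }

    // Full GC: stop before the weak slots; the weak list is processed separately.
    void VisitContextForMarkCompact(Object** slots, int first_weak_slot) {
      for (int i = 0; i < first_weak_slot; i++) VisitSlot(&slots[i]);
    }
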
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index acf3349..da19a45 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -188,6 +188,20 @@
}
+void ProfilerEventsProcessor::ProcessMovedFunctions() {
+ for (int i = 0; i < moved_functions_.length(); ++i) {
+ JSFunction* function = moved_functions_[i];
+ CpuProfiler::FunctionCreateEvent(function);
+ }
+ moved_functions_.Clear();
+}
+
+
+void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
+ moved_functions_.Add(function);
+}
+
+
void ProfilerEventsProcessor::RegExpCodeCreateEvent(
Logger::LogEventsAndTags tag,
const char* prefix,
@@ -426,8 +440,12 @@
}
-void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
- HeapObject* source) {
+void CpuProfiler::ProcessMovedFunctions() {
+ singleton_->processor_->ProcessMovedFunctions();
+}
+
+
+void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
// This function is called from GC iterators (during Scavenge,
// MC, and MS), so marking bits can be set on objects. That's
// why unchecked accessors are used here.
@@ -436,27 +454,7 @@
if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
|| singleton_->processor_->IsKnownFunction(function->address())) return;
- int security_token_id = TokenEnumerator::kNoSecurityToken;
- // In debug mode, assertions may fail for contexts,
- // and we can live without security tokens in debug mode.
-#ifndef DEBUG
- if (function->unchecked_context()->IsContext()) {
- security_token_id = singleton_->token_enumerator_->GetTokenId(
- function->context()->global_context()->security_token());
- }
- // Security token may not be moved yet.
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
- if (old_function->unchecked_context()->IsContext()) {
- security_token_id = singleton_->token_enumerator_->GetTokenId(
- old_function->context()->global_context()->security_token());
- }
- }
-#endif
- singleton_->processor_->FunctionCreateEvent(
- function->address(),
- function->unchecked_code()->address(),
- security_token_id);
+ singleton_->processor_->RememberMovedFunction(function);
}
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 86f9f67..d3158d7 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -165,6 +165,8 @@
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
bool IsKnownFunction(Address start);
+ void ProcessMovedFunctions();
+ void RememberMovedFunction(JSFunction* function);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -202,6 +204,7 @@
// Used from the VM thread.
HashMap* known_functions_;
+ List<JSFunction*> moved_functions_;
};
} } // namespace v8::internal
@@ -251,17 +254,18 @@
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
+ static void CodeMovingGCEvent() {}
static void CodeMoveEvent(Address from, Address to);
static void CodeDeleteEvent(Address from);
static void FunctionCreateEvent(JSFunction* function);
// Reports function creation in case we had missed it (e.g.
// if it was created from compiled code).
- static void FunctionCreateEventFromMove(JSFunction* function,
- HeapObject* source);
+ static void FunctionCreateEventFromMove(JSFunction* function);
static void FunctionMoveEvent(Address from, Address to);
static void FunctionDeleteEvent(Address from);
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
+ static void ProcessMovedFunctions();
static void SetterCallbackEvent(String* name, Address entry_point);
static INLINE(bool is_profiling()) {
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 84a0eaa..2474c62 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -412,6 +412,7 @@
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
+DEFINE_bool(ll_prof, false, "Enable low-level Linux profiler.")
//
// Heap protection flags
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 27a14bc..104292d 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -76,7 +76,7 @@
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
Heap::allocation_timeout_-- <= 0) {
- return Failure::RetryAfterGC(size_in_bytes, space);
+ return Failure::RetryAfterGC(space);
}
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
@@ -389,8 +389,12 @@
}
+#ifdef DEBUG
#define GC_GREEDY_CHECK() \
- ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
+ if (FLAG_gc_greedy) v8::internal::Heap::GarbageCollectionGreedyCheck()
+#else
+#define GC_GREEDY_CHECK() { }
+#endif
// Calls the FUNCTION_CALL function and retries it up to three times
@@ -409,8 +413,7 @@
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
} \
if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
- Heap::CollectGarbage(Failure::cast(__object__)->requested(), \
- Failure::cast(__object__)->allocation_space()); \
+ Heap::CollectGarbage(Failure::cast(__object__)->allocation_space()); \
__object__ = FUNCTION_CALL; \
if (!__object__->IsFailure()) RETURN_VALUE; \
if (__object__->IsOutOfMemoryFailure()) { \
diff --git a/src/heap.cc b/src/heap.cc
index 23bfbd8..675639a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -54,6 +54,7 @@
String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
+Object* Heap::global_contexts_list_;
NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
@@ -420,7 +421,7 @@
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
MarkCompactCollector::SetForceCompaction(force_compaction);
- CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
+ CollectGarbage(OLD_POINTER_SPACE, collectionPolicy);
MarkCompactCollector::SetForceCompaction(false);
}
@@ -431,8 +432,7 @@
}
-bool Heap::CollectGarbage(int requested_size,
- AllocationSpace space,
+void Heap::CollectGarbage(AllocationSpace space,
CollectionPolicy collectionPolicy) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@@ -469,25 +469,8 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) HeapProfiler::WriteSample();
+ if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif
-
- switch (space) {
- case NEW_SPACE:
- return new_space_.Available() >= requested_size;
- case OLD_POINTER_SPACE:
- return old_pointer_space_->Available() >= requested_size;
- case OLD_DATA_SPACE:
- return old_data_space_->Available() >= requested_size;
- case CODE_SPACE:
- return code_space_->Available() >= requested_size;
- case MAP_SPACE:
- return map_space_->Available() >= requested_size;
- case CELL_SPACE:
- return cell_space_->Available() >= requested_size;
- case LO_SPACE:
- return lo_space_->Available() >= requested_size;
- }
- return false;
}
@@ -542,27 +525,27 @@
while (gc_performed) {
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
- Heap::CollectGarbage(new_space_size, NEW_SPACE);
+ Heap::CollectGarbage(NEW_SPACE);
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
- Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+ Heap::CollectGarbage(OLD_POINTER_SPACE);
gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
- Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+ Heap::CollectGarbage(OLD_DATA_SPACE);
gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
- Heap::CollectGarbage(code_space_size, CODE_SPACE);
+ Heap::CollectGarbage(CODE_SPACE);
gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
- Heap::CollectGarbage(map_space_size, MAP_SPACE);
+ Heap::CollectGarbage(MAP_SPACE);
gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
- Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+ Heap::CollectGarbage(CELL_SPACE);
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
@@ -574,7 +557,7 @@
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
- Heap::CollectGarbage(large_object_size, LO_SPACE);
+ Heap::CollectGarbage(LO_SPACE);
gc_performed = true;
}
}
@@ -624,19 +607,14 @@
}
-class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor {
- virtual void VisitThread(ThreadLocalTop* top) {
- Context* context = top->context_;
- if (context == NULL) return;
- context->global()->global_context()->normalized_map_cache()->Clear();
- }
-};
-
-
void Heap::ClearNormalizedMapCaches() {
if (Bootstrapper::IsActive()) return;
- ClearThreadNormalizedMapCachesVisitor visitor;
- ThreadManager::IterateArchivedThreads(&visitor);
+
+ Object* context = global_contexts_list_;
+ while (!context->IsUndefined()) {
+ Context::cast(context)->normalized_map_cache()->Clear();
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ }
}
@@ -685,6 +663,10 @@
void Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer,
CollectionPolicy collectionPolicy) {
+ if (collector != SCAVENGER) {
+ PROFILE(CodeMovingGCEvent());
+ }
+
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
@@ -1034,6 +1016,9 @@
}
}
+ // Scavenge object reachable from the global contexts list directly.
+ scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
+
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
@@ -1101,6 +1086,44 @@
}
+void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
+ Object* head = undefined_value();
+ Context* tail = NULL;
+ Object* candidate = global_contexts_list_;
+ while (!candidate->IsUndefined()) {
+ // Check whether to keep the candidate in the list.
+ Context* candidate_context = reinterpret_cast<Context*>(candidate);
+ Object* retain = retainer->RetainAs(candidate);
+ if (retain != NULL) {
+ if (head->IsUndefined()) {
+ // First element in the list.
+ head = candidate_context;
+ } else {
+ // Subsequent elements in the list.
+ ASSERT(tail != NULL);
+ tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+ candidate_context,
+ UPDATE_WRITE_BARRIER);
+ }
+ // Retained context is new tail.
+ tail = candidate_context;
+ }
+ // Move to next element in the list.
+ candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
+ }
+
+ // Terminate the list if there are one or more elements.
+ if (tail != NULL) {
+ tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+ Heap::undefined_value(),
+ UPDATE_WRITE_BARRIER);
+ }
+
+ // Update the head of the list of contexts.
+ Heap::global_contexts_list_ = head;
+}
+
+
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
public:
static inline void VisitPointer(Object** p) {
@@ -1157,6 +1180,9 @@
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+ table_.Register(kVisitGlobalContext,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ VisitSpecialized<Context::kSize>);
typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
@@ -1235,7 +1261,7 @@
if (Logger::is_logging() || CpuProfiler::is_profiling()) {
if (target->IsJSFunction()) {
PROFILE(FunctionMoveEvent(source->address(), target->address()));
- PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target), source));
+ PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
}
}
#endif
@@ -1647,7 +1673,9 @@
obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
- set_global_context_map(Map::cast(obj));
+ Map* global_context_map = Map::cast(obj);
+ global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
+ set_global_context_map(global_context_map);
obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
SharedFunctionInfo::kAlignedSize);
@@ -3431,7 +3459,7 @@
HistogramTimerScope scope(&Counters::gc_context);
CollectAllGarbage(false);
} else {
- CollectGarbage(0, NEW_SPACE);
+ CollectGarbage(NEW_SPACE);
}
new_space_.Shrink();
last_gc_count = gc_count_;
@@ -4236,6 +4264,8 @@
// Create initial objects
if (!CreateInitialObjects()) return false;
+
+ global_contexts_list_ = undefined_value();
}
LOG(IntPtrTEvent("heap-capacity", Capacity()));
@@ -4937,11 +4967,11 @@
#ifdef DEBUG
-bool Heap::GarbageCollectionGreedyCheck() {
+void Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
- if (Bootstrapper::IsActive()) return true;
- if (disallow_allocation_failure()) return true;
- return CollectGarbage(0, NEW_SPACE);
+ if (Bootstrapper::IsActive()) return;
+ if (disallow_allocation_failure()) return;
+ CollectGarbage(NEW_SPACE);
}
#endif
diff --git a/src/heap.h b/src/heap.h
index b1ef19f..6d32a4b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -202,9 +202,10 @@
V(closure_symbol, "(closure)")
-// Forward declaration of the GCTracer class.
+// Forward declarations.
class GCTracer;
class HeapStats;
+class WeakObjectRetainer;
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
@@ -696,8 +697,7 @@
// Performs garbage collection operation.
// Returns whether required_space bytes are available after the collection.
- static bool CollectGarbage(int required_space,
- AllocationSpace space,
+ static void CollectGarbage(AllocationSpace space,
CollectionPolicy collectionPolicy = NORMAL);
// Performs a full garbage collection. Force compaction if the
@@ -717,7 +717,7 @@
#ifdef DEBUG
// Utility used with flag gc-greedy.
- static bool GarbageCollectionGreedyCheck();
+ static void GarbageCollectionGreedyCheck();
#endif
static void AddGCPrologueCallback(
@@ -767,6 +767,11 @@
// not match the empty string.
static String* hidden_symbol() { return hidden_symbol_; }
+ static void set_global_contexts_list(Object* object) {
+ global_contexts_list_ = object;
+ }
+ static Object* global_contexts_list() { return global_contexts_list_; }
+
// Iterates over all roots in the heap.
static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
@@ -870,6 +875,11 @@
// Generated code can embed this address to get access to the roots.
static Object** roots_address() { return roots_; }
+ // Get address of global contexts list for serialization support.
+ static Object** global_contexts_list_address() {
+ return &global_contexts_list_;
+ }
+
#ifdef DEBUG
static void Print();
static void PrintHandles();
@@ -1051,6 +1061,8 @@
static void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
+ static void ProcessWeakReferences(WeakObjectRetainer* retainer);
+
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
@@ -1157,6 +1169,8 @@
static Object* roots_[kRootListLength];
+ static Object* global_contexts_list_;
+
struct StringTypeTable {
InstanceType type;
int size;
@@ -2043,6 +2057,19 @@
static List<Object*> old_space_strings_;
};
+
+// Abstract base class for checking whether a weak object should be retained.
+class WeakObjectRetainer {
+ public:
+ virtual ~WeakObjectRetainer() {}
+
+ // Return whether this object should be retained. If NULL is returned the
+ // object has no references. Otherwise the address of the retained object
+ // should be returned as in some GC situations the object has been moved.
+ virtual Object* RetainAs(Object* object) = 0;
+};
+
+
} } // namespace v8::internal
#endif // V8_HEAP_H_
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 3e2b7ae..348bb14 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2638,7 +2638,7 @@
__ j(not_zero, &non_smi, not_taken);
__ sub(edx, Operand(eax)); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done);
- __ neg(edx); // Correct sign in case of overflow.
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
__ bind(&smi_done);
__ mov(eax, edx);
__ ret(0);
@@ -2964,16 +2964,7 @@
void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(eax);
- __ push(Immediate(Smi::FromInt(0)));
- __ push(eax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 2aab7a8..e2853e8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -133,7 +133,6 @@
void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
if (by != 0) {
- Label inside_string;
__ add(Operand(edi), Immediate(by * char_size()));
}
}
@@ -964,6 +963,17 @@
__ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
}
+void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
+ NearLabel after_position;
+ __ cmp(edi, -by * char_size());
+ __ j(greater_equal, &after_position);
+ __ mov(edi, -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 8b8eeed..51e2cb0 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -98,6 +98,7 @@
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index a904447..c9c3cc4 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -607,6 +607,15 @@
pc = code_base + Load32Aligned(pc + 4);
}
break;
+ BYTECODE(SET_CURRENT_POSITION_FROM_END) {
+ int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
+ if (subject.length() - current > by) {
+ current = subject.length() - by;
+ current_char = subject[current - 1];
+ }
+ pc += BC_SET_CURRENT_POSITION_FROM_END_LENGTH;
+ break;
+ }
default:
UNREACHABLE();
break;
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 82a370f..3c5ddfb 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -5180,7 +5180,10 @@
&compiler,
compiler.accept());
RegExpNode* node = captured_body;
- if (!data->tree->IsAnchored()) {
+ bool is_end_anchored = data->tree->IsAnchoredAtEnd();
+ bool is_start_anchored = data->tree->IsAnchoredAtStart();
+ int max_length = data->tree->max_match();
+ if (!is_start_anchored) {
// Add a .*? at the beginning, outside the body capture, unless
// this expression is anchored at the beginning.
RegExpNode* loop_node =
@@ -5236,6 +5239,15 @@
RegExpMacroAssemblerIrregexp macro_assembler(codes);
#endif // V8_INTERPRETED_REGEXP
+ // Inserted here, instead of in Assembler, because it depends on information
+ // in the AST that isn't replicated in the Node structure.
+ static const int kMaxBacksearchLimit = 1024;
+ if (is_end_anchored &&
+ !is_start_anchored &&
+ max_length < kMaxBacksearchLimit) {
+ macro_assembler.SetCurrentPositionFromEnd(max_length);
+ }
+
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,
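
This hunk is the payoff of the new SetCurrentPositionFromEnd operation: a pattern anchored at the end but not at the start, whose match is at most max_length characters (and below kMaxBacksearchLimit), can only match within the last max_length characters of the subject, so the engine jumps there instead of retrying at every position. A sketch of the clamp, mirroring the interpreter bytecode added above (not the V8 source):

    #include <string>

    void SetCurrentPositionFromEnd(const std::string& subject,
                                   int* current, int by) {
      // Only skip forward: if fewer than `by` characters remain, the current
      // position is already within range of the end.
      if (subject.length() - *current > static_cast<size_t>(by)) {
        *current = static_cast<int>(subject.length()) - by;
      }
    }
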
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 62f0ca6..d6d8754 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -122,6 +122,7 @@
bool Log::is_stopped_ = false;
Log::WritePtr Log::Write = NULL;
FILE* Log::output_handle_ = NULL;
+FILE* Log::output_code_handle_ = NULL;
LogDynamicBuffer* Log::output_buffer_ = NULL;
// Must be the same message as in Logger::PauseProfiler.
const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
@@ -143,9 +144,22 @@
}
+static const char kCodeLogExt[] = ".code";
+
+
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+ if (FLAG_ll_prof) {
+ // Open a file for logging the contents of code objects so that
+ // they can be disassembled later.
+ size_t name_len = strlen(name);
+ ScopedVector<char> code_name(
+ static_cast<int>(name_len + sizeof(kCodeLogExt)));
+ memcpy(code_name.start(), name, name_len);
+ memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
+ output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
+ }
Write = WriteToFile;
Init();
}
@@ -165,6 +179,8 @@
if (Write == WriteToFile) {
if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
+ if (output_code_handle_ != NULL) fclose(output_code_handle_);
+ output_code_handle_ = NULL;
} else if (Write == WriteToMemory) {
delete output_buffer_;
output_buffer_ = NULL;
diff --git a/src/log-utils.h b/src/log-utils.h
index a4dde21..ffea928 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -152,6 +152,9 @@
// mutex_ should be acquired before using output_handle_ or output_buffer_.
static FILE* output_handle_;
+ // Used when low-level profiling is active to save code object contents.
+ static FILE* output_code_handle_;
+
static LogDynamicBuffer* output_buffer_;
// Size of dynamic buffer block (and dynamic buffer initial size).
@@ -171,6 +174,7 @@
// mutex_ should be acquired before using it.
static char* message_buffer_;
+ friend class Logger;
friend class LogMessageBuilder;
friend class LogRecordCompressor;
};
diff --git a/src/log.cc b/src/log.cc
index 4230cba..1b0fdeb 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -191,11 +191,12 @@
~Ticker() { if (IsActive()) Stop(); }
- void SampleStack(TickSample* sample) {
+ virtual void SampleStack(TickSample* sample) {
+ ASSERT(IsSynchronous());
StackTracer::Trace(sample);
}
- void Tick(TickSample* sample) {
+ virtual void Tick(TickSample* sample) {
if (profiler_) profiler_->Insert(sample);
if (window_) window_->AddState(sample->state);
}
@@ -765,6 +766,7 @@
msg.Append(*p);
}
msg.Append('"');
+ LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -784,6 +786,7 @@
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+ LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -808,6 +811,7 @@
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s %s:%d\"",
code->ExecutableSize(), *str, *sourcestr, line);
+ LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -825,6 +829,7 @@
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+ LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -835,6 +840,17 @@
}
+void Logger::CodeMovingGCEvent() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+ LogMessageBuilder msg;
+ msg.Append("%s\n", log_events_[CODE_MOVING_GC]);
+ msg.WriteToLogFile();
+ OS::SignalCodeMovingGC();
+#endif
+}
+
+
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
@@ -845,6 +861,7 @@
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false);
msg.Append('\"');
+ LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -909,8 +926,7 @@
}
-void Logger::FunctionCreateEventFromMove(JSFunction* function,
- HeapObject*) {
+void Logger::FunctionCreateEventFromMove(JSFunction* function) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
FunctionCreateEvent(function);
@@ -1340,6 +1356,34 @@
}
+void Logger::LogCodeInfo() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+#if V8_TARGET_ARCH_IA32
+ const char arch[] = "ia32";
+#elif V8_TARGET_ARCH_X64
+ const char arch[] = "x64";
+#elif V8_TARGET_ARCH_ARM
+ const char arch[] = "arm";
+#else
+ const char arch[] = "unknown";
+#endif
+ LogMessageBuilder msg;
+ msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
+ msg.WriteToLogFile();
+#endif // ENABLE_LOGGING_AND_PROFILING
+}
+
+
+void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
+ if (!FLAG_ll_prof || Log::output_code_handle_ == NULL) return;
+ int pos = static_cast<int>(ftell(Log::output_code_handle_));
+ fwrite(code->instruction_start(), 1, code->instruction_size(),
+ Log::output_code_handle_);
+ msg->Append(",%d", pos);
+}
+
+
void Logger::LogCodeObjects() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
@@ -1451,6 +1495,12 @@
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
+ // --ll-prof implies --log-code and --log-snapshot-positions.
+ if (FLAG_ll_prof) {
+ FLAG_log_code = true;
+ FLAG_log_snapshot_positions = true;
+ }
+
// --prof_lazy controls --log-code, implies --noprof_auto.
if (FLAG_prof_lazy) {
FLAG_log_code = false;
@@ -1512,6 +1562,8 @@
ASSERT(VMState::is_outermost_external());
+ if (FLAG_ll_prof) LogCodeInfo();
+
ticker_ = new Ticker(kSamplingIntervalMs);
if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
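
With --ll-prof enabled, every code-creation log line now ends with the byte offset at which the code object's instructions were appended to the ".code" companion file. A hedged sketch of how a consumer such as the ll_prof script might read a blob back for disassembly, assuming the offset and size have already been parsed from the log line (hypothetical helper, not part of V8):

    #include <cstdio>
    #include <vector>

    std::vector<unsigned char> ReadCodeBlob(FILE* code_file,
                                            long offset, size_t size) {
      std::vector<unsigned char> bytes(size);
      if (fseek(code_file, offset, SEEK_SET) == 0) {
        size_t read = fread(bytes.data(), 1, size, code_file);
        bytes.resize(read);  // tolerate a truncated tail
      }
      return bytes;
    }
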
diff --git a/src/log.h b/src/log.h
index e513737..3a4d79b 100644
--- a/src/log.h
+++ b/src/log.h
@@ -91,6 +91,7 @@
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
+ V(CODE_MOVING_GC, "code-moving-gc", "cg") \
V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \
@@ -209,6 +210,7 @@
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
String* source, int line);
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+ static void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
static void RegExpCodeCreateEvent(Code* code, String* source);
// Emits a code move event.
@@ -217,8 +219,7 @@
static void CodeDeleteEvent(Address from);
// Emits a function object create event.
static void FunctionCreateEvent(JSFunction* function);
- static void FunctionCreateEventFromMove(JSFunction* function,
- HeapObject*);
+ static void FunctionCreateEventFromMove(JSFunction* function);
// Emits a function move event.
static void FunctionMoveEvent(Address from, Address to);
// Emits a function delete event.
@@ -317,6 +318,12 @@
// Used for logging stubs found in the snapshot.
static void LogCodeObject(Object* code_object);
+ // Emits general information about generated code.
+ static void LogCodeInfo();
+
+ // Handles code creation when low-level profiling is active.
+ static void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+
// Emits a profiler tick event. Used by the profiler thread.
static void TickEvent(TickSample* sample, bool overflow);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 26f88cf..ad928ea 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -282,6 +282,11 @@
FixedArray::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitGlobalContext,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ Context::MarkCompactBodyDescriptor,
+ void>::Visit);
+
table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
@@ -578,6 +583,7 @@
VisitPointers(SLOT_ADDR(object,
JSFunction::kCodeEntryOffset + kPointerSize),
SLOT_ADDR(object, JSFunction::kSize));
+
#undef SLOT_ADDR
}
@@ -738,6 +744,21 @@
};
+// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
+// are retained.
+class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ virtual Object* RetainAs(Object* object) {
+ MapWord first_word = HeapObject::cast(object)->map_word();
+ if (first_word.IsMarked()) {
+ return object;
+ } else {
+ return NULL;
+ }
+ }
+};
+
+
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
ASSERT(!object->IsMarked());
ASSERT(Heap::Contains(object));
@@ -1069,6 +1090,10 @@
ExternalStringTable::Iterate(&v);
ExternalStringTable::CleanUp();
+ // Process the weak references.
+ MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+ Heap::ProcessWeakReferences(&mark_compact_object_retainer);
+
// Remove object groups after marking phase.
GlobalHandles::RemoveObjectGroups();
}
@@ -1639,6 +1664,9 @@
}
}
+ // Update pointer from the global contexts list.
+ updating_visitor.VisitPointer(Heap::global_contexts_list_address());
+
// Update pointers from external string table.
Heap::UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -2245,6 +2273,9 @@
Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&updating_visitor);
+ // Update the pointer to the head of the weak list of global contexts.
+ updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+
int live_maps_size = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
@@ -2522,7 +2553,7 @@
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
- PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
+ PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@@ -2615,7 +2646,7 @@
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
- PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
+ PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f63d672..11f9d34 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -844,15 +844,6 @@
}
-int Failure::requested() const {
- const int kShiftBits =
- kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
- STATIC_ASSERT(kShiftBits >= 0);
- ASSERT(type() == RETRY_AFTER_GC);
- return static_cast<int>(value() >> kShiftBits);
-}
-
-
AllocationSpace Failure::allocation_space() const {
ASSERT_EQ(RETRY_AFTER_GC, type());
return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
@@ -881,20 +872,14 @@
}
-Failure* Failure::RetryAfterGC(int requested_bytes) {
- // Assert that the space encoding fits in the three bytes allotted for it.
- ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
- uintptr_t requested =
- static_cast<uintptr_t>(requested_bytes >> kObjectAlignmentBits);
- int tag_bits = kSpaceTagSize + kFailureTypeTagSize + kFailureTagSize;
- if (((requested << tag_bits) >> tag_bits) != requested) {
- // No room for entire requested size in the bits. Round down to
- // maximally representable size.
- requested = static_cast<intptr_t>(
- (~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
- }
- int value = static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE;
- return Construct(RETRY_AFTER_GC, value);
+Failure* Failure::RetryAfterGC() {
+ return RetryAfterGC(NEW_SPACE);
+}
+
+
+Failure* Failure::RetryAfterGC(AllocationSpace space) {
+ ASSERT((space & ~kSpaceTagMask) == 0);
+ return Construct(RETRY_AFTER_GC, space);
}
@@ -1485,6 +1470,15 @@
}
+void FixedArray::set_unchecked(int index,
+ Object* value,
+ WriteBarrierMode mode) {
+ int offset = kHeaderSize + index * kPointerSize;
+ WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+}
+
+
void FixedArray::set_null_unchecked(int index) {
ASSERT(index >= 0 && index < this->length());
ASSERT(!Heap::InNewSpace(Heap::null_value()));
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 90f7ce0..ed76cb9 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -50,6 +50,7 @@
kVisitShortcutCandidate,
kVisitByteArray,
kVisitFixedArray,
+ kVisitGlobalContext,
// For data objects, JS objects and structs along with generic visitor which
// can visit object of any size we provide visitors specialized by
@@ -263,6 +264,11 @@
FixedArray::BodyDescriptor,
int>::Visit);
+ table_.Register(kVisitGlobalContext,
+ &FixedBodyVisitor<StaticVisitor,
+ Context::ScavengeBodyDescriptor,
+ int>::Visit);
+
table_.Register(kVisitByteArray, &VisitByteArray);
table_.Register(kVisitSharedFunctionInfo,
diff --git a/src/objects.cc b/src/objects.cc
index 59ed1de..ac20b2e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -574,28 +574,6 @@
}
-Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
- ASSERT((space & ~kSpaceTagMask) == 0);
- // TODO(X64): Stop using Smi validation for non-smi checks, even if they
- // happen to be identical at the moment.
-
- int requested = requested_bytes >> kObjectAlignmentBits;
- int value = (requested << kSpaceTagSize) | space;
- // We can't very well allocate a heap number in this situation, and if the
- // requested memory is so large it seems reasonable to say that this is an
- // out of memory situation. This fixes a crash in
- // js1_5/Regress/regress-303213.js.
- if (value >> kSpaceTagSize != requested ||
- !Smi::IsValid(value) ||
- value != ((value << kFailureTypeTagSize) >> kFailureTypeTagSize) ||
- !Smi::IsValid(value << kFailureTypeTagSize)) {
- Top::context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- return Construct(RETRY_AFTER_GC, value);
-}
-
-
// Should a word be prefixed by 'a' or 'an' in order to read naturally in
// English? Returns false for non-ASCII or words that don't start with
// a capital letter. The a/an rule follows pronunciation in English.
@@ -8591,7 +8569,9 @@
details = PropertyDetails(details.attributes(),
details.type(),
DetailsAt(entry).index());
- SetEntry(entry, NumberDictionaryShape::AsObject(key), value, details);
+ Object* object_key = NumberDictionaryShape::AsObject(key);
+ if (object_key->IsFailure()) return object_key;
+ SetEntry(entry, object_key, value, details);
return this;
}
diff --git a/src/objects.h b/src/objects.h
index e284454..d917a57 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -794,7 +794,7 @@
//
// Failures are a single word, encoded as follows:
// +-------------------------+---+--+--+
-// |...rrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// |.........unused..........|sss|tt|11|
// +-------------------------+---+--+--+
// 7 6 4 32 10
//
@@ -810,11 +810,6 @@
// allocation space tag is 000 for all failure types except
// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
// allocation spaces (the encoding is found in globals.h).
-//
-// The remaining bits is the size of the allocation request in units
-// of the pointer size, and is zeroed except for RETRY_AFTER_GC
-// failures. The 25 bits (on a 32 bit platform) gives a representable
-// range of 2^27 bytes (128MB).
// Failure type tag info.
const int kFailureTypeTagSize = 2;
@@ -836,15 +831,11 @@
// Returns the space that needs to be collected for RetryAfterGC failures.
inline AllocationSpace allocation_space() const;
- // Returns the number of bytes requested (up to the representable maximum)
- // for RetryAfterGC failures.
- inline int requested() const;
-
inline bool IsInternalError() const;
inline bool IsOutOfMemoryException() const;
- static Failure* RetryAfterGC(int requested_bytes, AllocationSpace space);
- static inline Failure* RetryAfterGC(int requested_bytes); // NEW_SPACE
+ static inline Failure* RetryAfterGC(AllocationSpace space);
+ static inline Failure* RetryAfterGC(); // NEW_SPACE
static inline Failure* Exception();
static inline Failure* InternalError();
static inline Failure* OutOfMemoryException();
@@ -1760,6 +1751,7 @@
// Setters with less debug checks for the GC to use.
inline void set_unchecked(int index, Smi* value);
inline void set_null_unchecked(int index);
+ inline void set_unchecked(int index, Object* value, WriteBarrierMode mode);
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index ae44944..1003de1 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -291,6 +291,10 @@
}
+void OS::SignalCodeMovingGC() {
+}
+
+
int OS::StackWalk(Vector<OS::StackFrame> frames) {
int frames_size = frames.length();
ScopedVector<void*> addresses(frames_size);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index f7d8609..c01c0d2 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -397,6 +397,30 @@
}
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
+void OS::SignalCodeMovingGC() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Support for ll_prof.py.
+ //
+ // The Linux profiler built into the kernel logs all mmap's with
+ // PROT_EXEC so that analysis tools can properly attribute ticks. We
+ // do a mmap with a name known by ll_prof.py and immediately munmap
+ // it. This injects a GC marker into the stream of events generated
+ // by the kernel and allows us to synchronize V8 code log and the
+ // kernel log.
+ int size = sysconf(_SC_PAGESIZE);
+ FILE* f = fopen(kGCFakeMmap, "w+");
+ void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+ fileno(f), 0);
+ ASSERT(addr != MAP_FAILED);
+ munmap(addr, size);
+ fclose(f);
+#endif
+}
+
+
int OS::StackWalk(Vector<OS::StackFrame> frames) {
// backtrace is a glibc extension.
#ifdef __GLIBC__
@@ -748,6 +772,7 @@
USE(info);
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
+ if (!IsVmThread()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent();
@@ -755,6 +780,7 @@
// We always sample the VM state.
sample->state = VMState::current_state();
+
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
@@ -783,9 +809,7 @@
// Implement this on MIPS.
UNIMPLEMENTED();
#endif
- if (IsVmThread()) {
- active_sampler_->SampleStack(sample);
- }
+ active_sampler_->SampleStack(sample);
}
active_sampler_->Tick(sample);
@@ -806,7 +830,10 @@
Sampler::Sampler(int interval, bool profiling)
- : interval_(interval), profiling_(profiling), active_(false) {
+ : interval_(interval),
+ profiling_(profiling),
+ synchronous_(profiling),
+ active_(false) {
data_ = new PlatformData();
}
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 47193de..3e4daf3 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -245,6 +245,10 @@
}
+void OS::SignalCodeMovingGC() {
+}
+
+
uint64_t OS::CpuFeaturesImpliedByPlatform() {
// MacOSX requires all these to install so we can assume they are present.
// These constants are defined by the CPUid instructions.
@@ -549,17 +553,24 @@
// Sampler thread handler.
void Runner() {
- // Loop until the sampler is disengaged, keeping the specified samling freq.
+ // Loop until the sampler is disengaged, keeping the specified
+ // sampling frequency.
for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
+ // If the sampler runs in sync with the JS thread, we try to
+ // suspend it. If we fail, we skip the current sample.
+ if (sampler_->IsSynchronous()) {
+ if (KERN_SUCCESS != thread_suspend(profiled_thread_)) continue;
+ }
+
// We always sample the VM state.
sample->state = VMState::current_state();
+
// If profiling, we record the pc and sp of the profiled thread.
- if (sampler_->IsProfiling()
- && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
+ if (sampler_->IsProfiling()) {
#if V8_HOST_ARCH_X64
thread_state_flavor_t flavor = x86_THREAD_STATE64;
x86_thread_state64_t state;
@@ -591,11 +602,14 @@
sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
sampler_->SampleStack(sample);
}
- thread_resume(profiled_thread_);
}
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(sample);
+
+ // If the sampler runs in sync with the JS thread, we have to
+ // remember to resume it.
+ if (sampler_->IsSynchronous()) thread_resume(profiled_thread_);
}
}
};
@@ -613,7 +627,10 @@
Sampler::Sampler(int interval, bool profiling)
- : interval_(interval), profiling_(profiling), active_(false) {
+ : interval_(interval),
+ profiling_(profiling),
+ synchronous_(profiling),
+ active_(false) {
data_ = new PlatformData(this);
}
@@ -624,9 +641,9 @@
void Sampler::Start() {
- // If we are profiling, we need to be able to access the calling
- // thread.
- if (IsProfiling()) {
+ // If we are starting a synchronous sampler, we need to be able to
+ // access the calling thread.
+ if (IsSynchronous()) {
data_->profiled_thread_ = mach_thread_self();
}
@@ -655,7 +672,7 @@
pthread_join(data_->sampler_thread_, NULL);
// Deallocate Mach port for thread.
- if (IsProfiling()) {
+ if (IsSynchronous()) {
mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
}
}
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index b8392e8..b5caa5e 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -240,6 +240,11 @@
}
+void OS::SignalCodeMovingGC() {
+ UNIMPLEMENTED();
+}
+
+
int OS::StackWalk(Vector<OS::StackFrame> frames) {
UNIMPLEMENTED();
return 0;
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 05ed9ee..e03059a 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -289,6 +289,10 @@
}
+void OS::SignalCodeMovingGC() {
+}
+
+
int OS::StackWalk(Vector<OS::StackFrame> frames) {
UNIMPLEMENTED();
return 1;
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 6d97ed7..fcd69de 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -256,6 +256,10 @@
}
+void OS::SignalCodeMovingGC() {
+}
+
+
struct StackWalker {
Vector<OS::StackFrame>& frames;
int index;
@@ -598,7 +602,10 @@
Sampler::Sampler(int interval, bool profiling)
- : interval_(interval), profiling_(profiling), active_(false) {
+ : interval_(interval),
+ profiling_(profiling),
+ synchronous_(profiling),
+ active_(false) {
data_ = new PlatformData();
}
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 86314a8..caea16c 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -845,14 +845,15 @@
bool is_executable) {
// The address range used to randomize RWX allocations in OS::Allocate
// Try not to map pages into the default range that windows loads DLLs
+ // Use a multiple of 64k to prevent committing unused memory.
// Note: This does not guarantee RWX regions will be within the
// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
#ifdef V8_HOST_ARCH_64_BIT
static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const intptr_t kAllocationRandomAddressMax = 0x000004FFFFFFFFFF;
+ static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
#else
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
- static const intptr_t kAllocationRandomAddressMax = 0x4FFFFFFF;
+ static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
// VirtualAlloc rounds allocated size to page size automatically.
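
As the added comment notes, Windows reserves virtual address space at 64 KB granularity, so the randomized bounds now end on 64 KB boundaries to keep allocations from committing unusable slack. A one-line sketch of that alignment (an illustration, not the V8 code):

    #include <cstdint>

    uintptr_t RoundDownTo64K(uintptr_t address) {
      const uintptr_t kGranularity = 64 * 1024;  // Windows allocation granularity
      return address & ~(kGranularity - 1);
    }
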
@@ -1217,6 +1218,10 @@
}
+void OS::SignalCodeMovingGC() {
+}
+
+
// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
// Switch off warning 4748 (/GS can not protect parameters and local variables
@@ -1838,17 +1843,25 @@
// Context used for sampling the register state of the profiled thread.
CONTEXT context;
memset(&context, 0, sizeof(context));
- // Loop until the sampler is disengaged, keeping the specified samling freq.
+ // Loop until the sampler is disengaged, keeping the specified
+ // sampling frequency.
for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
+ // If the sampler runs in sync with the JS thread, we try to
+ // suspend it. If we fail, we skip the current sample.
+ if (sampler_->IsSynchronous()) {
+ static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread_) == kSuspendFailed) continue;
+ }
+
// We always sample the VM state.
sample->state = VMState::current_state();
+
// If profiling, we record the pc and sp of the profiled thread.
- if (sampler_->IsProfiling()
- && SuspendThread(profiled_thread_) != (DWORD)-1) {
+ if (sampler_->IsProfiling()) {
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
@@ -1862,11 +1875,14 @@
#endif
sampler_->SampleStack(sample);
}
- ResumeThread(profiled_thread_);
}
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(sample);
+
+ // If the sampler runs in sync with the JS thread, we have to
+ // remember to resume it.
+ if (sampler_->IsSynchronous()) ResumeThread(profiled_thread_);
}
}
};
@@ -1883,7 +1899,10 @@
// Initialize a profile sampler.
Sampler::Sampler(int interval, bool profiling)
- : interval_(interval), profiling_(profiling), active_(false) {
+ : interval_(interval),
+ profiling_(profiling),
+ synchronous_(profiling),
+ active_(false) {
data_ = new PlatformData(this);
}
@@ -1895,9 +1914,9 @@
// Start profiling.
void Sampler::Start() {
- // If we are profiling, we need to be able to access the calling
- // thread.
- if (IsProfiling()) {
+ // If we are starting a synchronous sampler, we need to be able to
+ // access the calling thread.
+ if (IsSynchronous()) {
// Get a handle to the calling thread. This is the thread that we are
// going to profile. We need to make a copy of the handle because we are
// going to use it in the sampler thread. Using GetThreadHandle() will
diff --git a/src/platform.h b/src/platform.h
index e9e7c22..42e6eae 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -257,11 +257,16 @@
static char* StrChr(char* str, int c);
static void StrNCpy(Vector<char> dest, const char* src, size_t n);
- // Support for profiler. Can do nothing, in which case ticks
- // occuring in shared libraries will not be properly accounted
- // for.
+ // Support for the profiler. Can do nothing, in which case ticks
+ // occurring in shared libraries will not be properly accounted for.
static void LogSharedLibraryAddresses();
+ // Support for the profiler. Notifies the external profiling
+ // process that a code moving garbage collection starts. Can do
+ // nothing, in which case the code objects must not move (e.g., by
+ // using --never-compact) if accurate profiling is desired.
+ static void SignalCodeMovingGC();
+
// The return value indicates the CPU features we are sure of because of the
// OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
// instructions.
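SignalCodeMovingGC is a porting hook for the external-profiler support (the ll_prof/perf_events work in this release): a port must tell the profiling process when a compacting GC is about to move code, or else keep code immobile via --never-compact. The Windows port above stubs it out. A hypothetical sketch of how a Linux-style port could signal the profiler by producing a recognizable mmap event in the kernel's perf_events stream; the marker file name and mechanism here are assumptions for illustration, not V8's actual implementation:

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

void SignalCodeMovingGCSketch() {
  long size = sysconf(_SC_PAGESIZE);
  FILE* f = fopen("/tmp/__v8_gc_marker__", "w+");  // hypothetical marker
  if (f == NULL) return;
  // Map and immediately unmap the file: the mmap record, tagged with the
  // marker name, lands in the kernel event stream the profiler consumes.
  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                    fileno(f), 0);
  if (addr != MAP_FAILED) munmap(addr, size);
  fclose(f);
}
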
@@ -563,17 +568,24 @@
void Start();
void Stop();
- // Is the sampler used for profiling.
- inline bool IsProfiling() { return profiling_; }
+ // Is the sampler used for profiling?
+ bool IsProfiling() const { return profiling_; }
+
+ // Is the sampler running in sync with the JS thread? On platforms
+ // where the sampler is implemented with a thread that wakes up
+ // every now and then, having a synchronous sampler implies
+ // suspending/resuming the JS thread.
+ bool IsSynchronous() const { return synchronous_; }
// Whether the sampler is running (that is, consumes resources).
- inline bool IsActive() { return active_; }
+ bool IsActive() const { return active_; }
class PlatformData;
private:
const int interval_;
const bool profiling_;
+ const bool synchronous_;
bool active_;
PlatformData* data_; // Platform specific data.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index 90abe91..6fbb14a 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -145,6 +145,12 @@
}
+void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
+ ASSERT(is_uint24(by));
+ Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
+}
+
+
void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
ASSERT(register_index >= 0);
ASSERT(register_index <= kMaxRegister);
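SetCurrentPositionFromEnd lets an end-anchored regexp start matching near the end of the subject instead of scanning the whole string. In the offset convention the assembler versions use (offsets are negative byte counts from the end, 0 meaning "at the end"), the operation is roughly a clamp, as in this runnable sketch:

#include <algorithm>
#include <cassert>

// Semantics shared by the ARM, x64, and bytecode implementations: raise the
// offset so at most 'by' characters remain. The real code then reloads the
// preceding character via LoadCurrentCharacterUnchecked(-1, 1).
int SetCurrentPositionFromEnd(int current_input_offset, int by,
                              int char_size) {
  return std::max(current_input_offset, -by * char_size);
}

int main() {
  // 10-char one-byte string: start is offset -10, end is offset 0.
  assert(SetCurrentPositionFromEnd(-10, 3, 1) == -3);  // jump near the end
  assert(SetCurrentPositionFromEnd(-2, 3, 1) == -2);   // already close: keep
  return 0;
}
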
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 3ddbc2f..6c9c2eb 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -65,6 +65,7 @@
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
+ virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 41c674b..463c1a8 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -136,6 +136,12 @@
}
+void RegExpMacroAssemblerTracer::SetCurrentPositionFromEnd(int by) {
+ PrintF(" SetCurrentPositionFromEnd(by=%d);\n", by);
+ assembler_->SetCurrentPositionFromEnd(by);
+}
+
+
void RegExpMacroAssemblerTracer::SetRegister(int register_index, int to) {
PrintF(" SetRegister(register=%d, to=%d);\n", register_index, to);
assembler_->SetRegister(register_index, to);
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 9608f9e..6a8f4d4 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -89,6 +89,7 @@
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 652b690..dc3bd82 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -155,6 +155,7 @@
StackCheckFlag check_stack_limit) = 0;
virtual void ReadCurrentPositionFromRegister(int reg) = 0;
virtual void ReadStackPointerFromRegister(int reg) = 0;
+ virtual void SetCurrentPositionFromEnd(int by) = 0;
virtual void SetRegister(int register_index, int to) = 0;
virtual void Succeed() = 0;
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
diff --git a/src/runtime.cc b/src/runtime.cc
index c80f1fc..9a604a0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -6703,7 +6703,7 @@
static Object* Runtime_StackGuard(Arguments args) {
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
if (StackGuard::IsStackOverflow()) {
@@ -10153,7 +10153,7 @@
if (failure->IsRetryAfterGC()) {
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
- Heap::CollectGarbage(failure->requested(), failure->allocation_space());
+ Heap::CollectGarbage(failure->allocation_space());
} else {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
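Throughout this patch RetryAfterGC drops the requested size: picking what to collect only ever needed the allocation space, and the failed caller still knows its own size when it retries. A toy model of the simplified protocol (these names are stand-ins, not V8's classes):

#include <cstdio>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, LO_SPACE };

static void CollectGarbage(AllocationSpace space) {
  std::printf("collecting space %d\n", space);
}

struct ToyFailure {
  AllocationSpace allocation_space;  // the size field is gone
};

int main() {
  ToyFailure failure = { OLD_SPACE };
  // Before: CollectGarbage(failure.requested(), failure.allocation_space()).
  // The requested size never influenced what was collected, so it is gone;
  // the caller simply re-runs its allocation afterwards.
  CollectGarbage(failure.allocation_space);
  return 0;
}
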
diff --git a/src/runtime.h b/src/runtime.h
index 19f4144..2cd95c4 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -267,7 +267,7 @@
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
- F(StackGuard, 1, 1) \
+ F(StackGuard, 0, 1) \
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
diff --git a/src/serialize.cc b/src/serialize.cc
index cde7577..ccba737 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -500,7 +500,7 @@
ExternalReferenceDecoder::ExternalReferenceDecoder()
- : encodings_(NewArray<Address*>(kTypeCodeCount)) {
+ : encodings_(NewArray<Address*>(kTypeCodeCount)) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance();
for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
@@ -619,6 +619,8 @@
external_reference_decoder_ = new ExternalReferenceDecoder();
Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
Heap::IterateWeakRoots(this, VISIT_ALL);
+
+ Heap::set_global_contexts_list(Heap::undefined_value());
}
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index fbb2673..8a0dd07 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -407,8 +407,7 @@
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
- ASSERT(p->is_valid());
-
+ if (!p->is_valid()) return false;
return MemoryAllocator::IsPageInSpace(p, this);
}
@@ -440,7 +439,7 @@
object = SlowAllocateRaw(size_in_bytes);
if (object != NULL) return object;
- return Failure::RetryAfterGC(size_in_bytes, identity());
+ return Failure::RetryAfterGC(identity());
}
@@ -454,7 +453,7 @@
object = SlowMCAllocateRaw(size_in_bytes);
if (object != NULL) return object;
- return Failure::RetryAfterGC(size_in_bytes, identity());
+ return Failure::RetryAfterGC(identity());
}
@@ -475,7 +474,7 @@
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;
- if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);
+ if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
Object* obj = HeapObject::FromAddress(alloc_info->top);
alloc_info->top = new_top;
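The Contains change above turns a debug-build crash into an answer: previously the ASSERT meant the predicate could only be asked about addresses already known to be in the heap, whereas returning false makes it total and safe on arbitrary addresses. A reduced model of the before/after contract:

#include <cassert>

// 'is_valid' stands in for Page::FromAddress(addr)->is_valid() and
// 'in_space' for MemoryAllocator::IsPageInSpace(p, this).
bool Contains(bool is_valid, bool in_space) {
  if (!is_valid) return false;  // previously: ASSERT(is_valid) -- a crash
  return in_space;
}

int main() {
  assert(Contains(true, true));    // address on a valid page of this space
  assert(!Contains(true, false));  // valid page, but a different space
  assert(!Contains(false, false)); // stray address: now a clean 'no'
  return 0;
}
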
diff --git a/src/spaces.cc b/src/spaces.cc
index d824c30..5bdbcc7 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1828,7 +1828,7 @@
if (cur == kEnd) {
// No large enough size in list.
*wasted_bytes = 0;
- return Failure::RetryAfterGC(size_in_bytes, owner_);
+ return Failure::RetryAfterGC(owner_);
}
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
int rem = cur - index;
@@ -1926,7 +1926,7 @@
Object* FixedSizeFreeList::Allocate() {
if (head_ == NULL) {
- return Failure::RetryAfterGC(object_size_, owner_);
+ return Failure::RetryAfterGC(owner_);
}
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
@@ -2753,14 +2753,14 @@
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
- return Failure::RetryAfterGC(requested_size, identity());
+ return Failure::RetryAfterGC(identity());
}
size_t chunk_size;
LargeObjectChunk* chunk =
LargeObjectChunk::New(requested_size, &chunk_size, executable);
if (chunk == NULL) {
- return Failure::RetryAfterGC(requested_size, identity());
+ return Failure::RetryAfterGC(identity());
}
size_ += static_cast<int>(chunk_size);
diff --git a/src/spaces.h b/src/spaces.h
index 2fdb96f..0e6a91e 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -2194,7 +2194,6 @@
// if such a page doesn't exist.
LargeObjectChunk* FindChunkContainingPc(Address pc);
-
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
diff --git a/src/strtod.cc b/src/strtod.cc
index 68444fc..ae278bd 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -85,12 +85,22 @@
extern "C" double gay_strtod(const char* s00, const char** se);
static double old_strtod(Vector<const char> buffer, int exponent) {
+ // gay_strtod is broken on Linux/x86. For numbers with few decimal digits
+ // the computation is done using floating-point operations which (on Linux)
+ // are prone to double-rounding errors.
+ // By adding several zeroes to the buffer, gay_strtod falls back to a slower
+ // (but correct) algorithm.
+ const int kInsertedZeroesCount = 20;
char gay_buffer[1024];
Vector<char> gay_buffer_vector(gay_buffer, sizeof(gay_buffer));
int pos = 0;
for (int i = 0; i < buffer.length(); ++i) {
gay_buffer_vector[pos++] = buffer[i];
}
+ for (int i = 0; i < kInsertedZeroesCount; ++i) {
+ gay_buffer_vector[pos++] = '0';
+ }
+ exponent -= kInsertedZeroesCount;
gay_buffer_vector[pos++] = 'e';
if (exponent < 0) {
gay_buffer_vector[pos++] = '-';
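The padding works because it is an identity transformation: digits * 10^e == (digits * 10^k) * 10^(e-k), so appending k zeroes while lowering the exponent by k denotes the same number, but the longer digit string pushes gay_strtod off its fast floating-point path onto the slow, correctly rounded one. A quick self-check of the invariant (the concrete value is only an illustration, not a known failing case):

#include <cstdio>
#include <cstring>
#include <cstdlib>

int main() {
  const char* plain  = "8.9255e-18";
  const char* padded = "8.92550000000000000000e-18";  // same value, more digits
  double a = std::strtod(plain, NULL);
  double b = std::strtod(padded, NULL);
  std::printf("%a\n%a\n", a, b);
  // A correct strtod yields identical bit patterns; an x87 build prone to
  // double rounding may disagree on values of this shape.
  return std::memcmp(&a, &b, sizeof(double)) == 0 ? 0 : 1;
}
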
@@ -139,13 +149,18 @@
}
-double Strtod(Vector<const char> buffer, int exponent) {
- Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
- Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
- exponent += left_trimmed.length() - trimmed.length();
- if (trimmed.length() == 0) return 0.0;
- if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
- if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
+static bool DoubleStrtod(Vector<const char> trimmed,
+ int exponent,
+ double* result) {
+#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) && !defined(WIN32)
+ // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
+ // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
+ // result is not accurate.
+ // We know that Windows32 uses 64 bits and is therefore accurate.
+ // Note that the ARM simulator is compiled for 32 bits. It therefore exhibits
+ // the same problem.
+ return false;
+#endif
if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
// The trimmed input fits into a double.
// If the 10^exponent (resp. 10^-exponent) fits into a double too then we
@@ -155,13 +170,15 @@
// return the best possible approximation.
if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
// 10^-exponent fits into a double.
- double buffer_d = static_cast<double>(ReadUint64(trimmed));
- return buffer_d / exact_powers_of_ten[-exponent];
+ *result = static_cast<double>(ReadUint64(trimmed));
+ *result /= exact_powers_of_ten[-exponent];
+ return true;
}
if (0 <= exponent && exponent < kExactPowersOfTenSize) {
// 10^exponent fits into a double.
- double buffer_d = static_cast<double>(ReadUint64(trimmed));
- return buffer_d * exact_powers_of_ten[exponent];
+ *result = static_cast<double>(ReadUint64(trimmed));
+ *result *= exact_powers_of_ten[exponent];
+ return true;
}
int remaining_digits =
kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
@@ -170,11 +187,27 @@
// The trimmed string was short and we can multiply it with
// 10^remaining_digits. As a result the remaining exponent now fits
// into a double too.
- double buffer_d = static_cast<double>(ReadUint64(trimmed));
- buffer_d *= exact_powers_of_ten[remaining_digits];
- return buffer_d * exact_powers_of_ten[exponent - remaining_digits];
+ *result = static_cast<double>(ReadUint64(trimmed));
+ *result *= exact_powers_of_ten[remaining_digits];
+ *result *= exact_powers_of_ten[exponent - remaining_digits];
+ return true;
}
}
+ return false;
+}
+
+
+double Strtod(Vector<const char> buffer, int exponent) {
+ Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+ Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
+ exponent += left_trimmed.length() - trimmed.length();
+ if (trimmed.length() == 0) return 0.0;
+ if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
+ if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
+ double result;
+ if (DoubleStrtod(trimmed, exponent, &result)) {
+ return result;
+ }
return old_strtod(trimmed, exponent);
}
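The fast path extracted into DoubleStrtod is exact by construction: an integer of at most 15 decimal digits (the bound kMaxExactDoubleIntegerDecimalDigits encodes) converts to double without error, and 10^k is itself an exact double for k <= 22, so the single multiply or divide performs exactly one correctly rounded IEEE operation. An 80-bit x87 stack breaks that guarantee by rounding twice, hence the #if. A worked instance:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t digits = 123456789012345ULL;   // 15 digits: exact in a double
  double d = static_cast<double>(digits);
  double result = d * 1e10;               // 10^10 is exact: one rounding step
  assert(result == 1.23456789012345e24);  // matches the decimal literal
  // On an x87 build without SSE2 this is exactly the kind of check that can
  // fail, which is why DoubleStrtod bails out on those targets.
  return 0;
}
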
diff --git a/src/top.cc b/src/top.cc
index 777f041..9ce6542 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -69,6 +69,9 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = 0;
#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+ current_vm_state_ = NULL;
+#endif
try_catch_handler_address_ = NULL;
context_ = NULL;
int id = ThreadManager::CurrentId();
diff --git a/src/top.h b/src/top.h
index 776c43e..a2ba3dd 100644
--- a/src/top.h
+++ b/src/top.h
@@ -41,6 +41,7 @@
class SaveContext; // Forward declaration.
class ThreadVisitor; // Defined in v8threads.h
+class VMState; // Defined in vm-state.h
class ThreadLocalTop BASE_EMBEDDED {
public:
@@ -101,10 +102,15 @@
// Stack.
Address c_entry_fp_; // the frame pointer of the top c entry frame
Address handler_; // try-blocks are chained through the stack
+
#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+ VMState* current_vm_state_;
+#endif
+
// Generated code scratch locations.
int32_t formal_count_;
@@ -254,6 +260,16 @@
}
#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+ static VMState* current_vm_state() {
+ return thread_local_.current_vm_state_;
+ }
+
+ static void set_current_vm_state(VMState* state) {
+ thread_local_.current_vm_state_ = state;
+ }
+#endif
+
// Generated code scratch locations.
static void* formal_count_address() { return &thread_local_.formal_count_; }
diff --git a/src/version.cc b/src/version.cc
index 33b4977..6d98b04 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 5
-#define BUILD_NUMBER 0
+#define BUILD_NUMBER 1
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index aa4cedb..74f4a6a 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -75,9 +75,9 @@
#endif
state_ = state;
// Save the previous state.
- previous_ = reinterpret_cast<VMState*>(current_state_);
+ previous_ = Top::current_vm_state();
// Install the new state.
- OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(this));
+ Top::set_current_vm_state(this);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
@@ -106,7 +106,7 @@
VMState::~VMState() {
if (disabled_) return;
// Return to the previous state.
- OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(previous_));
+ Top::set_current_vm_state(previous_);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
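Moving current_state_ from a process-wide atomic into ThreadLocalTop turns the VM-state tracker into a plain per-thread linked stack: the constructor pushes, the destructor pops. A reduced model of that mechanism, using C++11 thread_local as a stand-in for V8's Top::current_vm_state() plumbing:

#include <cstdio>

enum StateTag { EXTERNAL, JS, GC, COMPILER };

class ScopedVMState {
 public:
  explicit ScopedVMState(StateTag tag) : tag_(tag), previous_(top_) {
    top_ = this;                          // push: install the new state
  }
  ~ScopedVMState() { top_ = previous_; }  // pop: restore the previous state
  static StateTag current_state() { return top_ ? top_->tag_ : EXTERNAL; }

 private:
  StateTag tag_;
  ScopedVMState* previous_;               // saved link, as in the diff
  static thread_local ScopedVMState* top_;
};

thread_local ScopedVMState* ScopedVMState::top_ = nullptr;

int main() {
  std::printf("%d\n", ScopedVMState::current_state());    // EXTERNAL
  {
    ScopedVMState js(JS);
    std::printf("%d\n", ScopedVMState::current_state());  // JS
    {
      ScopedVMState gc(GC);
      std::printf("%d\n", ScopedVMState::current_state()); // GC
    }
    std::printf("%d\n", ScopedVMState::current_state());  // JS again
  }
  return 0;
}
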
diff --git a/src/vm-state.cc b/src/vm-state.cc
deleted file mode 100644
index 6bd737d..0000000
--- a/src/vm-state.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "vm-state.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_VMSTATE_TRACKING
-AtomicWord VMState::current_state_ = 0;
-#endif
-
-} } // namespace v8::internal
diff --git a/src/vm-state.h b/src/vm-state.h
index 080eb8d..cc91e83 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -28,6 +28,8 @@
#ifndef V8_VM_STATE_H_
#define V8_VM_STATE_H_
+#include "top.h"
+
namespace v8 {
namespace internal {
@@ -44,16 +46,16 @@
// Used for debug asserts.
static bool is_outermost_external() {
- return current_state_ == 0;
+ return Top::current_vm_state() == 0;
}
static StateTag current_state() {
- VMState* state = reinterpret_cast<VMState*>(current_state_);
+ VMState* state = Top::current_vm_state();
return state ? state->state() : EXTERNAL;
}
static Address external_callback() {
- VMState* state = reinterpret_cast<VMState*>(current_state_);
+ VMState* state = Top::current_vm_state();
return state ? state->external_callback_ : NULL;
}
@@ -63,8 +65,6 @@
VMState* previous_;
Address external_callback_;
- // A stack of VM states.
- static AtomicWord current_state_;
#else
public:
explicit VMState(StateTag state) {}
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d592037..2d87667 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2123,7 +2123,7 @@
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
__ subq(rdx, rax);
__ j(no_overflow, &smi_done);
- __ neg(rdx); // Correct sign in case of overflow.
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
__ movq(rax, rdx);
__ ret(0);
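The switch from neg to not_ is safe because the comparison stub only consumes the sign of rdx, never its magnitude. After an overflowing subq the result is nonzero, and ~x == -x - 1 has the same sign as -x for every nonzero x, including the one value where neg itself overflows again and leaves the sign wrong. A runnable check of that argument (wraparound simulated with unsigned arithmetic to stay well-defined):

#include <cstdint>
#include <cassert>

// x86 'neg' with two's-complement wraparound.
static int64_t neg64(int64_t x) {
  return static_cast<int64_t>(~static_cast<uint64_t>(x) + 1);
}

int main() {
  int64_t worst = INT64_MIN;          // the one value 'neg' cannot fix
  assert(neg64(worst) == INT64_MIN);  // still negative: sign stays wrong
  assert(~worst == INT64_MAX);        // 'not' flips the sign correctly
  int64_t x = -42;
  assert((~x > 0) == (neg64(x) > 0)); // for other nonzero values they agree
  return 0;
}
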
@@ -2394,16 +2394,7 @@
void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(rax);
- __ Push(Smi::FromInt(0));
- __ push(rax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1bd3443..cb91067 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -101,9 +101,9 @@
// dirty. |object| is the object being stored into, |value| is the
// object being stored. If |offset| is zero, then the |scratch|
// register contains the array index into the elements array
- // represented as a Smi. All registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update the
- // write barrier if the value is a smi.
+ // represented as an untagged 32-bit integer. All registers are
+ // clobbered by the operation. RecordWrite filters out smis so it
+ // does not update the write barrier if the value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
@@ -122,7 +122,7 @@
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
- // the elements array represented as a Smi.
+ // the elements array represented as an untagged 32-bit integer.
// All registers are clobbered by the operation.
void RecordWriteNonSmi(Register object,
int offset,
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 91e2b44..47c19c7 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -145,7 +145,6 @@
void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
if (by != 0) {
- Label inside_string;
__ addq(rdi, Immediate(by * char_size()));
}
}
@@ -1053,6 +1052,19 @@
}
+void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
+ NearLabel after_position;
+ __ cmpq(rdi, Immediate(-by * char_size()));
+ __ j(greater_equal, &after_position);
+ __ movq(rdi, Immediate(-by * char_size()));
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
__ movq(register_location(register_index), Immediate(to));
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 3bcc3ac..182bc55 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -93,6 +93,7 @@
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);