Version 3.10.1
Fixed bug with arguments object in inlined functions (issue 2045).
Fixed performance bug with lazy initialization (Chromium issue 118686).
Added support for Mac OS X 64-bit builds with GYP. (Patch contributed by Filipe David Manana <fdmanana@gmail.com>)
Fixed bug with hidden properties (issue 2034).
Fixed a performance bug when reloading pages (Chromium issue 117767, V8 issue 1902).
Fixed bug when optimizing throw in top-level code (issue 2054).
Fixed two bugs with array literals (issue 2055, Chromium issue 121407).
Fixed bug with Math.min/Math.max with NaN inputs (issue 2056).
Fixed a bug with the new runtime profiler (Chromium issue 121147).
Fixed compilation of V8 using uClibc.
Optimized boot-up memory use.
Optimized regular expressions.
git-svn-id: http://v8.googlecode.com/svn/trunk@11253 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/AUTHORS b/AUTHORS
index dfefad1..6e46b3d 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -23,6 +23,7 @@
Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
+Filipe David Manana <fdmanana@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
diff --git a/ChangeLog b/ChangeLog
index e7c085e..25eaf56 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,33 @@
+2012-04-10: Version 3.10.1
+
+ Fixed bug with arguments object in inlined functions (issue 2045).
+
+ Fixed performance bug with lazy initialization (Chromium issue
+ 118686).
+
+        Added support for Mac OS X 64-bit builds with GYP.
+ (Patch contributed by Filipe David Manana <fdmanana@gmail.com>)
+
+ Fixed bug with hidden properties (issue 2034).
+
+ Fixed a performance bug when reloading pages (Chromium issue 117767,
+ V8 issue 1902).
+
+ Fixed bug when optimizing throw in top-level code (issue 2054).
+
+ Fixed two bugs with array literals (issue 2055, Chromium issue 121407).
+
+ Fixed bug with Math.min/Math.max with NaN inputs (issue 2056).
+
+ Fixed a bug with the new runtime profiler (Chromium issue 121147).
+
+ Fixed compilation of V8 using uClibc.
+
+ Optimized boot-up memory use.
+
+ Optimized regular expressions.
+
+
2012-03-30: Version 3.10.0
Fixed store IC writability check in strict mode
diff --git a/Makefile b/Makefile
index 72d08c4..da1d688 100644
--- a/Makefile
+++ b/Makefile
@@ -266,4 +266,4 @@
# Dependencies.
dependencies:
svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
- --revision 1026
+ --revision 1282
diff --git a/build/common.gypi b/build/common.gypi
index 7647a79..3016d0c 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -238,6 +238,19 @@
],
}],
],
+ }, { # Section for OS=="mac".
+ 'conditions': [
+ ['target_arch=="ia32"', {
+ 'xcode_settings': {
+ 'ARCHS': ['i386'],
+ }
+ }],
+ ['target_arch=="x64"', {
+ 'xcode_settings': {
+ 'ARCHS': ['x86_64'],
+ }
+ }],
+ ],
}],
['v8_use_liveobjectlist=="true"', {
'defines': [
diff --git a/build/gyp_v8 b/build/gyp_v8
index 4293e76..0fe3403 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -1,6 +1,6 @@
#!/usr/bin/python
#
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -156,7 +156,12 @@
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
- gyp_args.append('-Dtarget_arch=ia32')
+ target_arch = None
+ for p in gyp_args:
+ if p.find('-Dtarget_arch=') == 0:
+ target_arch = p
+ if target_arch is None:
+ gyp_args.append('-Dtarget_arch=ia32')
if utils.GuessOS() == 'linux':
gyp_args.append('-S-ia32')
run_gyp(gyp_args)
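
With this change a target architecture passed on the command line is honored instead of being overridden: -Dtarget_arch=ia32 is appended only when no -Dtarget_arch= argument is already present, so an invocation such as build/gyp_v8 -Dtarget_arch=x64 now takes effect. Together with the ARCHS mapping added to build/common.gypi above, this is what enables the Mac OS X 64-bit builds mentioned in the ChangeLog.
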
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index db0c3b4..db95f78 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -3653,7 +3653,7 @@
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1));
+ __ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -3690,7 +3690,7 @@
__ b(ne, &bailout);
__ tst(scratch2, Operand(0x80000000));
__ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2));
+ __ add(string_length, string_length, Operand(scratch2), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
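
On ARM, add updates the condition flags only when the S bit is set, which is what the SetCC argument requests. Without it, the following b(vs, &bailout) (branch if the overflow flag V is set) tested stale flags, so the string-length overflow check never fired. A minimal sketch of the same check in portable C++ (illustrative only, not part of the patch):

    #include <cstdint>

    // Mirrors add-with-SetCC followed by a branch on overflow (vs):
    // returns false when summing the lengths would overflow int32.
    static bool AddLengthsChecked(int32_t a, int32_t b, int32_t* sum) {
      int64_t wide = static_cast<int64_t>(a) + b;
      if (wide > INT32_MAX || wide < INT32_MIN) return false;  // bail out
      *sum = static_cast<int32_t>(wide);
      return true;
    }
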
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 7a099cc..7c37e8e 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -4472,6 +4472,14 @@
deferred->entry(),
TAG_OBJECT);
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
@@ -4490,14 +4498,14 @@
__ str(scratch, FieldMemOperand(result, property_offset));
}
}
-
- __ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4505,9 +4513,9 @@
__ mov(result, Operand(0));
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ LoadHeapObject(r0, constructor);
+ __ mov(r0, Operand(Smi::FromInt(instance_size)));
__ push(r0);
- CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(r0, result);
}
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 9fd4bdf..a833624 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -452,8 +452,12 @@
void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
+ if (c == 0) {
+ __ tst(current_character(), Operand(mask));
+ } else {
+ __ and_(r0, current_character(), Operand(mask));
+ __ cmp(r0, Operand(c));
+ }
BranchOrBacktrack(eq, on_equal);
}
@@ -461,8 +465,12 @@
void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal) {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
+ if (c == 0) {
+ __ tst(current_character(), Operand(mask));
+ } else {
+ __ and_(r0, current_character(), Operand(mask));
+ __ cmp(r0, Operand(c));
+ }
BranchOrBacktrack(ne, on_not_equal);
}
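
When c == 0, the predicate (current & mask) == c degenerates to (current & mask) == 0, which is exactly what the ARM tst instruction computes: it performs the AND and sets the flags without writing a destination register, saving an instruction and the r0 scratch register. Both paths test the same condition, sketched here in C++:

    #include <cstdint>

    // Predicate behind CheckCharacterAfterAnd; for c == 0 a single
    // flag-setting test (tst on ARM) suffices.
    static bool MatchesAfterAnd(uint32_t current, uint32_t mask, uint32_t c) {
      return (current & mask) == c;
    }
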
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 06f8385..cfd93bc 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1730,7 +1730,7 @@
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
__ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6);
__ b(eq, &call_builtin);
@@ -1738,7 +1738,7 @@
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole.
- __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1);
__ Ret();
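
This is a behavior-preserving cleanup: FieldMemOperand folds the -kHeapObjectTag bias into the operand itself, so MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag) and FieldMemOperand(elements, FixedArray::kHeaderSize) address the same memory, and the hand-written tag adjustment was redundant.
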
diff --git a/src/assembler.cc b/src/assembler.cc
index 4944202..40765b3 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -99,21 +99,7 @@
double the_hole_nan;
};
-struct InitializeDoubleConstants {
- static void Construct(DoubleConstant* double_constants) {
- double_constants->min_int = kMinInt;
- double_constants->one_half = 0.5;
- double_constants->minus_zero = -0.0;
- double_constants->uint8_max_value = 255;
- double_constants->zero = 0.0;
- double_constants->canonical_non_hole_nan = OS::nan_value();
- double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
- double_constants->negative_infinity = -V8_INFINITY;
- }
-};
-
-static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
- double_constants = LAZY_INSTANCE_INITIALIZER;
+static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -726,6 +712,18 @@
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
+void ExternalReference::SetUp() {
+ double_constants.min_int = kMinInt;
+ double_constants.one_half = 0.5;
+ double_constants.minus_zero = -0.0;
+ double_constants.uint8_max_value = 255;
+ double_constants.zero = 0.0;
+ double_constants.canonical_non_hole_nan = OS::nan_value();
+ double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
+ double_constants.negative_infinity = -V8_INFINITY;
+}
+
+
ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
: address_(Redirect(isolate, Builtins::c_function_address(id))) {}
@@ -958,50 +956,47 @@
ExternalReference ExternalReference::address_of_min_int() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->min_int));
+ return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
ExternalReference ExternalReference::address_of_one_half() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->one_half));
+ return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}
ExternalReference ExternalReference::address_of_minus_zero() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->minus_zero));
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.minus_zero));
}
ExternalReference ExternalReference::address_of_zero() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->zero));
+ return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
}
ExternalReference ExternalReference::address_of_uint8_max_value() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->uint8_max_value));
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.uint8_max_value));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->negative_infinity));
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.negative_infinity));
}
ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->canonical_non_hole_nan));
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
}
ExternalReference ExternalReference::address_of_the_hole_nan() {
- return ExternalReference(reinterpret_cast<void*>(
- &double_constants.Pointer()->the_hole_nan));
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.the_hole_nan));
}
diff --git a/src/assembler.h b/src/assembler.h
index 918a2a6..6deca26 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -535,6 +535,8 @@
DIRECT_GETTER_CALL
};
+ static void SetUp();
+
typedef void* ExternalReferenceRedirector(void* original, Type type);
ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index a48bb4d..0e95b4b 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1301,6 +1301,12 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate->debugger()->set_compiling_natives(true);
#endif
+ // During genesis, the boilerplate for stack overflow won't work until the
+ // environment has been at least partially initialized. Add a stack check
+ // before entering JS code to catch overflow early.
+ StackLimitCheck check(Isolate::Current());
+ if (check.HasOverflowed()) return false;
+
bool result = CompileScriptCached(name,
source,
NULL,
@@ -2289,6 +2295,12 @@
HandleScope scope;
SaveContext saved_context(isolate);
+ // During genesis, the boilerplate for stack overflow won't work until the
+ // environment has been at least partially initialized. Add a stack check
+ // before entering JS code to catch overflow early.
+ StackLimitCheck check(Isolate::Current());
+ if (check.HasOverflowed()) return;
+
Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
if (!new_context.is_null()) {
global_context_ =
diff --git a/src/debug.cc b/src/debug.cc
index b6cdd7f..99256ba 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -767,15 +767,22 @@
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
- Execution::TryCall(function, Handle<Object>(context->global()),
- 0, NULL, &caught_exception);
+ Handle<Object> exception =
+ Execution::TryCall(function, Handle<Object>(context->global()),
+ 0, NULL, &caught_exception);
// Check for caught exceptions.
if (caught_exception) {
+ ASSERT(!isolate->has_pending_exception());
+ MessageLocation computed_location;
+ isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
- "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
- Handle<String>(), Handle<JSArray>());
+ "error_loading_debugger", &computed_location,
+ Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
+ ASSERT(!isolate->has_pending_exception());
+ isolate->set_pending_exception(*exception);
MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+ isolate->clear_pending_exception();
return false;
}
@@ -813,6 +820,9 @@
v8::Handle<ObjectTemplate>(),
NULL);
+ // Fail if no context could be created.
+ if (context.is_null()) return false;
+
// Use the debugger context.
SaveContext save(isolate_);
isolate_->set_context(*context);
@@ -3232,7 +3242,7 @@
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
- if (prev_ == NULL) {
+ if (!load_failed_ && prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
diff --git a/src/execution.cc b/src/execution.cc
index ea8cc37..5618975 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -826,6 +826,11 @@
return isolate->heap()->undefined_value();
}
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) {
+ return isolate->heap()->undefined_value();
+ }
+
{
JavaScriptFrameIterator it(isolate);
ASSERT(!it.done());
@@ -862,6 +867,11 @@
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) {
+ return;
+ }
+
HandleScope scope(isolate);
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0668add..75697a8 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -310,7 +310,9 @@
DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
// execution.cc
-DEFINE_int(stack_size, kPointerSize * 128,
+// Slightly less than 1MB on 64-bit, since Windows' default stack size for
+// the main execution thread is 1MB for both 32 and 64-bit.
+DEFINE_int(stack_size, kPointerSize * 123,
"default size of stack region v8 is allowed to use (in kBytes)")
// frames.cc
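
The arithmetic behind the new default: FLAG_stack_size is measured in kilobytes, so on a 64-bit target (kPointerSize == 8) the old value of 128 yielded 128 * 8 = 1024 kB, exactly Windows' 1 MB main-thread stack with no headroom left for the overflow handler; 123 * 8 = 984 kB stays safely below the limit. On 32-bit (kPointerSize == 4) the default drops from 512 kB to 492 kB.
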
diff --git a/src/frames.cc b/src/frames.cc
index 0571a81..5911284 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1369,24 +1369,23 @@
struct JSCallerSavedCodeData {
- JSCallerSavedCodeData() {
- int i = 0;
- for (int r = 0; r < kNumRegs; r++)
- if ((kJSCallerSaved & (1 << r)) != 0)
- reg_code[i++] = r;
-
- ASSERT(i == kNumJSCallerSaved);
- }
int reg_code[kNumJSCallerSaved];
};
+JSCallerSavedCodeData caller_saved_code_data;
-static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
- LAZY_INSTANCE_INITIALIZER;
+void SetUpJSCallerSavedCodeData() {
+ int i = 0;
+ for (int r = 0; r < kNumRegs; r++)
+ if ((kJSCallerSaved & (1 << r)) != 0)
+ caller_saved_code_data.reg_code[i++] = r;
+
+ ASSERT(i == kNumJSCallerSaved);
+}
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
- return caller_saved_code_data.Get().reg_code[n];
+ return caller_saved_code_data.reg_code[n];
}
diff --git a/src/frames.h b/src/frames.h
index 9071555..7178bd4 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -40,6 +40,8 @@
// Get the number of registers in a given register list.
int NumRegs(RegList list);
+void SetUpJSCallerSavedCodeData();
+
// Return the code of the n-th saved register available to JavaScript.
int JSCallerSavedCode(int n);
diff --git a/src/heap.cc b/src/heap.cc
index 4ce1816..6070723 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -42,6 +42,7 @@
#include "natives.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
+#include "once.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
@@ -60,8 +61,6 @@
namespace v8 {
namespace internal {
-static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
-
Heap::Heap()
: isolate_(NULL),
@@ -145,7 +144,6 @@
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
- idle_notification_will_schedule_next_gc_(false),
mark_sweeps_since_idle_round_started_(0),
ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
@@ -504,11 +502,17 @@
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ // Make progress in incremental marking.
+ const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+ incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (!incremental_marking()->IsComplete()) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ }
+ collector = SCAVENGER;
+ collector_reason = "incremental marking delaying mark-sweep";
}
- collector = SCAVENGER;
- collector_reason = "incremental marking delaying mark-sweep";
}
bool next_gc_likely_to_collect_more = false;
@@ -4817,10 +4821,8 @@
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- // This flag prevents incremental marking from requesting GC via stack guard
- idle_notification_will_schedule_next_gc_ = true;
- incremental_marking()->Step(step_size);
- idle_notification_will_schedule_next_gc_ = false;
+ incremental_marking()->Step(step_size,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (incremental_marking()->IsComplete()) {
bool uncommit = false;
@@ -5847,6 +5849,15 @@
#endif
+
+V8_DECLARE_ONCE(initialize_gc_once);
+
+static void InitializeGCOnce() {
+ InitializeScavengingVisitorsTables();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
+}
+
bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
@@ -5865,15 +5876,7 @@
if (!ConfigureHeapDefault()) return false;
}
- gc_initializer_mutex.Pointer()->Lock();
- static bool initialized_gc = false;
- if (!initialized_gc) {
- initialized_gc = true;
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
- }
- gc_initializer_mutex.Pointer()->Unlock();
+ CallOnce(&initialize_gc_once, &InitializeGCOnce);
MarkMapPointersAsEncoded(false);
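
The LazyMutex plus static-bool guard is replaced with V8's CallOnce primitive, which performs the same one-time initialization without taking a lock on every Heap::SetUp call; this is part of the lazy-initialization performance fix listed in the ChangeLog. The idiom is the standard call-once pattern; a sketch using std::call_once, which has the same semantics (illustrative, assuming C++11):

    #include <mutex>

    static std::once_flag initialize_gc_once;

    static void InitializeGCOnce() {
      // One-time global GC setup; runs at most once even if several
      // isolates call SetUp concurrently.
    }

    void SetUp() {
      // After the first call this is typically a single atomic check,
      // with no mutex acquired.
      std::call_once(initialize_gc_once, InitializeGCOnce);
    }
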
diff --git a/src/heap.h b/src/heap.h
index 2bd037f..0391e0e 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1569,10 +1569,6 @@
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
- bool idle_notification_will_schedule_next_gc() {
- return idle_notification_will_schedule_next_gc_;
- }
-
uint32_t HashSeed() {
uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
ASSERT(FLAG_randomize_hashes || seed == 0);
@@ -2033,7 +2029,6 @@
unsigned int last_idle_notification_gc_count_;
bool last_idle_notification_gc_count_init_;
- bool idle_notification_will_schedule_next_gc_;
int mark_sweeps_since_idle_round_started_;
int ms_count_at_last_idle_notification_;
unsigned int gc_count_at_last_idle_gc_;
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 4a5df49..d0dd568 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -4406,6 +4406,9 @@
set_representation(Representation::Tagged());
}
+ // Maximum instance size for which allocations will be inlined.
+ static const int kMaxSize = 64 * kPointerSize;
+
HValue* context() { return OperandAt(0); }
Handle<JSFunction> constructor() { return constructor_; }
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 2255ab6..f9d4191 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -5730,7 +5730,27 @@
set_current_block(return_left);
Push(left);
set_current_block(return_right);
- Push(right);
+ // The branch above always returns the right operand if either of
+ // them is NaN, but the spec requires that max/min(NaN, X) = NaN.
+ // We add another branch that checks if the left operand is NaN or not.
+ if (left_operand->representation().IsDouble()) {
+ // If left_operand != left_operand then it is NaN.
+ HCompareIDAndBranch* compare_nan = new(zone()) HCompareIDAndBranch(
+ left_operand, left_operand, Token::EQ);
+ compare_nan->SetInputRepresentation(left_operand->representation());
+ HBasicBlock* left_is_number = graph()->CreateBasicBlock();
+ HBasicBlock* left_is_nan = graph()->CreateBasicBlock();
+ compare_nan->SetSuccessorAt(0, left_is_number);
+ compare_nan->SetSuccessorAt(1, left_is_nan);
+ current_block()->Finish(compare_nan);
+ set_current_block(left_is_nan);
+ Push(left);
+ set_current_block(left_is_number);
+ Push(right);
+ return_right = CreateJoin(left_is_number, left_is_nan, expr->id());
+ } else {
+ Push(right);
+ }
HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
set_current_block(join);
@@ -6028,7 +6048,8 @@
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
- constructor->initial_map()->instance_type() == JS_OBJECT_TYPE;
+ constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
+ constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
}
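
The extra branch relies on the IEEE-754 rule that NaN is unordered: left == left is false exactly when left is NaN, so the HCompareIDAndBranch comparing left_operand with itself routes NaN inputs to the block that pushes left. The same logic in plain C++ (an illustrative sketch of the required Math.min semantics, not the generated code):

    // Spec-compliant minimum: min(NaN, x) and min(x, NaN) are NaN.
    static double JSMin(double left, double right) {
      if (left != left) return left;       // left is NaN
      if (right != right) return right;    // right is NaN
      return left < right ? left : right;  // ignores the -0.0 vs +0.0 case
    }
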
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 67e2dca..72f59d0 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -4306,6 +4306,14 @@
deferred->entry(),
TAG_OBJECT);
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(scratch, constructor);
@@ -4340,14 +4348,14 @@
__ mov(FieldOperand(result, property_offset), scratch);
}
}
-
- __ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4355,8 +4363,9 @@
__ Set(result, Immediate(0));
PushSafepointRegistersScope scope(this);
- __ PushHeapObject(constructor);
- CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
+ __ push(Immediate(Smi::FromInt(instance_size)));
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 747b79a..0029f33 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -504,8 +504,8 @@
if (c == 0) {
__ test(current_character(), Immediate(mask));
} else {
- __ mov(eax, current_character());
- __ and_(eax, mask);
+ __ mov(eax, mask);
+ __ and_(eax, current_character());
__ cmp(eax, c);
}
BranchOrBacktrack(equal, on_equal);
@@ -518,8 +518,8 @@
if (c == 0) {
__ test(current_character(), Immediate(mask));
} else {
- __ mov(eax, current_character());
- __ and_(eax, mask);
+ __ mov(eax, mask);
+ __ and_(eax, current_character());
__ cmp(eax, c);
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -569,8 +569,8 @@
__ mov(eax, Immediate(table));
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
- __ mov(ebx, current_character());
- __ and_(ebx, kTableSize - 1);
+ __ mov(ebx, kTableSize - 1);
+ __ and_(ebx, current_character());
index = ebx;
}
__ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 8fe89b4..7bbd521 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -205,6 +205,12 @@
MarkObject(target);
}
+ void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
+ if (shared->ic_age() != heap_->global_ic_age()) {
+ shared->ResetForNewContext(heap_->global_ic_age());
+ }
+ }
+
void VisitPointer(Object** p) {
Object* obj = *p;
if (obj->NonFailureIsHeapObject()) {
@@ -743,7 +749,7 @@
}
-void IncrementalMarking::MarkingComplete() {
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
state_ = COMPLETE;
// We will set the stack guard to request a GC now. This will mean the rest
// of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -754,13 +760,14 @@
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
- if (!heap_->idle_notification_will_schedule_next_gc()) {
+ if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
}
-void IncrementalMarking::Step(intptr_t allocated_bytes) {
+void IncrementalMarking::Step(intptr_t allocated_bytes,
+ CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
!FLAG_incremental_marking ||
!FLAG_incremental_marking_steps ||
@@ -833,7 +840,7 @@
Marking::MarkBlack(obj_mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
}
- if (marking_deque_.IsEmpty()) MarkingComplete();
+ if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
allocated_ = 0;
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 4f8fa6b..8cbe6c1 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -46,6 +46,11 @@
COMPLETE
};
+ enum CompletionAction {
+ GC_VIA_STACK_GUARD,
+ NO_GC_VIA_STACK_GUARD
+ };
+
explicit IncrementalMarking(Heap* heap);
void TearDown();
@@ -82,7 +87,7 @@
void Abort();
- void MarkingComplete();
+ void MarkingComplete(CompletionAction action);
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
@@ -102,10 +107,11 @@
static const intptr_t kMaxAllocationMarkingFactor = 1000;
void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+ Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+ GC_VIA_STACK_GUARD);
}
- void Step(intptr_t allocated);
+ void Step(intptr_t allocated, CompletionAction action);
inline void RestartIfNotMarking() {
if (state_ == COMPLETE) {
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 21e3411..b7d0d30 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -108,6 +108,36 @@
}
+// A higher limit makes code generation slower; a lower one hurts the V8
+// benchmark score.
+const int kMaxLookaheadForBoyerMoore = 8;
+// In a 3-character pattern you can maximally step forwards 3 characters
+// at a time, which is not always enough to pay for the extra logic.
+const int kPatternTooShortForBoyerMoore = 2;
+
+
+// Identifies the sort of regexps where the regexp engine is faster
+// than the code used for atom matches.
+static bool HasFewDifferentCharacters(Handle<String> pattern) {
+ int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
+ if (length <= kPatternTooShortForBoyerMoore) return false;
+ const int kMod = 128;
+ bool character_found[kMod];
+ int different = 0;
+ memset(&character_found[0], 0, sizeof(character_found));
+ for (int i = 0; i < length; i++) {
+ int ch = (pattern->Get(i) & (kMod - 1));
+ if (!character_found[ch]) {
+ character_found[ch] = true;
+ different++;
+ // We declare a regexp low-alphabet if it has at least 3 times as many
+ // characters as it has different characters.
+ if (different * 3 > length) return false;
+ }
+ }
+ return true;
+}
+
+
// Generic RegExp methods. Dispatches to implementation specific methods.
@@ -141,9 +171,14 @@
return Handle<Object>::null();
}
- if (parse_result.simple && !flags.is_ignore_case()) {
+ bool has_been_compiled = false;
+
+ if (parse_result.simple &&
+ !flags.is_ignore_case() &&
+ !HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
+ has_been_compiled = true;
} else if (parse_result.tree->IsAtom() &&
!flags.is_ignore_case() &&
parse_result.capture_count == 0) {
@@ -151,8 +186,12 @@
Vector<const uc16> atom_pattern = atom->data();
Handle<String> atom_string =
isolate->factory()->NewStringFromTwoByte(atom_pattern);
- AtomCompile(re, pattern, flags, atom_string);
- } else {
+ if (!HasFewDifferentCharacters(atom_string)) {
+ AtomCompile(re, pattern, flags, atom_string);
+ has_been_compiled = true;
+ }
+ }
+ if (!has_been_compiled) {
IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
}
ASSERT(re->data()->IsFixedArray());
@@ -280,7 +319,8 @@
// from the source pattern.
// If compilation fails, an exception is thrown and this function
// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
+bool RegExpImpl::EnsureCompiledIrregexp(
+ Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii) {
Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
#ifdef V8_INTERPRETED_REGEXP
if (compiled_code->IsByteArray()) return true;
@@ -296,7 +336,7 @@
ASSERT(compiled_code->IsSmi());
return true;
}
- return CompileIrregexp(re, is_ascii);
+ return CompileIrregexp(re, sample_subject, is_ascii);
}
@@ -316,7 +356,9 @@
}
-bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
+bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_ascii) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
@@ -365,6 +407,7 @@
flags.is_ignore_case(),
flags.is_multiline(),
pattern,
+ sample_subject,
is_ascii);
if (result.error_message != NULL) {
// Unable to compile regexp.
@@ -435,7 +478,7 @@
// Check the asciiness of the underlying storage.
bool is_ascii = subject->IsAsciiRepresentationUnderneath();
- if (!EnsureCompiledIrregexp(regexp, is_ascii)) return -1;
+ if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
#ifdef V8_INTERPRETED_REGEXP
// Byte-code regexp needs space allocated for all its registers.
@@ -466,7 +509,7 @@
#ifndef V8_INTERPRETED_REGEXP
ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
- EnsureCompiledIrregexp(regexp, is_ascii);
+ EnsureCompiledIrregexp(regexp, subject, is_ascii);
Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,
@@ -784,6 +827,53 @@
}
+class FrequencyCollator {
+ public:
+ FrequencyCollator() : total_samples_(0) {
+ for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
+ frequencies_[i] = CharacterFrequency(i);
+ }
+ }
+
+ void CountCharacter(int character) {
+ int index = (character & RegExpMacroAssembler::kTableMask);
+ frequencies_[index].Increment();
+ total_samples_++;
+ }
+
+ // Does not measure in percent, but rather per-128 (the table size from the
+ // regexp macro assembler).
+ int Frequency(int in_character) {
+ ASSERT((in_character & RegExpMacroAssembler::kTableMask) == in_character);
+    if (total_samples_ < 1) return 1;  // Avoid division by zero.
+ int freq_in_per128 =
+ (frequencies_[in_character].counter() * 128) / total_samples_;
+ return freq_in_per128;
+ }
+
+ private:
+ class CharacterFrequency {
+ public:
+ CharacterFrequency() : counter_(0), character_(-1) { }
+ explicit CharacterFrequency(int character)
+ : counter_(0), character_(character) { }
+
+ void Increment() { counter_++; }
+ int counter() { return counter_; }
+ int character() { return character_; }
+
+ private:
+ int counter_;
+ int character_;
+ };
+
+
+ private:
+ CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize];
+ int total_samples_;
+};
+
+
class RegExpCompiler {
public:
RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii);
@@ -819,6 +909,7 @@
inline bool ignore_case() { return ignore_case_; }
inline bool ascii() { return ascii_; }
+ FrequencyCollator* frequency_collator() { return &frequency_collator_; }
int current_expansion_factor() { return current_expansion_factor_; }
void set_current_expansion_factor(int value) {
@@ -837,6 +928,7 @@
bool ascii_;
bool reg_exp_too_big_;
int current_expansion_factor_;
+ FrequencyCollator frequency_collator_;
};
@@ -865,7 +957,8 @@
ignore_case_(ignore_case),
ascii_(ascii),
reg_exp_too_big_(false),
- current_expansion_factor_(1) {
+ current_expansion_factor_(1),
+ frequency_collator_() {
accept_ = new EndNode(EndNode::ACCEPT);
ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
}
@@ -2056,6 +2149,17 @@
}
+void ActionNode::FillInBMInfo(int offset,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ if (type_ == BEGIN_SUBMATCH) {
+ bm->SetRest(offset);
+ } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
+ on_success()->FillInBMInfo(offset, bm, not_at_start);
+ }
+}
+
+
int AssertionNode::EatsAtLeast(int still_to_find,
int recursion_depth,
bool not_at_start) {
@@ -2072,6 +2176,14 @@
}
+void AssertionNode::FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ // Match the behaviour of EatsAtLeast on this node.
+ if (type() == AT_START && not_at_start) return;
+ on_success()->FillInBMInfo(offset, bm, not_at_start);
+}
+
+
int BackReferenceNode::EatsAtLeast(int still_to_find,
int recursion_depth,
bool not_at_start) {
@@ -2504,6 +2616,16 @@
}
+void LoopChoiceNode::FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool nas) {
+ if (body_can_be_zero_length_) {
+ bm->SetRest(offset);
+ return;
+ }
+ ChoiceNode::FillInBMInfo(offset, bm, nas);
+}
+
+
void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
@@ -3000,6 +3122,30 @@
}
+RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) {
+ if (elms_->length() != 1) return NULL;
+ TextElement elm = elms_->at(0);
+ if (elm.type != TextElement::CHAR_CLASS) return NULL;
+ RegExpCharacterClass* node = elm.data.u_char_class;
+ ZoneList<CharacterRange>* ranges = node->ranges();
+ if (!CharacterRange::IsCanonical(ranges)) {
+ CharacterRange::Canonicalize(ranges);
+ }
+ if (node->is_negated()) {
+ return ranges->length() == 0 ? on_success() : NULL;
+ }
+ if (ranges->length() != 1) return NULL;
+ uint32_t max_char;
+ if (compiler->ascii()) {
+ max_char = String::kMaxAsciiCharCode;
+ } else {
+ max_char = String::kMaxUtf16CodeUnit;
+ }
+ return ranges->at(0).IsEverything(max_char) ? on_success() : NULL;
+}
+
+
// Finds the fixed match length of a sequence of nodes that goes from
// this alternative and back to this choice node. If there are variable
// length nodes or other complications in the way then return a sentinel
@@ -3064,8 +3210,8 @@
int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
- bool not_at_start) {
- int preload_characters = EatsAtLeast(4, 0, not_at_start);
+ int eats_at_least) {
+ int preload_characters = Min(4, eats_at_least);
if (compiler->macro_assembler()->CanReadUnaligned()) {
bool ascii = compiler->ascii();
if (ascii) {
@@ -3131,6 +3277,198 @@
};
+BoyerMooreLookahead::BoyerMooreLookahead(
+ int length, int map_length, RegExpCompiler* compiler)
+ : length_(length),
+ map_length_(map_length),
+ compiler_(compiler) {
+ ASSERT(IsPowerOf2(map_length));
+ if (compiler->ascii()) {
+ max_char_ = String::kMaxAsciiCharCode;
+ } else {
+ max_char_ = String::kMaxUtf16CodeUnit;
+ }
+ bitmaps_ = new ZoneList<ZoneList<bool>*>(length);
+ for (int i = 0; i < length; i++) {
+ bitmaps_->Add(new ZoneList<bool>(map_length));
+ ZoneList<bool>* map = bitmaps_->at(i);
+ for (int i = 0; i < map_length; i++) {
+ map->Add(false);
+ }
+ }
+}
+
+
+// Find the longest range of lookahead that has the fewest number of different
+// characters that can occur at a given position. Since we are optimizing two
+// different parameters at once this is a tradeoff.
+bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
+ int biggest_points = 0;
+ for (int max_number_of_chars = 4;
+ max_number_of_chars < kTooManyCharacters;
+ max_number_of_chars *= 2) {
+ biggest_points =
+ FindBestInterval(max_number_of_chars, biggest_points, from, to);
+ }
+ if (biggest_points == 0) return false;
+ return true;
+}
+
+
+// Find the highest-points range between 0 and length_ where the character
+// information is not too vague. 'Too vague' means that there are more than
+// max_number_of_chars that can occur at this position. Calculates the number
+// of points as the product of width-of-the-range and
+// probability-of-finding-one-of-the-characters, where the probability is
+// calculated using the frequency distribution of the sample subject string.
+int BoyerMooreLookahead::FindBestInterval(
+ int max_number_of_chars, int old_biggest_points, int* from, int* to) {
+ int biggest_points = old_biggest_points;
+ static const int kSize = RegExpMacroAssembler::kTableSize;
+ for (int i = 0; i < length_; ) {
+ while (i < length_ && Count(i) > max_number_of_chars) i++;
+ if (i == length_) break;
+ int remembered_from = i;
+ bool union_map[kSize];
+ for (int j = 0; j < kSize; j++) union_map[j] = false;
+ while (i < length_ && Count(i) <= max_number_of_chars) {
+ ZoneList<bool>* map = bitmaps_->at(i);
+ for (int j = 0; j < kSize; j++) union_map[j] |= map->at(j);
+ i++;
+ }
+ int frequency = 0;
+ for (int j = 0; j < kSize; j++) {
+ if (union_map[j]) {
+ // Add 1 to the frequency to give a small per-character boost for
+ // the cases where our sampling is not good enough and many
+ // characters have a frequency of zero. This means the frequency
+ // can theoretically be up to 2*kSize though we treat it mostly as
+ // a fraction of kSize.
+ frequency += compiler_->frequency_collator()->Frequency(j) + 1;
+ }
+ }
+ // We use the probability of skipping times the distance we are skipping to
+ // judge the effectiveness of this. Actually we have a cut-off: By
+ // dividing by 2 we switch off the skipping if the probability of skipping
+ // is less than 50%. This is because the multibyte mask-and-compare
+ // skipping in quickcheck is more likely to do well on this case.
+ bool in_quickcheck_range = ((i - remembered_from < 4) ||
+ (compiler_->ascii() ? remembered_from <= 4 : remembered_from <= 2));
+ // Called 'probability' but it is only a rough estimate and can actually
+ // be outside the 0-kSize range.
+ int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
+ int points = (i - remembered_from) * probability;
+ if (points > biggest_points) {
+ *from = remembered_from;
+ *to = i - 1;
+ biggest_points = points;
+ }
+ }
+ return biggest_points;
+}
+
+
+// Take all the characters that will not prevent a successful match if they
+// occur in the subject string in the range between min_lookahead and
+// max_lookahead (inclusive) measured from the current position. If the
+// character at max_lookahead offset is not one of these characters, then we
+// can safely skip forwards by the number of characters in the range.
+int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
+ int max_lookahead,
+ Handle<ByteArray> boolean_skip_table) {
+ const int kSize = RegExpMacroAssembler::kTableSize;
+
+ const int kSkipArrayEntry = 0;
+ const int kDontSkipArrayEntry = 1;
+
+ for (int i = 0; i < kSize; i++) {
+ boolean_skip_table->set(i, kSkipArrayEntry);
+ }
+ int skip = max_lookahead + 1 - min_lookahead;
+
+ for (int i = max_lookahead; i >= min_lookahead; i--) {
+ ZoneList<bool>* map = bitmaps_->at(i);
+ for (int j = 0; j < map_length_; j++) {
+ if (map->at(j)) {
+ boolean_skip_table->set(j, kDontSkipArrayEntry);
+ }
+ }
+ }
+
+ return skip;
+}
+
+
+// See comment above on the implementation of GetSkipTable.
+bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
+ int min_lookahead = 0;
+ int max_lookahead = 0;
+
+ if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return false;
+
+ bool found_single_character = false;
+ bool abandoned_search_for_single_character = false;
+ int single_character = 0;
+ for (int i = max_lookahead; i >= min_lookahead; i--) {
+ ZoneList<bool>* map = bitmaps_->at(i);
+ for (int j = 0; j < map_length_; j++) {
+ if (map->at(j)) {
+ if (found_single_character) {
+ found_single_character = false; // Found two.
+ abandoned_search_for_single_character = true;
+ break;
+ } else {
+ found_single_character = true;
+ single_character = j;
+ }
+ }
+ }
+ if (abandoned_search_for_single_character) break;
+ }
+
+ int lookahead_width = max_lookahead + 1 - min_lookahead;
+
+ if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
+ // The mask-compare can probably handle this better.
+ return false;
+ }
+
+ if (found_single_character) {
+ Label cont, again;
+ masm->Bind(&again);
+ masm->LoadCurrentCharacter(max_lookahead, &cont, true);
+ if (max_char_ > map_length_) {
+ ASSERT(map_length_ == RegExpMacroAssembler::kTableSize);
+ masm->CheckCharacterAfterAnd(single_character,
+ RegExpMacroAssembler::kTableMask,
+ &cont);
+ } else {
+ masm->CheckCharacter(single_character, &cont);
+ }
+ masm->AdvanceCurrentPosition(lookahead_width);
+ masm->GoTo(&again);
+ masm->Bind(&cont);
+ return true;
+ }
+
+ Handle<ByteArray> boolean_skip_table =
+ FACTORY->NewByteArray(map_length_, TENURED);
+ int skip_distance = GetSkipTable(
+ min_lookahead, max_lookahead, boolean_skip_table);
+ ASSERT(skip_distance != 0);
+
+ Label cont, again;
+ masm->Bind(&again);
+ masm->LoadCurrentCharacter(max_lookahead, &cont, true);
+ masm->CheckBitInTable(boolean_skip_table, &cont);
+ masm->AdvanceCurrentPosition(skip_distance);
+ masm->GoTo(&again);
+ masm->Bind(&cont);
+
+ return true;
+}
+
+
/* Code generation for choice nodes.
*
* We generate quick checks that do a mask and compare to eliminate a
@@ -3209,7 +3547,6 @@
* \______________/
*/
-
void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
int choice_count = alternatives_->length();
@@ -3274,10 +3611,48 @@
int first_normal_choice = greedy_loop ? 1 : 0;
- int preload_characters =
- CalculatePreloadCharacters(compiler,
- current_trace->at_start() == Trace::FALSE);
- bool preload_is_current =
+ bool not_at_start = current_trace->at_start() == Trace::FALSE;
+ const int kEatsAtLeastNotYetInitialized = -1;
+ int eats_at_least = kEatsAtLeastNotYetInitialized;
+
+ bool skip_was_emitted = false;
+
+ if (!greedy_loop && choice_count == 2) {
+ GuardedAlternative alt1 = alternatives_->at(1);
+ if (alt1.guards() == NULL || alt1.guards()->length() == 0) {
+ RegExpNode* eats_anything_node = alt1.node();
+ if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) ==
+ this) {
+ // At this point we know that we are at a non-greedy loop that will eat
+ // any character one at a time. Any non-anchored regexp has such a
+ // loop prepended to it in order to find where it starts. We look for
+ // a pattern of the form ...abc... where we can look 6 characters ahead
+ // and step forwards 3 if the character is not one of abc. Abc need
+ // not be atoms, they can be any reasonably limited character class or
+ // small alternation.
+ ASSERT(trace->is_trivial()); // This is the case on LoopChoiceNodes.
+ eats_at_least =
+ Min(kMaxLookaheadForBoyerMoore,
+ EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+ if (eats_at_least >= 1) {
+ BoyerMooreLookahead bm(eats_at_least,
+ RegExpMacroAssembler::kTableSize,
+ compiler);
+ GuardedAlternative alt0 = alternatives_->at(0);
+ alt0.node()->FillInBMInfo(0, &bm, not_at_start);
+ skip_was_emitted = bm.EmitSkipInstructions(macro_assembler);
+ }
+ }
+ }
+ }
+
+ if (eats_at_least == kEatsAtLeastNotYetInitialized) {
+ // Save some time by looking at most one machine word ahead.
+ eats_at_least = EatsAtLeast(compiler->ascii() ? 4 : 2, 0, not_at_start);
+ }
+ int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least);
+
+ bool preload_is_current = !skip_was_emitted &&
(current_trace->characters_preloaded() == preload_characters);
bool preload_has_checked_bounds = preload_is_current;
@@ -5432,6 +5807,76 @@
}
+void ChoiceNode::FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ ZoneList<GuardedAlternative>* alts = alternatives();
+ for (int i = 0; i < alts->length(); i++) {
+ GuardedAlternative& alt = alts->at(i);
+ if (alt.guards() != NULL && alt.guards()->length() != 0) {
+ bm->SetRest(offset); // Give up trying to fill in info.
+ return;
+ }
+ alt.node()->FillInBMInfo(offset, bm, not_at_start);
+ }
+}
+
+
+void TextNode::FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ if (offset >= bm->length()) return;
+ int max_char = bm->max_char();
+ for (int i = 0; i < elements()->length(); i++) {
+ if (offset >= bm->length()) return;
+ TextElement text = elements()->at(i);
+ if (text.type == TextElement::ATOM) {
+ RegExpAtom* atom = text.data.u_atom;
+ for (int j = 0; j < atom->length(); j++, offset++) {
+ if (offset >= bm->length()) return;
+ uc16 character = atom->data()[j];
+ if (bm->compiler()->ignore_case()) {
+ unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ int length = GetCaseIndependentLetters(
+ ISOLATE,
+ character,
+ bm->max_char() == String::kMaxAsciiCharCode,
+ chars);
+ for (int j = 0; j < length; j++) {
+ bm->Set(offset, chars[j]);
+ }
+ } else {
+ if (character <= max_char) bm->Set(offset, character);
+ }
+ }
+ } else {
+ ASSERT(text.type == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* char_class = text.data.u_char_class;
+ ZoneList<CharacterRange>* ranges = char_class->ranges();
+ if (char_class->is_negated()) {
+ bm->SetAll(offset);
+ } else {
+ for (int k = 0; k < ranges->length(); k++) {
+ CharacterRange& range = ranges->at(k);
+ if (range.from() > max_char) continue;
+ int to = Min(max_char, static_cast<int>(range.to()));
+ if (to - range.from() >= BoyerMooreLookahead::kTooManyCharacters) {
+ bm->SetAll(offset);
+ break;
+ }
+ for (int m = range.from(); m <= to; m++) {
+ bm->Set(offset, m);
+ }
+ }
+ }
+ offset++;
+ }
+ }
+ if (offset >= bm->length()) return;
+ on_success()->FillInBMInfo(offset,
+ bm,
+ true); // Not at start after a text node.
+}
+
+
int TextNode::ComputeFirstCharacterSet(int budget) {
budget--;
if (budget >= 0) {
@@ -5589,15 +6034,30 @@
}
-RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
- bool ignore_case,
- bool is_multiline,
- Handle<String> pattern,
- bool is_ascii) {
+RegExpEngine::CompilationResult RegExpEngine::Compile(
+ RegExpCompileData* data,
+ bool ignore_case,
+ bool is_multiline,
+ Handle<String> pattern,
+ Handle<String> sample_subject,
+ bool is_ascii) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
return IrregexpRegExpTooBig();
}
RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii);
+
+ // Sample some characters from the middle of the string.
+ static const int kSampleSize = 128;
+
+ FlattenString(sample_subject);
+ int chars_sampled = 0;
+ int half_way = (sample_subject->length() - kSampleSize) / 2;
+ for (int i = Max(0, half_way);
+ i < sample_subject->length() && chars_sampled < kSampleSize;
+ i++, chars_sampled++) {
+ compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
+ }
+
// Wrap the body of the regexp in capture #0.
RegExpNode* captured_body = RegExpCapture::ToNode(data->tree,
0,
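
The new FrequencyCollator drives the Boyer-Moore decisions above: up to 128 characters are sampled from the middle of the first subject string, bucketed by character code mod 128 (the macro assembler's table size), and later queried as a per-128 frequency rather than a percentage. A stripped-down sketch of the sampling (illustrative, assuming C++11 and an 8-bit subject string):

    #include <algorithm>
    #include <string>

    static const int kTableSize = 128;   // RegExpMacroAssembler::kTableSize
    static const int kSampleSize = 128;

    struct FrequencyCollator {
      int counts[kTableSize] = {0};
      int total = 0;
      void CountCharacter(int ch) { counts[ch & (kTableSize - 1)]++; total++; }
      int Frequency(int bucket) const {  // per-128, not per cent
        return total < 1 ? 1 : counts[bucket] * 128 / total;
      }
    };

    static void SampleSubject(const std::string& subject, FrequencyCollator* fc) {
      int length = static_cast<int>(subject.size());
      int half_way = (length - kSampleSize) / 2;  // sample from the middle
      for (int i = std::max(0, half_way), sampled = 0;
           i < length && sampled < kSampleSize;
           i++, sampled++) {
        fc->CountCharacter(static_cast<unsigned char>(subject[i]));
      }
    }
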
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 8875de9..288e995 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -190,8 +190,10 @@
static String* last_ascii_string_;
static String* two_byte_cached_string_;
- static bool CompileIrregexp(Handle<JSRegExp> re, bool is_ascii);
- static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);
+ static bool CompileIrregexp(
+ Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
+ static inline bool EnsureCompiledIrregexp(
+ Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
// Set the subject cache. The previous string buffer is not deleted, so the
@@ -419,6 +421,90 @@
};
+// Improve the speed that we scan for an initial point where a non-anchored
+// regexp can match by using a Boyer-Moore-like table. This is done by
+// identifying non-greedy non-capturing loops in the nodes that eat any
+// character one at a time. For example in the middle of the regexp
+// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
+// inserted at the start of any non-anchored regexp.
+//
+// When we have found such a loop we look ahead in the nodes to find the set of
+// characters that can come at given distances. For example for the regexp
+// /.?foo/ we know that there are at least 3 characters ahead of us, and the
+// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
+// the lookahead info where the set of characters is reasonably constrained. In
+// our example this is from index 1 to 2 (0 is not constrained). We can now
+// look 3 characters ahead and if we don't find one of [f, o] (the union of
+// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
+//
+// For Unicode input strings we do the same, but modulo 128.
+//
+// We also look at the first string fed to the regexp and use that to get a hint
+// of the character frequencies in the inputs. This affects the assessment of
+// whether the set of characters is 'reasonably constrained'.
+//
+// We also have another lookahead mechanism (called quick check in the code),
+// which uses a wide load of multiple characters followed by a mask and compare
+// to determine whether a match is possible at this point.
+class BoyerMooreLookahead {
+ public:
+ BoyerMooreLookahead(int length, int map_length, RegExpCompiler* compiler);
+
+ int length() { return length_; }
+ int max_char() { return max_char_; }
+ RegExpCompiler* compiler() { return compiler_; }
+
+ static const int kTooManyCharacters = 32;
+
+ int Count(int map_number) {
+ ZoneList<bool>* map = bitmaps_->at(map_number);
+ if (map == NULL) return map_length_;
+ int count = 0;
+ for (int i = 0; i < map_length_; i++) {
+ if (map->at(i)) count++;
+ }
+ return count;
+ }
+
+ void Set(int map_number, int character) {
+ if (character > max_char_) return;
+ ZoneList<bool>* map = bitmaps_->at(map_number);
+ if (map == NULL) return;
+ map->at(character & (map_length_ - 1)) = true;
+ }
+
+ void SetAll(int map_number) {
+ bitmaps_->at(map_number) = NULL;
+ }
+
+ void SetRest(int from_map) {
+ for (int i = from_map; i < length_; i++) SetAll(i);
+ }
+ bool EmitSkipInstructions(RegExpMacroAssembler* masm);
+
+ private:
+ // This is the value obtained by EatsAtLeast. If we do not have at least this
+ // many characters left in the sample string then the match is bound to fail.
+ // Therefore it is OK to read a character this far ahead of the current match
+ // point.
+ int length_;
+ // We conservatively consider all character values modulo this length. For
+ // ASCII there is no loss of precision, since this has a value of 128.
+ int map_length_;
+ RegExpCompiler* compiler_;
+ // 0x7f for ASCII, 0xffff for UTF-16.
+ int max_char_;
+ ZoneList<ZoneList<bool>*>* bitmaps_;
+
+ int GetSkipTable(int min_lookahead,
+ int max_lookahead,
+ Handle<ByteArray> boolean_skip_table);
+ bool FindWorthwhileInterval(int* from, int* to);
+ int FindBestInterval(
+ int max_number_of_chars, int old_biggest_points, int* from, int* to);
+};
+
+
#define FOR_EACH_NODE_TYPE(VISIT) \
VISIT(End) \
VISIT(Action) \
@@ -635,6 +721,22 @@
bool not_at_start) = 0;
static const int kNodeIsTooComplexForGreedyLoops = -1;
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
+ // Only returns the successor for a text node of length 1 that matches any
+ // character and that has no guards on it.
+ virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) {
+ return NULL;
+ }
+
+ // Collects information on the possible code units (mod 128) that can match if
+ // we look forward. This is used for a Boyer-Moore-like string searching
+ // implementation. TODO(erikcorry): This should share more code with
+ // EatsAtLeast, GetQuickCheckDetails and ComputeFirstCharacterSet.
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ UNREACHABLE();
+ }
+
Label* label() { return &label_; }
// If non-generic code is generated for a node (i.e. the node is not at the
// start of the trace) then it cannot be reused. This variable sets a limit
@@ -747,6 +849,10 @@
: on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ on_success_->FillInBMInfo(offset, bm, not_at_start);
+ }
private:
RegExpNode* on_success_;
};
@@ -793,6 +899,8 @@
return on_success()->GetQuickCheckDetails(
details, compiler, filled_in, not_at_start);
}
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start);
Type type() { return type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -860,6 +968,10 @@
ZoneList<TextElement>* elements() { return elms_; }
void MakeCaseIndependent(bool is_ascii);
virtual int GreedyLoopTextLength();
+ virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler);
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual TextNode* Clone() {
TextNode* result = new TextNode(*this);
result->CalculateOffsets();
@@ -928,6 +1040,8 @@
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual int ComputeFirstCharacterSet(int budget);
virtual AssertionNode* Clone() { return new AssertionNode(*this); }
AssertionNodeType type() { return type_; }
@@ -961,6 +1075,12 @@
bool not_at_start) {
return;
}
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ // Working out the set of characters that a backreference can match is too
+ // hard, so we just say that any character can match.
+ bm->SetRest(offset);
+ }
virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
virtual int ComputeFirstCharacterSet(int budget);
@@ -986,6 +1106,11 @@
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ // Returning 0 from EatsAtLeast should ensure we never get here.
+ UNREACHABLE();
+ }
virtual EndNode* Clone() { return new EndNode(*this); }
private:
Action action_;
@@ -1071,6 +1196,8 @@
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual ChoiceNode* Clone() { return new ChoiceNode(*this); }
bool being_calculated() { return being_calculated_; }
@@ -1089,7 +1216,7 @@
void GenerateGuard(RegExpMacroAssembler* macro_assembler,
Guard* guard,
Trace* trace);
- int CalculatePreloadCharacters(RegExpCompiler* compiler, bool not_at_start);
+ int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
void EmitOutOfLineContinuation(RegExpCompiler* compiler,
Trace* trace,
GuardedAlternative alternative,
@@ -1119,6 +1246,10 @@
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ alternatives_->at(1).node()->FillInBMInfo(offset, bm, not_at_start);
+ }
// For a negative lookahead we don't emit the quick check for the
// alternative that is expected to fail. This is because quick check code
// starts by loading enough characters for the alternative that takes fewest
@@ -1146,6 +1277,8 @@
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
+ virtual void FillInBMInfo(
+ int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual int ComputeFirstCharacterSet(int budget);
virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
RegExpNode* loop_node() { return loop_node_; }
@@ -1458,6 +1591,7 @@
bool ignore_case,
bool multiline,
Handle<String> pattern,
+ Handle<String> sample_subject,
bool is_ascii);
static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
diff --git a/src/lazy-instance.h b/src/lazy-instance.h
index 4beb55c..4c09b0d 100644
--- a/src/lazy-instance.h
+++ b/src/lazy-instance.h
@@ -65,8 +65,12 @@
// static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
// LAZY_INSTANCE_INITIALIZER;
//
-// WARNING: This implementation of LazyInstance is NOT thread-safe by default.
-// See ThreadSafeInitOnceTrait declared below for that.
+// WARNINGS:
+// - This implementation of LazyInstance is NOT THREAD-SAFE by default. See
+// ThreadSafeInitOnceTrait declared below for that.
+// - Lazy initialization comes with a cost. Make sure that you don't use it
+// on a critical path. Consider adding your initialization code to a function
+// which is explicitly called once.
//
// Notes for advanced users:
// LazyInstance can actually be used in two different ways:
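A minimal sketch of the alternative the new warning recommends (illustrative names, not V8 API): hoist initialization out of the hot path into a function that is called exactly once at startup.

#include <cassert>
#include <cstddef>

struct Tables { int fast_log[256]; };
static Tables* g_tables = NULL;

void InitTablesOnce() {  // call once, e.g. from an OS::SetUp-style hook
  assert(g_tables == NULL);
  g_tables = new Tables();
  for (int i = 0; i < 256; ++i) g_tables->fast_log[i] = i;  // placeholder
}

int FastLookup(int i) {  // hot path: no lazy-init check at all
  return g_tables->fast_log[i & 255];
}

int main() {
  InitTablesOnce();
  return FastLookup(42) == 42 ? 0 : 1;
}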
@@ -107,9 +111,17 @@
// Traits that define how an instance is allocated and accessed.
+// TODO(kalmard): __alignof__ is only defined for GCC > 4.2. Fix alignment issue
+// on MIPS with other compilers.
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
+#define LAZY_ALIGN(x) __attribute__((aligned(__alignof__(x))))
+#else
+#define LAZY_ALIGN(x)
+#endif
+
template <typename T>
struct StaticallyAllocatedInstanceTrait {
- typedef char StorageType[sizeof(T)];
+ typedef char StorageType[sizeof(T)] LAZY_ALIGN(T);
static T* MutableInstance(StorageType* storage) {
return reinterpret_cast<T*>(storage);
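The macro matters because a plain char array only has alignment 1, so constructing a T into it by placement-new needs explicitly aligned storage. A minimal sketch of the same idea using the GCC attribute (C++11 would spell this alignas; compiles with GCC/Clang):

#include <cstdio>
#include <new>

// Raw bytes carrying the alignment of T, so placement-new into them is safe.
template <typename T>
struct AlignedStorage {
  char bytes[sizeof(T)] __attribute__((aligned(__alignof__(T))));
  T* Construct() { return new (bytes) T(); }
};

int main() {
  AlignedStorage<double> storage;
  double* d = storage.Construct();
  *d = 1.5;
  std::printf("%f\n", *d);  // prints 1.500000
}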
@@ -121,6 +133,8 @@
}
};
+#undef LAZY_ALIGN
+
template <typename T>
struct DynamicallyAllocatedInstanceTrait {
@@ -208,7 +222,8 @@
mutable OnceType once_;
// Note that the previous field, OnceType, is an AtomicWord which guarantees
- // the correct alignment of the storage field below.
+ // 4-byte alignment of the storage field below. If compiling with GCC (>4.2),
+ // the LAZY_ALIGN macro above will guarantee correctness for any alignment.
mutable StorageType storage_;
};
diff --git a/src/log.cc b/src/log.cc
index 21d64df..d93a9d8 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1730,13 +1730,20 @@
}
// Protects the state below.
-static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;
+static Mutex* active_samplers_mutex = NULL;
List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
+void SamplerRegistry::SetUp() {
+ if (!active_samplers_mutex) {
+ active_samplers_mutex = OS::CreateMutex();
+ }
+}
+
+
bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
- ScopedLock lock(active_samplers_mutex.Pointer());
+ ScopedLock lock(active_samplers_mutex);
for (int i = 0;
ActiveSamplersExist() && i < active_samplers_->length();
++i) {
@@ -1763,7 +1770,7 @@
void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex.Pointer());
+ ScopedLock lock(active_samplers_mutex);
if (active_samplers_ == NULL) {
active_samplers_ = new List<Sampler*>;
} else {
@@ -1775,7 +1782,7 @@
void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex.Pointer());
+ ScopedLock lock(active_samplers_mutex);
ASSERT(active_samplers_ != NULL);
bool removed = active_samplers_->RemoveElement(sampler);
ASSERT(removed);
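The change above is one instance of a pattern this commit applies across log.cc and the platform files: replace a lazily-created LazyMutex with a plain Mutex* created eagerly in a SetUp hook, so the lock path pays no first-use check. A minimal sketch with illustrative types (std::mutex standing in for V8's Mutex):

#include <cstddef>
#include <mutex>

static std::mutex* g_registry_mutex = NULL;

void RegistrySetUp() {  // called exactly once during process setup
  if (g_registry_mutex == NULL) g_registry_mutex = new std::mutex();
}

void WithRegistryLocked(void (*fn)()) {  // hot path: plain dereference
  std::lock_guard<std::mutex> lock(*g_registry_mutex);
  fn();
}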
diff --git a/src/log.h b/src/log.h
index 1297387..03c7b3b 100644
--- a/src/log.h
+++ b/src/log.h
@@ -437,6 +437,8 @@
HAS_CPU_PROFILING_SAMPLERS
};
+ static void SetUp();
+
typedef void (*VisitSampler)(Sampler*, void*);
static State GetState();
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 0d7f921..fa64e1e 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -143,7 +143,7 @@
27, // k1
28, // gp
29, // sp
- 30, // s8_fp
+ 30, // fp
31, // ra
};
return kNumbers[reg.code()];
@@ -163,7 +163,7 @@
k0, k1,
gp,
sp,
- s8_fp,
+ fp,
ra
};
return kRegisters[num];
@@ -237,28 +237,28 @@
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
- | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
+ | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
- | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
+ | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
| (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
| (0 & kImm16Mask);
-const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
| (0 & kImm16Mask);
-const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
| (0 & kImm16Mask);
-const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
| (kNegOffset & kImm16Mask);
-const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
| (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 8b877f6..5e67d0c 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -125,40 +125,59 @@
int code_;
};
-const Register no_reg = { -1 };
+#define REGISTER(N, C) \
+ const int kRegister_ ## N ## _Code = C; \
+ const Register N = { C }
-const Register zero_reg = { 0 }; // Always zero.
-const Register at = { 1 }; // at: Reserved for synthetic instructions.
-const Register v0 = { 2 }; // v0, v1: Used when returning multiple values
-const Register v1 = { 3 }; // from subroutines.
-const Register a0 = { 4 }; // a0 - a4: Used to pass non-FP parameters.
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 }; // t0 - t9: Can be used without reservation, act
-const Register t1 = { 9 }; // as temporary registers and are allowed to
-const Register t2 = { 10 }; // be destroyed by subroutines.
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 }; // s0 - s7: Subroutine register variables.
-const Register s1 = { 17 }; // Subroutines that write to these registers
-const Register s2 = { 18 }; // must restore their values before exiting so
-const Register s3 = { 19 }; // that the caller can expect the values to be
-const Register s4 = { 20 }; // preserved.
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 }; // k0, k1: Reserved for system calls and
-const Register k1 = { 27 }; // interrupt handlers.
-const Register gp = { 28 }; // gp: Reserved.
-const Register sp = { 29 }; // sp: Stack pointer.
-const Register s8_fp = { 30 }; // fp: Frame pointer.
-const Register ra = { 31 }; // ra: Return address pointer.
+REGISTER(no_reg, -1);
+// Always zero.
+REGISTER(zero_reg, 0);
+// at: Reserved for synthetic instructions.
+REGISTER(at, 1);
+// v0, v1: Used when returning multiple values from subroutines.
+REGISTER(v0, 2);
+REGISTER(v1, 3);
+// a0 - a3: Used to pass non-FP parameters.
+REGISTER(a0, 4);
+REGISTER(a1, 5);
+REGISTER(a2, 6);
+REGISTER(a3, 7);
+// t0 - t9: Can be used without reservation, act as temporary registers and are
+// allowed to be destroyed by subroutines.
+REGISTER(t0, 8);
+REGISTER(t1, 9);
+REGISTER(t2, 10);
+REGISTER(t3, 11);
+REGISTER(t4, 12);
+REGISTER(t5, 13);
+REGISTER(t6, 14);
+REGISTER(t7, 15);
+// s0 - s7: Subroutine register variables. Subroutines that write to these
+// registers must restore their values before exiting so that the caller can
+// expect the values to be preserved.
+REGISTER(s0, 16);
+REGISTER(s1, 17);
+REGISTER(s2, 18);
+REGISTER(s3, 19);
+REGISTER(s4, 20);
+REGISTER(s5, 21);
+REGISTER(s6, 22);
+REGISTER(s7, 23);
+REGISTER(t8, 24);
+REGISTER(t9, 25);
+// k0, k1: Reserved for system calls and interrupt handlers.
+REGISTER(k0, 26);
+REGISTER(k1, 27);
+// gp: Reserved.
+REGISTER(gp, 28);
+// sp: Stack pointer.
+REGISTER(sp, 29);
+// fp: Frame pointer.
+REGISTER(fp, 30);
+// ra: Return address pointer.
+REGISTER(ra, 31);
+
+#undef REGISTER
int ToNumber(Register reg);
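The REGISTER macro emits an integer constant alongside each Register struct because a struct member read such as sp.code() is not an integral constant expression in C++98, whereas kRegister_sp_Code is, so it can appear in static initializers such as kPopInstruction above. A minimal sketch (shift amounts illustrative):

#include <cstdio>

struct Register {
  int code_;
  int code() const { return code_; }
};

#define REGISTER(N, C)                \
  const int kRegister_##N##_Code = C; \
  const Register N = { C }

REGISTER(sp, 29);
#undef REGISTER

// Legal in C++98 because kRegister_sp_Code is an integral constant
// expression; sp.code() would not be.
const int kPopBits = (kRegister_sp_Code << 21) | (kRegister_sp_Code << 16);

int main() { std::printf("%d 0x%x\n", sp.code(), kPopBits); }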
@@ -303,7 +322,6 @@
static const Register& kLithiumScratchReg2 = s4; // Scratch register.
static const Register& kRootRegister = s6; // Roots array pointer.
static const Register& cp = s7; // JavaScript context pointer.
-static const Register& fp = s8_fp; // Alias for fp.
static const DoubleRegister& kLithiumScratchDouble = f30;
static const FPURegister& kDoubleRegZero = f28;
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 1b3242c..3e7b5bf 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -481,7 +481,7 @@
__ Branch(&not_special, gt, source_, Operand(1));
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
+ const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
// Safe to use 'at' as dest reg here.
__ Or(at, exponent, Operand(exponent_word_for_1));
@@ -4421,7 +4421,7 @@
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
- static const int kDisplacement =
+ const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
// Check that the key is a smi.
@@ -4833,10 +4833,10 @@
// sp[8]: subject string
// sp[12]: JSRegExp object
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
Isolate* isolate = masm->isolate();
@@ -5045,8 +5045,8 @@
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- static const int kParameterRegisters = 4;
+ const int kRegExpExecuteArguments = 8;
+ const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -5931,7 +5931,7 @@
// scratch: -
// Perform a number of probes in the symbol table.
- static const int kProbes = 4;
+ const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
@@ -6059,9 +6059,9 @@
// 0 <= from <= to <= string.length.
// If any of these assumptions fail, we call the runtime system.
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
__ lw(a2, MemOperand(sp, kToOffset));
__ lw(a3, MemOperand(sp, kFromOffset));
@@ -7356,43 +7356,46 @@
RememberedSetAction action;
};
+#define REG(Name) { kRegister_ ## Name ## _Code }
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { s2, s0, t3, EMIT_REMEMBERED_SET },
- { s2, a2, t3, EMIT_REMEMBERED_SET },
+ { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
+ { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
- { a3, t0, t1, EMIT_REMEMBERED_SET },
+ { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
- { t0, a1, a2, OMIT_REMEMBERED_SET },
+ { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { a1, a2, a3, EMIT_REMEMBERED_SET },
- { a3, a2, a1, EMIT_REMEMBERED_SET },
+ { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
+ { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { a2, a1, a3, EMIT_REMEMBERED_SET },
- { a3, a1, a2, EMIT_REMEMBERED_SET },
+ { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
+ { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { a3, a2, t0, EMIT_REMEMBERED_SET },
- { a2, a3, t0, EMIT_REMEMBERED_SET },
+ { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
+ { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { a2, a3, t5, EMIT_REMEMBERED_SET },
- { a2, a3, t5, OMIT_REMEMBERED_SET },
+ { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
+ { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
- { t2, a2, a0, EMIT_REMEMBERED_SET },
- { a2, t2, t5, EMIT_REMEMBERED_SET },
+ { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
+ { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
- { t1, a0, t2, EMIT_REMEMBERED_SET },
+ { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
+
bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7419,7 +7422,7 @@
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 64cdc8e..2afad13 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -2977,7 +2977,7 @@
__ Move(f14, zero_reg, a1);
// Subtract and store the result in the heap number.
__ sub_d(f0, f12, f14);
- __ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
__ mov(v0, s0);
} else {
__ PrepareCallCFunction(2, a0);
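V8 heap pointers carry a low tag (kHeapObjectTag is 1), so the byte address of a field is the tagged pointer plus the field offset minus the tag. FieldMemOperand(obj, offset) folds that subtraction in, which is why the sdc1 rewrite above (and the similar rewrites in stub-cache-mips.cc below) encodes the same instruction with clearer intent. A minimal sketch (kValueOffset illustrative, not HeapNumber's real layout):

#include <cstdio>

const int kHeapObjectTag = 1;  // low bit set on V8 heap object pointers

// What FieldMemOperand(obj, offset) computes as the load/store
// displacement relative to the tagged object pointer.
int FieldDisplacement(int field_offset) {
  return field_offset - kHeapObjectTag;
}

int main() {
  const int kValueOffset = 8;
  std::printf("%d\n", FieldDisplacement(kValueOffset));  // prints 7
}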
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 2af9d6f..e4de40f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -4367,6 +4367,14 @@
deferred->entry(),
TAG_OBJECT);
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
@@ -4385,14 +4393,14 @@
__ sw(scratch, FieldMemOperand(result, property_offset));
}
}
-
- __ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4400,9 +4408,9 @@
__ mov(result, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ LoadHeapObject(a0, constructor);
+ __ li(a0, Operand(Smi::FromInt(instance_size)));
__ push(a0);
- CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(v0, result);
}
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
index 41b060d..4a5fbe3 100644
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -33,8 +33,6 @@
namespace v8 {
namespace internal {
-static const Register kSavedValueRegister = kLithiumScratchReg;
-
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32),
@@ -170,9 +168,9 @@
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
- __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
- __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+ __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
@@ -189,11 +187,11 @@
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
- // Spilled value is in kSavedValueRegister or kLithiumScratchDouble.
+ // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
- __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
@@ -245,8 +243,8 @@
__ sw(at, destination_operand);
}
} else {
- __ lw(kSavedValueRegister, source_operand);
- __ sw(kSavedValueRegister, destination_operand);
+ __ lw(kLithiumScratchReg, source_operand);
+ __ sw(kLithiumScratchReg, destination_operand);
}
}
@@ -263,13 +261,13 @@
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsInteger32(constant_source)) {
- __ li(kSavedValueRegister,
+ __ li(kLithiumScratchReg,
Operand(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(kSavedValueRegister,
+ __ LoadObject(kLithiumScratchReg,
cgen_->ToHandle(constant_source));
}
- __ sw(kSavedValueRegister, cgen_->ToMemOperand(destination));
+ __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
@@ -291,15 +289,15 @@
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kLithiumScratchDouble was used to break the cycle,
- // but kSavedValueRegister is free.
+ // but kLithiumScratchReg is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
- __ lw(kSavedValueRegister, source_operand);
- __ sw(kSavedValueRegister, destination_operand);
- __ lw(kSavedValueRegister, source_high_operand);
- __ sw(kSavedValueRegister, destination_high_operand);
+ __ lw(kLithiumScratchReg, source_operand);
+ __ sw(kLithiumScratchReg, destination_operand);
+ __ lw(kLithiumScratchReg, source_high_operand);
+ __ sw(kLithiumScratchReg, destination_high_operand);
} else {
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index ae4da93..dde4a65 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -482,6 +482,42 @@
}
+void RegExpMacroAssemblerMIPS::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ Subu(a0, current_character(), Operand(from));
+ // Unsigned lower-or-same condition.
+ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ Subu(a0, current_character(), Operand(from));
+ // Unsigned higher condition.
+ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ li(a0, Operand(table));
+ if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ __ And(a1, current_character(), Operand(kTableSize - 1));
+ __ Addu(a0, a0, a1);
+ } else {
+ __ Addu(a0, a0, current_character());
+ }
+
+ __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
+
+
bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
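CheckCharacterInRange and CheckCharacterNotInRange above use the single-comparison trick this comment describes: after subtracting 'from' in unsigned arithmetic, any character below the range wraps around to a huge value and fails the <= test, so one unsigned compare covers both bounds. A minimal sketch:

#include <cstdio>

// One compare replaces two: values below 'from' wrap around to very
// large unsigned numbers and fail the <= test.
bool InRange(unsigned c, unsigned from, unsigned to) {
  return (c - from) <= (to - from);
}

int main() {
  std::printf("%d %d %d\n",
              InRange('m', 'a', 'z'),   // 1
              InRange('A', 'a', 'z'),   // 0: 'A' - 'a' wraps to a huge value
              InRange('a', 'a', 'z'));  // 1
}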
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 294bc0a..4d18c94 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1737,14 +1737,14 @@
// expensive shift first, and use an offset later on.
__ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(elements, elements, t1);
- __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Branch(&call_builtin, eq, v0, Operand(t2));
// Set the array's length.
__ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole.
- __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1);
__ Ret();
@@ -3496,7 +3496,7 @@
CpuFeatures::Scope scope(FPU);
__ mtc1(value, f0);
__ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
Register dst1 = t2;
@@ -3544,7 +3544,7 @@
__ Cvt_d_uw(f0, value, f22);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
@@ -3598,7 +3598,7 @@
__ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// The float (single) value is already in fpu reg f0 (if we use float).
__ cvt_d_s(f0, f0);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
diff --git a/src/objects-inl.h b/src/objects-inl.h
index bf86a82..49c8db8 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3004,26 +3004,26 @@
bool Code::optimizable() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
}
void Code::set_optimizable(bool value) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
}
bool Code::has_deoptimization_support() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
}
void Code::set_has_deoptimization_support(bool value) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
@@ -3031,14 +3031,14 @@
bool Code::has_debug_break_slots() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
}
void Code::set_has_debug_break_slots(bool value) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
@@ -3046,14 +3046,14 @@
bool Code::is_compiled_optimizable() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsIsCompiledOptimizable::decode(flags);
}
void Code::set_compiled_optimizable(bool value) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
@@ -3061,26 +3061,26 @@
int Code::allow_osr_at_loop_nesting_level() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
}
void Code::set_allow_osr_at_loop_nesting_level(int level) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
}
int Code::profiler_ticks() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
return READ_BYTE_FIELD(this, kProfilerTicksOffset);
}
void Code::set_profiler_ticks(int ticks) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
ASSERT(ticks < 256);
WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
}
@@ -3112,13 +3112,13 @@
unsigned Code::stack_check_table_offset() {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
}
void Code::set_stack_check_table_offset(unsigned offset) {
- ASSERT(kind() == FUNCTION);
+ ASSERT_EQ(FUNCTION, kind());
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
}
diff --git a/src/objects.cc b/src/objects.cc
index b3e79f2..a4f63a1 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1390,9 +1390,11 @@
case EXTERNAL_FLOAT_ARRAY_TYPE:
case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
+ shared->SharedFunctionInfoIterateBody(v);
break;
+ }
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
@@ -3751,13 +3753,11 @@
MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
StringDictionary* dictionary;
if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- dictionary,
- DONT_ENUM,
- kNonStrictMode);
- if (store_result->IsFailure()) return store_result;
- return dictionary;
+ // Using AddProperty or SetPropertyPostInterceptor here could fail, because
+ // the object might be non-extensible.
+ return HasFastProperties()
+ ? AddFastProperty(GetHeap()->hidden_symbol(), dictionary, DONT_ENUM)
+ : AddSlowProperty(GetHeap()->hidden_symbol(), dictionary, DONT_ENUM);
}
@@ -7928,6 +7928,12 @@
}
+void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
+ v->VisitSharedFunctionInfo(this);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+}
+
+
#define DECLARE_TAG(ignore1, name, ignore2) name,
const char* const VisitorSynchronization::kTags[
VisitorSynchronization::kNumberOfSyncTags] = {
@@ -7985,7 +7991,6 @@
CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
-
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
VisitPointer(rinfo->target_object_address());
diff --git a/src/objects.h b/src/objects.h
index ca6acf5..76d8351 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -5490,6 +5490,8 @@
static bool CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
+ void SharedFunctionInfoIterateBody(ObjectVisitor* v);
+
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -8538,6 +8540,8 @@
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+ virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
+
// Visits contiguous arrays of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
// may be modified on return.
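Adding VisitSharedFunctionInfo as a virtual with an empty default body means only visitors that care about SharedFunctionInfo override it, and every existing ObjectVisitor subclass compiles unchanged. A minimal sketch of the hook pattern with illustrative types:

#include <cstddef>
#include <cstdio>

struct SharedFunctionInfo;  // opaque for the sketch

struct ObjectVisitor {
  virtual ~ObjectVisitor() {}
  // Empty default body: existing subclasses need not change.
  virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
};

struct CountingVisitor : public ObjectVisitor {
  int seen;
  CountingVisitor() : seen(0) {}
  virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) { ++seen; }
};

int main() {
  CountingVisitor v;
  v.VisitSharedFunctionInfo(NULL);
  std::printf("%d\n", v.seen);  // 1
}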
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index dd7253b..8b1e381 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -62,22 +62,8 @@
static Mutex* limit_mutex = NULL;
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
+ POSIXPostSetUp();
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
@@ -634,8 +620,14 @@
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
instance_ = new SamplerThread(sampler->interval());
@@ -646,7 +638,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -732,7 +724,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SamplerThread* instance_;
private:
@@ -740,10 +732,23 @@
};
-LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
+void OS::SetUp() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+ SamplerThread::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
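The seed comment repeated in these OS::SetUp bodies warns about a real conversion hazard: TimeCurrentMillis returns a double, and converting a double larger than UINT_MAX directly to unsigned int is undefined behavior (commonly saturating to all ones), while going through uint64_t first keeps well-defined low bits. A minimal sketch:

#include <cstdio>
#include <stdint.h>

int main() {
  double millis = 1334070000000.0;  // a 2012-era millisecond timestamp
  // Direct double -> unsigned int would be undefined here (the value
  // exceeds UINT_MAX); double -> uint64_t is fine, and the subsequent
  // narrowing is well defined: it keeps the low 32 bits.
  uint64_t wide = static_cast<uint64_t>(millis);
  unsigned int seed = static_cast<unsigned int>(wide);
  std::printf("%u\n", seed);
}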
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 6a004ea..6b1c987 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -80,22 +80,8 @@
static Mutex* limit_mutex = NULL;
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
+ POSIXPostSetUp();
}
@@ -730,8 +716,14 @@
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Install a signal handler.
@@ -751,7 +743,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -844,7 +836,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -853,12 +845,25 @@
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
+void OS::SetUp() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+ SignalSender::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index ca394aa..9bea32d 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -46,9 +46,9 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#ifdef __GLIBC__
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
#include <execinfo.h> // backtrace, backtrace_symbols
-#endif // def __GLIBC__
+#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -79,37 +79,8 @@
static Mutex* limit_mutex = NULL;
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-
-#ifdef __arm__
- // When running on ARM hardware check that the EABI used by V8 and
- // by the C code is the same.
- bool hard_float = OS::ArmUsingHardFloat();
- if (hard_float) {
-#if !USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- } else {
-#if USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- }
-#endif
-}
-
-
void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
+ POSIXPostSetUp();
}
@@ -564,7 +535,7 @@
int OS::StackWalk(Vector<OS::StackFrame> frames) {
// backtrace is a glibc extension.
-#ifdef __GLIBC__
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
int frames_size = frames.length();
ScopedVector<void*> addresses(frames_size);
@@ -589,9 +560,9 @@
free(symbols);
return frames_count;
-#else // ndef __GLIBC__
+#else // defined(__GLIBC__) && !defined(__UCLIBC__)
return 0;
-#endif // ndef __GLIBC__
+#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
}
@@ -1084,6 +1055,12 @@
vm_tgid_(getpid()),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
@@ -1101,7 +1078,7 @@
}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
@@ -1114,7 +1091,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -1217,7 +1194,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -1227,12 +1204,40 @@
};
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
+void OS::SetUp() {
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+
+#ifdef __arm__
+ // When running on ARM hardware check that the EABI used by V8 and
+ // by the C code is the same.
+ bool hard_float = OS::ArmUsingHardFloat();
+ if (hard_float) {
+#if !USE_EABI_HARDFLOAT
+ PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
+ "-DUSE_EABI_HARDFLOAT\n");
+ exit(1);
+#endif
+ } else {
+#if USE_EABI_HARDFLOAT
+ PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
+ "-DUSE_EABI_HARDFLOAT\n");
+ exit(1);
+#endif
+ }
+#endif
+ SignalSender::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index dbcd80e..afcd80a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -94,18 +94,8 @@
static Mutex* limit_mutex = NULL;
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
+ POSIXPostSetUp();
}
@@ -753,8 +743,14 @@
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
instance_ = new SamplerThread(sampler->interval());
@@ -765,7 +761,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -862,7 +858,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SamplerThread* instance_;
private:
@@ -872,10 +868,19 @@
#undef REGISTER_FIELD
-LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
+void OS::SetUp() {
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+ SamplerThread::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 6a06e3e..2b2d530 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -100,18 +100,8 @@
}
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
+ POSIXPostSetUp();
}
@@ -803,6 +793,12 @@
vm_tgid_(getpid()),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
@@ -820,7 +816,7 @@
}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
@@ -833,7 +829,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -927,7 +923,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -937,12 +933,21 @@
};
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
+void OS::SetUp() {
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+ SignalSender::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 59066ea..6631659 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -147,15 +147,6 @@
#undef MATH_FUNCTION
-void MathSetup() {
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
- init_fast_sqrt_function();
-}
-
-
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
@@ -313,20 +304,11 @@
#if defined(V8_TARGET_ARCH_IA32)
static OS::MemCopyFunction memcopy_function = NULL;
-static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
// Defined in codegen-ia32.cc.
OS::MemCopyFunction CreateMemCopyFunction();
// Copy memory area to disjoint memory area.
void OS::MemCopy(void* dest, const void* src, size_t size) {
- if (memcopy_function == NULL) {
- ScopedLock lock(memcopy_function_mutex.Pointer());
- if (memcopy_function == NULL) {
- OS::MemCopyFunction temp = CreateMemCopyFunction();
- MemoryBarrier();
- memcopy_function = temp;
- }
- }
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
(*memcopy_function)(dest, src, size);
@@ -336,6 +318,18 @@
}
#endif // V8_TARGET_ARCH_IA32
+
+void POSIXPostSetUp() {
+#if defined(V8_TARGET_ARCH_IA32)
+ memcopy_function = CreateMemCopyFunction();
+#endif
+ init_fast_sin_function();
+ init_fast_cos_function();
+ init_fast_tan_function();
+ init_fast_log_function();
+ init_fast_sqrt_function();
+}
+
// ----------------------------------------------------------------------------
// POSIX string support.
//
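POSIXPostSetUp above replaces the double-checked locking previously inside OS::MemCopy: the function pointer is now installed once, after CPU feature detection, so the copy path carries no synchronization at all. A minimal sketch with illustrative names (std::memcpy standing in for the generated stub):

#include <cstddef>
#include <cstring>

typedef void (*MemCopyFunction)(void* dest, const void* src,
                                std::size_t size);

// Stand-in for the stub CreateMemCopyFunction() generates after CPU
// feature detection.
static void PlainMemCopy(void* dest, const void* src, std::size_t size) {
  std::memcpy(dest, src, size);
}

static MemCopyFunction g_memcopy = 0;

void PostSetUpSketch() { g_memcopy = PlainMemCopy; }  // once, at startup

void MemCopySketch(void* dest, const void* src, std::size_t size) {
  (*g_memcopy)(dest, src, size);  // no NULL check, no lock
}

int main() {
  PostSetUpSketch();
  char from[4] = "abc", to[4];
  MemCopySketch(to, from, 4);
  return to[0] == 'a' ? 0 : 1;
}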
diff --git a/src/platform-posix.h b/src/platform-posix.h
index 4ae0e52..7a982ed 100644
--- a/src/platform-posix.h
+++ b/src/platform-posix.h
@@ -31,9 +31,8 @@
namespace v8 {
namespace internal {
-// Used by platform implementation files during OS::PostSetUp() to initialize
-// the math functions.
-void MathSetup();
+// Used by platform implementation files during OS::PostSetUp().
+void POSIXPostSetUp();
} } // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index e044dbc..be8bbfc 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -91,22 +91,10 @@
static Mutex* limit_mutex = NULL;
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
void OS::PostSetUp() {
- // Math functions depend on CPU features therefore they are initialized after
- // CPU.
- MathSetup();
+ POSIXPostSetUp();
}
@@ -724,6 +712,12 @@
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
@@ -741,7 +735,7 @@
}
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
@@ -754,7 +748,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -848,7 +842,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
@@ -857,12 +851,25 @@
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
+void OS::SetUp() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly will cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+ SignalSender::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 3adae18..e36fc87 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -141,20 +141,11 @@
#if defined(V8_TARGET_ARCH_IA32)
static OS::MemCopyFunction memcopy_function = NULL;
-static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
// Defined in codegen-ia32.cc.
OS::MemCopyFunction CreateMemCopyFunction();
// Copy memory area to disjoint memory area.
void OS::MemCopy(void* dest, const void* src, size_t size) {
- if (memcopy_function == NULL) {
- ScopedLock lock(memcopy_function_mutex.Pointer());
- if (memcopy_function == NULL) {
- OS::MemCopyFunction temp = CreateMemCopyFunction();
- MemoryBarrier();
- memcopy_function = temp;
- }
- }
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
(*memcopy_function)(dest, src, size);
@@ -563,22 +554,13 @@
}
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
void OS::PostSetUp() {
// Math functions depend on CPU features, therefore they are initialized after
// CPU.
MathSetup();
+#if defined(V8_TARGET_ARCH_IA32)
+ memcopy_function = CreateMemCopyFunction();
+#endif
}
@@ -1967,8 +1949,14 @@
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
+ static void SetUp() {
+ if (!mutex_) {
+ mutex_ = OS::CreateMutex();
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
instance_ = new SamplerThread(sampler->interval());
@@ -1979,7 +1967,7 @@
}
static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_.Pointer());
+ ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -2065,7 +2053,7 @@
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
- static LazyMutex mutex_;
+ static Mutex* mutex_;
static SamplerThread* instance_;
private:
@@ -2073,10 +2061,23 @@
};
-LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
+void OS::SetUp() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srand(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+ SamplerThread::SetUp();
+}
+
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 2315fc5..683fec1 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -975,6 +975,7 @@
name_ = name;
self_size_ = self_size;
retained_size_ = 0;
+ entry_index_ = -1;
children_count_ = children_count;
retainers_count_ = retainers_count;
dominator_ = NULL;
@@ -1108,7 +1109,7 @@
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapEntrySize = 36;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};
@@ -1133,7 +1134,6 @@
gc_roots_entry_(NULL),
natives_root_entry_(NULL),
raw_entries_(NULL),
- entries_sorted_(false),
max_snapshot_js_object_id_(0) {
STATIC_CHECK(
sizeof(HeapGraphEdge) ==
@@ -1185,6 +1185,7 @@
HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
ASSERT(root_entry_ == NULL);
+ ASSERT(entries_.is_empty()); // Root entry must be the first one.
return (root_entry_ = AddEntry(HeapEntry::kObject,
"",
HeapObjectsMap::kInternalRootObjectId,
@@ -1285,11 +1286,11 @@
List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (!entries_sorted_) {
- entries_.Sort(SortByIds);
- entries_sorted_ = true;
+ if (sorted_entries_.is_empty()) {
+ sorted_entries_.AddAll(entries_);
+ sorted_entries_.Sort(SortByIds);
}
- return &entries_;
+ return &sorted_entries_;
}
@@ -1514,20 +1515,39 @@
}
-void HeapEntriesMap::AllocateEntries() {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
+void HeapEntriesMap::AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry) {
+ EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(map_entry->value);
entry_info->entry = entry_info->allocator->AllocateEntry(
- p->key,
+ map_entry->key,
entry_info->children_count,
entry_info->retainers_count);
ASSERT(entry_info->entry != NULL);
ASSERT(entry_info->entry != kHeapEntryPlaceholder);
entry_info->children_count = 0;
entry_info->retainers_count = 0;
+}
+
+
+void HeapEntriesMap::AllocateEntries(HeapThing root_object) {
+ HashMap::Entry* root_entry =
+ entries_.Lookup(root_object, Hash(root_object), false);
+ ASSERT(root_entry != NULL);
+ // Make sure root entry is allocated first.
+ AllocateHeapEntryForMapEntry(root_entry);
+ void* root_entry_value = root_entry->value;
+ // Remove the root object from map while iterating through other entries.
+ entries_.Remove(root_object, Hash(root_object));
+ root_entry = NULL;
+
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ AllocateHeapEntryForMapEntry(p);
}
+
+ // Insert root entry back.
+ root_entry = entries_.Lookup(root_object, Hash(root_object), true);
+ root_entry->value = root_entry_value;
}
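The remove-then-reinsert dance above guarantees the root's HeapEntry is allocated first (so it occupies entry index 0) even though the hash map iterates in arbitrary order. A minimal sketch of the same ordering trick with generic containers:

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> entries;  // key -> entry index, -1 = unset
  entries["zebra"] = -1;
  entries["apple"] = -1;
  entries["root"] = -1;

  int next_index = 0;
  int root_index = next_index++;  // root claims index 0 before the sweep
  entries.erase("root");          // hide it so the sweep cannot see it
  for (std::map<std::string, int>::iterator it = entries.begin();
       it != entries.end(); ++it) {
    it->second = next_index++;
  }
  entries["root"] = root_index;   // reinsert with its reserved index

  std::printf("root=%d apple=%d zebra=%d\n",
              entries["root"], entries["apple"], entries["zebra"]);
  // prints root=0 apple=1 zebra=2
}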
@@ -3106,7 +3126,7 @@
entries_.total_retainers_count());
// Allocate heap objects to entries hash map.
- entries_.AllocateEntries();
+ entries_.AllocateEntries(V8HeapExplorer::kInternalRootObject);
// Pass 2. Fill references.
if (!FillReferences()) return false;
@@ -3353,9 +3373,7 @@
MaybeWriteChunk();
}
}
- void AddNumber(int n) { AddNumberImpl<int>(n, "%d"); }
void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
- void AddNumber(uint64_t n) { AddNumberImpl<uint64_t>(n, "%llu"); }
void Finalize() {
if (aborted_) return;
ASSERT(chunk_pos_ < chunk_size_);
@@ -3419,7 +3437,6 @@
}
// Since the node graph is cyclic, we need the first pass to enumerate
// them. Strings can be serialized in one pass.
- EnumerateNodes();
SerializeImpl();
delete writer_;
@@ -3472,34 +3489,6 @@
}
-class HeapSnapshotJSONSerializerEnumerator {
- public:
- explicit HeapSnapshotJSONSerializerEnumerator(HeapSnapshotJSONSerializer* s)
- : s_(s) {
- }
- void Apply(HeapEntry** entry) {
- s_->GetNodeId(*entry);
- }
- private:
- HeapSnapshotJSONSerializer* s_;
-};
-
-void HeapSnapshotJSONSerializer::EnumerateNodes() {
- GetNodeId(snapshot_->root()); // Make sure root gets the first id.
- HeapSnapshotJSONSerializerEnumerator iter(this);
- snapshot_->IterateEntries(&iter);
-}
-
-
-int HeapSnapshotJSONSerializer::GetNodeId(HeapEntry* entry) {
- HashMap::Entry* cache_entry = nodes_.Lookup(entry, ObjectHash(entry), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = reinterpret_cast<void*>(next_node_id_++);
- }
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
HashMap::Entry* cache_entry = strings_.Lookup(
const_cast<char*>(s), ObjectHash(s), true);
@@ -3510,6 +3499,31 @@
}
+// This function won't work correctly for MIN_INT, but that is not
+// a problem for heap snapshot serialization.
+static int itoa(int value, const Vector<char>& buffer, int buffer_pos) {
+ if (value < 0) {
+ buffer[buffer_pos++] = '-';
+ value = -value;
+ }
+
+ int number_of_digits = 0;
+ int t = value;
+ do {
+ ++number_of_digits;
+ } while (t /= 10);
+
+ buffer_pos += number_of_digits;
+ int result = buffer_pos;
+ do {
+ int last_digit = value % 10;
+ buffer[--buffer_pos] = '0' + last_digit;
+ value /= 10;
+ } while (value);
+ return result;
+}
+
+
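
A standalone sketch of the same two-pass technique (count the digits first, then fill the buffer backwards and return the end position), using a plain char* in place of V8's Vector<char>. As the comment above notes, negating INT_MIN overflows, which is why the helper is unsafe for MIN_INT; ItoaSketch below shares that caveat and is illustrative only:

#include <cassert>
#include <cstring>

// Returns the position one past the last character written.
static int ItoaSketch(int value, char* buffer, int pos) {
  if (value < 0) {
    buffer[pos++] = '-';
    value = -value;  // Overflows for INT_MIN, hence the caveat above.
  }
  int number_of_digits = 0;
  int t = value;
  do {
    ++number_of_digits;
  } while (t /= 10);
  pos += number_of_digits;
  int end = pos;
  do {
    buffer[--pos] = '0' + (value % 10);
    value /= 10;
  } while (value);
  return end;
}

int main() {
  char buf[16];
  int end = ItoaSketch(-345, buf, 0);
  buf[end] = '\0';
  assert(end == 4 && strcmp(buf, "-345") == 0);
  return 0;
}
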
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
// The buffer needs space for 3 ints, 3 commas and \0
static const int kBufferSize =
@@ -3519,13 +3533,14 @@
|| edge->type() == HeapGraphEdge::kHidden
|| edge->type() == HeapGraphEdge::kWeak
? edge->index() : GetStringId(edge->name());
- STATIC_CHECK(sizeof(int) == sizeof(edge->type())); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(edge_name_or_index)); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(GetNodeId(edge->to()))); // NOLINT
- int result = OS::SNPrintF(buffer, ",%d,%d,%d",
- edge->type(), edge_name_or_index, GetNodeId(edge->to()));
- USE(result);
- ASSERT(result != -1);
+ int buffer_pos = 0;
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(edge->type(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(edge_name_or_index, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(edge->to()->entry_index(), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
@@ -3538,23 +3553,23 @@
+ 7 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
Vector<HeapGraphEdge> children = entry->children();
- STATIC_CHECK(sizeof(int) == sizeof(entry->type())); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(GetStringId(entry->name()))); // NOLINT
- STATIC_CHECK(sizeof(unsigned) == sizeof(entry->id())); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(entry->self_size())); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(entry->retained_size())); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(GetNodeId(entry->dominator()))); // NOLINT
- STATIC_CHECK(sizeof(int) == sizeof(children.length())); // NOLINT
- int result = OS::SNPrintF(buffer, "\n,%d,%d,%u,%d,%d,%d,%d",
- entry->type(),
- GetStringId(entry->name()),
- entry->id(),
- entry->self_size(),
- entry->retained_size(),
- GetNodeId(entry->dominator()),
- children.length());
- USE(result);
- ASSERT(result != -1);
+ int buffer_pos = 0;
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(entry->type(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(GetStringId(entry->name()), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(entry->id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(entry->self_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(entry->retained_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(entry->dominator()->entry_index(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(children.length(), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
for (int i = 0; i < children.length(); ++i) {
SerializeEdge(&children[i]);
@@ -3621,23 +3636,25 @@
const int node_fields_count = 7;
// type,name,id,self_size,retained_size,dominator,children_count.
const int edge_fields_count = 3; // type,name|index,to_node.
- List<HashMap::Entry*> sorted_nodes;
- SortHashMap(&nodes_, &sorted_nodes);
- // Rewrite node ids, so they refer to actual array positions.
- if (sorted_nodes.length() > 1) {
+
+ List<HeapEntry*>& nodes = *(snapshot_->entries());
+ // Root must be the first.
+ ASSERT(nodes.first() == snapshot_->root());
+ // Rewrite node indexes, so they refer to actual array positions. Do this
+ // only once.
+ if (nodes[0]->entry_index() == -1) {
// Nodes start from array index 1.
- int prev_value = 1;
- sorted_nodes[0]->value = reinterpret_cast<void*>(prev_value);
- for (int i = 1; i < sorted_nodes.length(); ++i) {
- HeapEntry* prev_heap_entry =
- reinterpret_cast<HeapEntry*>(sorted_nodes[i-1]->key);
- prev_value += node_fields_count +
- prev_heap_entry->children().length() * edge_fields_count;
- sorted_nodes[i]->value = reinterpret_cast<void*>(prev_value);
+ int index = 1;
+ for (int i = 0; i < nodes.length(); ++i) {
+ HeapEntry* node = nodes[i];
+ node->set_entry_index(index);
+ index += node_fields_count +
+ node->children().length() * edge_fields_count;
}
}
- for (int i = 0; i < sorted_nodes.length(); ++i) {
- SerializeNode(reinterpret_cast<HeapEntry*>(sorted_nodes[i]->key));
+
+ for (int i = 0; i < nodes.length(); ++i) {
+ SerializeNode(nodes[i]);
if (writer_->aborted()) return;
}
}
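
A worked instance of the index computation above, assuming the constants from the code (node_fields_count == 7, edge_fields_count == 3) and a hypothetical list of per-node child counts:

#include <cstdio>

int main() {
  const int node_fields_count = 7;  // type,name,id,self_size,retained_size,dominator,children_count.
  const int edge_fields_count = 3;  // type,name|index,to_node.
  int children[] = {2, 0, 1};       // Hypothetical child counts per node.
  int index = 1;                    // Nodes start from array index 1.
  for (int n : children) {
    printf("node starts at index %d\n", index);
    index += node_fields_count + n * edge_fields_count;
  }
  // Prints 1, 14 and 21; a fourth node would start at 21 + 7 + 1*3 = 31.
  return 0;
}
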
diff --git a/src/profile-generator.h b/src/profile-generator.h
index b9de69b..1fa647e 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -549,6 +549,8 @@
void set_retained_size(int value) { retained_size_ = value; }
int ordered_index() { return ordered_index_; }
void set_ordered_index(int value) { ordered_index_ = value; }
+ int entry_index() { return entry_index_; }
+ void set_entry_index(int value) { entry_index_ = value; }
Vector<HeapGraphEdge> children() {
return Vector<HeapGraphEdge>(children_arr(), children_count_); }
@@ -606,6 +608,7 @@
int ordered_index_; // Used during dominator tree building.
int retained_size_; // At that moment, there is no retained size yet.
};
+ int entry_index_;
SnapshotObjectId id_;
HeapEntry* dominator_;
HeapSnapshot* snapshot_;
@@ -667,8 +670,6 @@
void ClearPaint();
HeapEntry* GetEntryById(SnapshotObjectId id);
List<HeapEntry*>* GetSortedEntriesList();
- template<class Visitor>
- void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
void SetDominatorsToSelf();
void Print(int max_depth);
@@ -687,7 +688,7 @@
HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
char* raw_entries_;
List<HeapEntry*> entries_;
- bool entries_sorted_;
+ List<HeapEntry*> sorted_entries_;
size_t raw_entries_size_;
SnapshotObjectId max_snapshot_js_object_id_;
@@ -815,7 +816,7 @@
HeapEntriesMap();
~HeapEntriesMap();
- void AllocateEntries();
+ void AllocateEntries(HeapThing root_object);
HeapEntry* Map(HeapThing thing);
void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
void CountReference(HeapThing from, HeapThing to,
@@ -842,6 +843,8 @@
int retainers_count;
};
+ static inline void AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry);
+
static uint32_t Hash(HeapThing thing) {
return ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
@@ -1122,7 +1125,6 @@
public:
explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
: snapshot_(snapshot),
- nodes_(ObjectsMatch),
strings_(ObjectsMatch),
next_node_id_(1),
next_string_id_(1),
@@ -1141,9 +1143,7 @@
v8::internal::kZeroHashSeed);
}
- void EnumerateNodes();
HeapSnapshot* CreateFakeSnapshot();
- int GetNodeId(HeapEntry* entry);
int GetStringId(const char* s);
void SerializeEdge(HeapGraphEdge* edge);
void SerializeImpl();
@@ -1157,7 +1157,6 @@
static const int kMaxSerializableSnapshotRawSize;
HeapSnapshot* snapshot_;
- HashMap nodes_;
HashMap strings_;
int next_node_id_;
int next_string_id_;
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 55f93ee..e58ddb4 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -111,7 +111,7 @@
void Processor::VisitExpressionStatement(ExpressionStatement* node) {
// Rewrite : <x>; -> .result = <x>;
- if (!is_set_) {
+ if (!is_set_ && !node->expression()->IsThrow()) {
node->set_expression(SetResult(node->expression()));
if (!in_try_) is_set_ = true;
}
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 7d51190..568e48e 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -101,7 +101,7 @@
}
-void RuntimeProfiler::GlobalSetup() {
+void RuntimeProfiler::GlobalSetUp() {
ASSERT(!has_been_globally_set_up_);
enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 35213a8..ab6cb37 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -43,7 +43,7 @@
public:
explicit RuntimeProfiler(Isolate* isolate);
- static void GlobalSetup();
+ static void GlobalSetUp();
static inline bool IsEnabled() {
ASSERT(has_been_globally_set_up_);
diff --git a/src/runtime.cc b/src/runtime.cc
index 0726b3d..f9b5fde 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4669,7 +4669,7 @@
HandleScope scope;
Object* raw_boilerplate_object = literals->get(literal_index);
- Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+ Handle<JSArray> boilerplate(JSArray::cast(raw_boilerplate_object));
#if DEBUG
ElementsKind elements_kind = object->GetElementsKind();
#endif
@@ -4680,19 +4680,23 @@
if (value->IsNumber()) {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
- JSObject::TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
+ if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(),
+ FAST_DOUBLE_ELEMENTS)) {
+ JSObject::TransitionElementsKind(boilerplate, FAST_DOUBLE_ELEMENTS);
+ }
ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
- FixedDoubleArray* double_array =
- FixedDoubleArray::cast(object->elements());
+ FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
HeapNumber* number = HeapNumber::cast(*value);
double_array->set(store_index, number->Number());
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS);
JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
- JSObject::TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
- FixedArray* object_array =
- FixedArray::cast(object->elements());
+ if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(),
+ FAST_ELEMENTS)) {
+ JSObject::TransitionElementsKind(boilerplate, FAST_ELEMENTS);
+ }
+ FixedArray* object_array = FixedArray::cast(object->elements());
object_array->set(store_index, *value);
}
return *object;
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 3709009..55bf222 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -164,7 +164,7 @@
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+ ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
diff --git a/src/spaces.cc b/src/spaces.cc
index defe352..6144464 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -565,11 +565,10 @@
}
-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
+Page* MemoryAllocator::AllocatePage(intptr_t size,
+ PagedSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
- executable,
- owner);
+ MemoryChunk* chunk = AllocateChunk(size, executable, owner);
if (chunk == NULL) return NULL;
@@ -578,8 +577,8 @@
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
- Executability executable,
- Space* owner) {
+ Space* owner,
+ Executability executable) {
MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
@@ -833,7 +832,6 @@
bool PagedSpace::CanExpand() {
ASSERT(max_capacity_ % AreaSize() == 0);
- ASSERT(Capacity() % AreaSize() == 0);
if (Capacity() == max_capacity_) return false;
@@ -848,8 +846,14 @@
bool PagedSpace::Expand() {
if (!CanExpand()) return false;
- Page* p = heap()->isolate()->memory_allocator()->
- AllocatePage(this, executable());
+ intptr_t size = AreaSize();
+
+ if (anchor_.next_page() == &anchor_) {
+ size = SizeOfFirstPage();
+ }
+
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
+ size, this, executable());
if (p == NULL) return false;
ASSERT(Capacity() <= max_capacity_);
@@ -860,6 +864,38 @@
}
+intptr_t PagedSpace::SizeOfFirstPage() {
+ int size = 0;
+ switch (identity()) {
+ case OLD_POINTER_SPACE:
+ size = 64 * kPointerSize * KB;
+ break;
+ case OLD_DATA_SPACE:
+ size = 192 * KB;
+ break;
+ case MAP_SPACE:
+ size = 128 * KB;
+ break;
+ case CELL_SPACE:
+ size = 96 * KB;
+ break;
+ case CODE_SPACE:
+ if (kPointerSize == 8) {
+ // On x64 we allocate code pages in a special way (from the reserved
+        // 2GByte area). That part of the code is not yet upgraded to handle
+ // small pages.
+ size = AreaSize();
+ } else {
+ size = 384 * KB;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return Min(size, AreaSize());
+}
+
+
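
For reference, the sizes chosen above work out to 256KB on ia32 and 512KB on x64 for OLD_POINTER_SPACE (64 * kPointerSize * KB), 192KB for OLD_DATA_SPACE, 128KB for MAP_SPACE, 96KB for CELL_SPACE, and 384KB for CODE_SPACE on 32-bit targets, while x64 code space keeps the full AreaSize(); every result is then clamped to at most AreaSize(), so a first page can never be larger than a regular one.
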
int PagedSpace::CountTotalPages() {
PageIterator it(this);
int count = 0;
@@ -903,7 +939,6 @@
}
ASSERT(Capacity() > 0);
- ASSERT(Capacity() % AreaSize() == 0);
accounting_stats_.ShrinkSpace(AreaSize());
}
@@ -1042,6 +1077,7 @@
if (!to_space_.Commit()) {
return false;
}
+ ASSERT(!from_space_.is_committed()); // No need to use memory yet.
start_ = chunk_base_;
address_mask_ = ~(2 * reserved_semispace_capacity - 1);
@@ -1198,13 +1234,15 @@
allocation_info_.limit + inline_allocation_limit_step_,
high);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
+ heap()->incremental_marking()->Step(
+ bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
+ heap()->incremental_marking()->Step(
+ bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
@@ -2581,7 +2619,7 @@
}
LargePage* page = heap()->isolate()->memory_allocator()->
- AllocateLargePage(object_size, executable, this);
+ AllocateLargePage(object_size, this, executable);
if (page == NULL) return Failure::RetryAfterGC(identity());
ASSERT(page->area_size() >= object_size);
diff --git a/src/spaces.h b/src/spaces.h
index b614c3b..90f62f3 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -637,8 +637,10 @@
friend class MemoryAllocator;
};
+
STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
@@ -950,11 +952,11 @@
void TearDown();
- Page* AllocatePage(PagedSpace* owner, Executability executable);
+ Page* AllocatePage(
+ intptr_t size, PagedSpace* owner, Executability executable);
- LargePage* AllocateLargePage(intptr_t object_size,
- Executability executable,
- Space* owner);
+ LargePage* AllocateLargePage(
+ intptr_t object_size, Space* owner, Executability executable);
void Free(MemoryChunk* chunk);
@@ -1625,6 +1627,8 @@
// Maximum capacity of this space.
intptr_t max_capacity_;
+ intptr_t SizeOfFirstPage();
+
// Accounting information for this space.
AllocationStats accounting_stats_;
diff --git a/src/string.js b/src/string.js
index 84dde3d..a464f7f 100644
--- a/src/string.js
+++ b/src/string.js
@@ -189,7 +189,9 @@
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
- return %StringMatch(subject, regexp, lastMatchInfo);
+ var result = %StringMatch(subject, regexp, lastMatchInfo);
+ if (result !== null) lastMatchInfoOverride = null;
+ return result;
}
// Non-regexp argument.
regexp = new $RegExp(regexp);
diff --git a/src/v8.cc b/src/v8.cc
index d9351dc..45036c8 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -27,11 +27,13 @@
#include "v8.h"
+#include "assembler.h"
#include "isolate.h"
#include "elements.h"
#include "bootstrapper.h"
#include "debug.h"
#include "deoptimizer.h"
+#include "frames.h"
#include "heap-profiler.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
@@ -262,7 +264,7 @@
OS::PostSetUp();
- RuntimeProfiler::GlobalSetup();
+ RuntimeProfiler::GlobalSetUp();
ElementsAccessor::InitializeOncePerProcess();
@@ -273,6 +275,9 @@
}
LOperand::SetUpCaches();
+ SetUpJSCallerSavedCodeData();
+ SamplerRegistry::SetUp();
+ ExternalReference::SetUp();
}
void V8::InitializeOncePerProcess() {
diff --git a/src/version.cc b/src/version.cc
index a93048c..6b07103 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 10
-#define BUILD_NUMBER 0
-#define PATCH_LEVEL 5
+#define BUILD_NUMBER 1
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index adeda0b..7ed81b4 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -315,7 +315,8 @@
rex_(0),
operand_size_(0),
group_1_prefix_(0),
- byte_size_operand_(false) {
+ byte_size_operand_(false),
+ instruction_table_(instruction_table.Pointer()) {
tmp_buffer_[0] = '\0';
}
@@ -344,6 +345,7 @@
byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
// Byte size operand override.
bool byte_size_operand_;
+ const InstructionTable* const instruction_table_;
void setRex(byte rex) {
ASSERT_EQ(0x40, rex & 0xF0);
@@ -1340,7 +1342,7 @@
data++;
}
- const InstructionDesc& idesc = instruction_table.Get().Get(current);
+ const InstructionDesc& idesc = instruction_table_->Get(current);
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 1f5ec4b..56ba6f3 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -4080,6 +4080,14 @@
deferred->entry(),
TAG_OBJECT);
+ __ bind(deferred->exit());
+ if (FLAG_debug_code) {
+ Label is_in_new_space;
+ __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+ __ Abort("Allocated object is not in new-space");
+ __ bind(&is_in_new_space);
+ }
+
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(scratch, constructor);
@@ -4114,14 +4122,14 @@
__ movq(FieldOperand(result, property_offset), scratch);
}
}
-
- __ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4129,8 +4137,8 @@
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
- __ PushHeapObject(constructor);
- CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+ __ Push(Smi::FromInt(instance_size));
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(result, rax);
}
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a66bde8..bf232bf 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -542,9 +542,13 @@
void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
- __ movl(rax, current_character());
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
+ if (c == 0) {
+ __ testl(current_character(), Immediate(mask));
+ } else {
+ __ movl(rax, Immediate(mask));
+ __ and_(rax, current_character());
+ __ cmpl(rax, Immediate(c));
+ }
BranchOrBacktrack(equal, on_equal);
}
@@ -552,9 +556,13 @@
void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal) {
- __ movl(rax, current_character());
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
+ if (c == 0) {
+ __ testl(current_character(), Immediate(mask));
+ } else {
+ __ movl(rax, Immediate(mask));
+ __ and_(rax, current_character());
+ __ cmpl(rax, Immediate(c));
+ }
BranchOrBacktrack(not_equal, on_not_equal);
}
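
Both rewrites rely on (x & mask) == 0 being exactly what a single testl evaluates: test performs the AND, sets the flags and discards the result, so the c == 0 case needs neither the scratch register nor the separate compare. A minimal sketch of the equivalence (illustrative C++, not V8 code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// The generic masked compare emitted in the general case...
static bool MaskedEquals(uint32_t x, uint32_t mask, uint32_t c) {
  return (x & mask) == c;
}

int main() {
  // ...collapses to a pure AND-against-zero when c == 0, which is what
  // `testl current_character(), Immediate(mask)` computes in one step.
  for (uint32_t x : {0u, 1u, 0x20u, 0xffu}) {
    assert(MaskedEquals(x, 0x20u, 0u) == ((x & 0x20u) == 0u));
  }
  return 0;
}
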
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 999e2c6..f75dc30 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1264,9 +1264,9 @@
v8::HandleScope scope;
AlwaysAllocateScope always_allocate;
intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
- intptr_t number_of_fillers = (available / FixedArray::SizeFor(1000)) - 10;
+ intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
- CHECK(HEAP->InNewSpace(*FACTORY->NewFixedArray(1000, NOT_TENURED)));
+ CHECK(HEAP->InNewSpace(*FACTORY->NewFixedArray(32, NOT_TENURED)));
}
}
@@ -1521,17 +1521,13 @@
while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
!marking->IsStopped()) {
- marking->Step(MB);
+    // Discard any pending GC requests; otherwise we will get a GC when we
+    // enter the code below.
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
}
CHECK(marking->IsMarking());
- // Discard any pending GC requests otherwise we will get GC when we enter
- // code below.
- if (ISOLATE->stack_guard()->IsGCRequest()) {
- ISOLATE->stack_guard()->Continue(GC_REQUEST);
- }
-
{
v8::HandleScope scope;
v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();
@@ -1597,3 +1593,119 @@
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(map->GetPrototypeTransition(*prototype)->IsMap());
}
+
+
+TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
+ i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+ i::FLAG_verify_heap = true;
+#endif
+ InitializeVM();
+ if (!i::V8::UseCrankshaft()) return;
+ v8::HandleScope outer_scope;
+
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "function f () {"
+ " var s = 0;"
+ " for (var i = 0; i < 100; i++) s += i;"
+ " return s;"
+ "}"
+ "f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+ }
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->IsOptimized());
+
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ marking->Abort();
+ marking->Start();
+
+ // The following two calls will increment HEAP->global_ic_age().
+ const int kLongIdlePauseInMs = 1000;
+ v8::V8::ContextDisposedNotification();
+ v8::V8::IdleNotification(kLongIdlePauseInMs);
+
+ while (!marking->IsStopped() && !marking->IsComplete()) {
+ marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ }
+
+ CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(0, f->shared()->opt_count());
+ CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+}
+
+
+TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
+ i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+ i::FLAG_verify_heap = true;
+#endif
+ InitializeVM();
+ if (!i::V8::UseCrankshaft()) return;
+ v8::HandleScope outer_scope;
+
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "function f () {"
+ " var s = 0;"
+ " for (var i = 0; i < 100; i++) s += i;"
+ " return s;"
+ "}"
+ "f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+ }
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->IsOptimized());
+
+ HEAP->incremental_marking()->Abort();
+
+ // The following two calls will increment HEAP->global_ic_age().
+ // Since incremental marking is off, IdleNotification will do full GC.
+ const int kLongIdlePauseInMs = 1000;
+ v8::V8::ContextDisposedNotification();
+ v8::V8::IdleNotification(kLongIdlePauseInMs);
+
+ CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(0, f->shared()->opt_count());
+ CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+}
+
+
+// Test that HAllocateObject will always return an object in new-space.
+TEST(OptimizedAllocationAlwaysInNewSpace) {
+ i::FLAG_allow_natives_syntax = true;
+ InitializeVM();
+ if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
+ v8::HandleScope scope;
+
+ FillUpNewSpace(HEAP->new_space());
+ AlwaysAllocateScope always_allocate;
+ v8::Local<v8::Value> res = CompileRun(
+ "function c(x) {"
+ " this.x = x;"
+ " for (var i = 0; i < 32; i++) {"
+ " this['x' + i] = x;"
+ " }"
+ "}"
+ "function f(x) { return new c(x); };"
+ "f(1); f(2); f(3);"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f(4);");
+ CHECK_EQ(4, res->ToObject()->GetRealNamedProperty(v8_str("x"))->Int32Value());
+
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+
+ CHECK(HEAP->InNewSpace(*o));
+}
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 973af19..83a576d 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -534,15 +534,15 @@
intptr_t booted_memory = MemoryInUse();
if (sizeof(initial_memory) == 8) {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(booted_memory - initial_memory, 6686 * 1024); // 6476.
+ CHECK_LE(booted_memory - initial_memory, 3600 * 1024); // 3396.
} else {
- CHECK_LE(booted_memory - initial_memory, 6809 * 1024); // 6628.
+ CHECK_LE(booted_memory - initial_memory, 3600 * 1024); // 3432.
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(booted_memory - initial_memory, 6532 * 1024); // 6388.
+ CHECK_LE(booted_memory - initial_memory, 2600 * 1024); // 2484.
} else {
- CHECK_LE(booted_memory - initial_memory, 6940 * 1024); // 6456
+ CHECK_LE(booted_memory - initial_memory, 2950 * 1024); // 2844
}
}
}
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index d941d0f..54898a0 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -504,7 +504,10 @@
return NULL;
Handle<String> pattern = isolate->factory()->
NewStringFromUtf8(CStrVector(input));
- RegExpEngine::Compile(&compile_data, false, multiline, pattern, is_ascii);
+ Handle<String> sample_subject =
+ isolate->factory()->NewStringFromUtf8(CStrVector(""));
+ RegExpEngine::Compile(
+ &compile_data, false, multiline, pattern, sample_subject, is_ascii);
return compile_data.node;
}
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 92de2a6..0e95704 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -140,8 +140,8 @@
heap->MaxReserved(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
- Page* first_page =
- memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+ Page* first_page = memory_allocator->AllocatePage(
+ faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
@@ -153,8 +153,8 @@
}
// Again, we should get n or n - 1 pages.
- Page* other =
- memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+ Page* other = memory_allocator->AllocatePage(
+ faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index e11349b..e2a179f 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -587,3 +587,36 @@
CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
CHECK_EQ("cdefghijklmnopqrstuvwx", *(string->ToCString()));
}
+
+
+TEST(AsciiArrayJoin) {
+ // Set heap limits.
+ static const int K = 1024;
+ v8::ResourceConstraints constraints;
+ constraints.set_max_young_space_size(256 * K);
+ constraints.set_max_old_space_size(4 * K * K);
+ v8::SetResourceConstraints(&constraints);
+
+ // String s is made of 2^17 = 131072 'c' characters and a is an array
+ // starting with 'bad', followed by 2^14 times the string s. That means the
+  // total length of the concatenated strings is 2^31 + 3. So on 32-bit systems
+ // summing the lengths of the strings (as Smis) overflows and wraps.
+ static const char* join_causing_out_of_memory =
+ "var two_14 = Math.pow(2, 14);"
+ "var two_17 = Math.pow(2, 17);"
+ "var s = Array(two_17 + 1).join('c');"
+ "var a = ['bad'];"
+ "for (var i = 1; i <= two_14; i++) a.push(s);"
+ "a.join("");";
+
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::V8::IgnoreOutOfMemoryException();
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(v8::String::New(join_causing_out_of_memory));
+ v8::Local<v8::Value> result = script->Run();
+
+ // Check for out of memory state.
+ CHECK(result.IsEmpty());
+ CHECK(context->HasOutOfMemoryException());
+}
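
The wrap described in the comment is plain integer arithmetic: 2^14 strings of 2^17 characters sum to 2^31 characters, which fits neither a 32-bit signed length nor a 31-bit Smi. A quick check of the numbers (illustrative C++, not part of the test):

#include <cassert>
#include <cstdint>

int main() {
  int64_t total = (int64_t{1} << 14) * (int64_t{1} << 17) + 3;  // 2^31 + 3.
  assert(total == 2147483651LL);
  assert(total > INT32_MAX);  // Too large for a 32-bit signed length...
  // ...so a 32-bit sum wraps to a negative value. The truncating cast is
  // implementation-defined before C++20, but wraps two's-complement style
  // on the platforms V8 targets.
  int32_t wrapped = static_cast<int32_t>(total);
  assert(wrapped < 0);
  return 0;
}
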
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index 1aa57e3..cebabaa 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -255,6 +255,10 @@
threads[i]->Join();
delete threads[i];
}
+ {
+ v8::Locker locker;
+ v8::Locker::StopPreemption();
+ }
delete semaphore;
semaphore = NULL;
diff --git a/test/mjsunit/regress/regress-119429.js b/test/mjsunit/regress/regress-119429.js
new file mode 100644
index 0000000..a876487
--- /dev/null
+++ b/test/mjsunit/regress/regress-119429.js
@@ -0,0 +1,37 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var d = 0;
+function recurse() {
+ if (++d == 25135) { // A magic number just below stack overflow on ia32
+ %DebugBreak();
+ }
+ recurse();
+}
+assertThrows(function() { recurse();} );
diff --git a/test/mjsunit/regress/regress-2034.js b/test/mjsunit/regress/regress-2034.js
new file mode 100644
index 0000000..c510f97
--- /dev/null
+++ b/test/mjsunit/regress/regress-2034.js
@@ -0,0 +1,46 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-collections
+
+var key = {};
+var map = new WeakMap;
+Object.preventExtensions(key);
+
+// Try querying using the non-extensible key.
+assertFalse(map.has(key));
+assertSame(undefined, map.get(key));
+
+// Try adding using the non-extensible key.
+map.set(key, 1);
+assertTrue(map.has(key));
+assertSame(1, map.get(key));
+
+// Try deleting using the non-extensible key.
+map.delete(key);
+assertFalse(map.has(key));
+assertSame(undefined, map.get(key));
diff --git a/test/mjsunit/regress/regress-2054.js b/test/mjsunit/regress/regress-2054.js
new file mode 100644
index 0000000..97b989c
--- /dev/null
+++ b/test/mjsunit/regress/regress-2054.js
@@ -0,0 +1,34 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can correctly optimize top-level code that contains a
+// throw (or return) as its last statement.
+
+var N = 1e5; // Number of iterations that trigger optimization.
+for (var i = 0; i < N; i++) {
+ if (i > N) throw new Error;
+}
diff --git a/test/mjsunit/regress/regress-2055.js b/test/mjsunit/regress/regress-2055.js
new file mode 100644
index 0000000..1eaf62c
--- /dev/null
+++ b/test/mjsunit/regress/regress-2055.js
@@ -0,0 +1,48 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that array literal boilerplate objects can be transitioned while
+// existing un-transitioned clones are still being populated.
+
+function test1(depth) {
+ if (--depth < 0) {
+ return [];
+ } else {
+ return [ 0, test1(depth) ];
+ }
+}
+assertEquals([0,[0,[]]], test1(2));
+
+function test2(depth) {
+ if (--depth < 0) {
+ return [];
+ } else {
+ var o = [ 0, test2(depth) ];
+ return (depth == 0) ? 0.5 : o;
+ }
+}
+assertEquals([0,0.5], test2(2));
diff --git a/test/mjsunit/regress/regress-2056.js b/test/mjsunit/regress/regress-2056.js
new file mode 100644
index 0000000..d34a750
--- /dev/null
+++ b/test/mjsunit/regress/regress-2056.js
@@ -0,0 +1,66 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Each case is [a, b, expected max, expected min].
+var cases = [
+  [0.0, 0.0, 0.0, 0.0],
+ [undefined, 0.0, NaN, NaN],
+ [0.0, undefined, NaN, NaN],
+ [NaN, 0.0, NaN, NaN],
+ [0.0, NaN, NaN, NaN],
+ [-NaN, 0.0, NaN, NaN],
+ [0.0, -NaN, NaN, NaN],
+ [Infinity, 0.0, Infinity, 0.0],
+ [0.0, Infinity, Infinity, 0.0],
+ [-Infinity, 0.0, 0.0, -Infinity],
+ [0.0, -Infinity, 0.0, -Infinity]
+];
+
+function do_min(a, b) {
+ return Math.min(a, b);
+}
+
+function do_max(a, b) {
+ return Math.max(a, b);
+}
+
+// Make sure that non-crankshaft results match expectations.
+for (i = 0; i < cases.length; ++i) {
+ var c = cases[i];
+ assertEquals(c[3], do_min(c[0], c[1]));
+ assertEquals(c[2], do_max(c[0], c[1]));
+}
+
+// Make sure that crankshaft results match expectations.
+for (i = 0; i < cases.length; ++i) {
+ var c = cases[i];
+ %OptimizeFunctionOnNextCall(do_min);
+ %OptimizeFunctionOnNextCall(do_max);
+ assertEquals(c[3], do_min(c[0], c[1]));
+ assertEquals(c[2], do_max(c[0], c[1]));
+}
diff --git a/test/mjsunit/regress/regress-2058.js b/test/mjsunit/regress/regress-2058.js
new file mode 100644
index 0000000..9a69ea1
--- /dev/null
+++ b/test/mjsunit/regress/regress-2058.js
@@ -0,0 +1,37 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// See http://code.google.com/p/v8/issues/detail?id=2058
+
+// A match after a replace with a function argument needs to reset
+// the flag that determines whether we are using indices or substrings
+// to indicate the last match.
+"Now is the".replace(/Now (\w+) the/g, function() {
+ "foo bar".match(/( )/);
+ assertEquals(RegExp.$1, " ");
+})
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 67607ff..3f395bd 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -65,10 +65,12 @@
S7.8.4_A7.2_T5: FAIL_OK
S7.8.4_A7.2_T6: FAIL_OK
-# Linux and Mac defaults to extended 80 bit floating point format in the FPU.
+# Linux on ia32 (and therefore the simulators) defaults to the extended 80-bit
+# floating point format, so these tests checking 64-bit FP precision fail. The
+# other platforms/architectures pass these tests.
# We follow the other major JS engines by keeping this default.
-S8.5_A2.2: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
-S8.5_A2.1: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
+S8.5_A2.1: PASS || FAIL_OK
+S8.5_A2.2: PASS || FAIL_OK
############################ INVALID TESTS #############################
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index bb26f8a..46f85fe 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -59,6 +59,11 @@
'../../src/v8dll-main.cc',
],
'conditions': [
+ ['OS=="mac"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
+ },
+ }],
['OS=="win"', {
'defines': [
'BUILDING_V8_SHARED',