Version 3.10.6
Fixed some bugs in accessing details of the last regexp match.
Fixed source property of empty RegExp objects. (issue 1982)
Enabled inlining of some V8 API functions.
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@11442 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 9ba7d3e..4877084 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2012-04-26: Version 3.10.6
+
+ Fixed some bugs in accessing details of the last regexp match.
+
+ Fixed source property of empty RegExp objects. (issue 1982)
+
+ Enabled inlining of some V8 API functions.
+
+ Performance and stability improvements on all platforms.
+
+
2012-04-23: Version 3.10.5
Put new global var semantics behind a flag until WebKit tests are
diff --git a/include/v8.h b/include/v8.h
index 430f410..9024531 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1084,6 +1084,7 @@
* A zero length string.
*/
V8EXPORT static v8::Local<v8::String> Empty();
+ inline static v8::Local<v8::String> Empty(Isolate* isolate);
/**
* Returns true if the string is external
@@ -2561,6 +2562,11 @@
Handle<Boolean> V8EXPORT True();
Handle<Boolean> V8EXPORT False();
+inline Handle<Primitive> Undefined(Isolate* isolate);
+inline Handle<Primitive> Null(Isolate* isolate);
+inline Handle<Boolean> True(Isolate* isolate);
+inline Handle<Boolean> False(Isolate* isolate);
+
/**
* A set of constraints that specifies the limits of the runtime's memory use.
@@ -2811,13 +2817,13 @@
/**
* Associate embedder-specific data with the isolate
*/
- void SetData(void* data);
+ inline void SetData(void* data);
/**
- * Retrive embedder-specific data from the isolate.
+ * Retrieve embedder-specific data from the isolate.
* Returns NULL if SetData has never been called.
*/
- void* GetData();
+ inline void* GetData();
private:
Isolate();
@@ -3872,18 +3878,6 @@
PlatformSmiTagging::kEncodablePointerMask;
const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
-template <size_t ptr_size> struct InternalConstants;
-
-// Internal constants for 32-bit systems.
-template <> struct InternalConstants<4> {
- static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
-// Internal constants for 64-bit systems.
-template <> struct InternalConstants<8> {
- static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@@ -3895,8 +3889,7 @@
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
- static const int kStringResourceOffset =
- InternalConstants<kApiPointerSize>::kStringResourceOffset;
+ static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 3 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
@@ -3904,6 +3897,15 @@
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
+ static const int kIsolateStateOffset = 0;
+ static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
+ static const int kIsolateRootsOffset = 3 * kApiPointerSize;
+ static const int kUndefinedValueRootIndex = 5;
+ static const int kNullValueRootIndex = 7;
+ static const int kTrueValueRootIndex = 8;
+ static const int kFalseValueRootIndex = 9;
+ static const int kEmptySymbolRootIndex = 128;
+
static const int kJSObjectType = 0xaa;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x82;
@@ -3956,6 +3958,28 @@
return representation == kExternalTwoByteRepresentationTag;
}
+ static inline bool IsInitialized(v8::Isolate* isolate) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
+ return *reinterpret_cast<int*>(addr) == 1;
+ }
+
+ static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
+ kIsolateEmbedderDataOffset;
+ *reinterpret_cast<void**>(addr) = data;
+ }
+
+ static inline void* GetEmbedderData(v8::Isolate* isolate) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
+ kIsolateEmbedderDataOffset;
+ return *reinterpret_cast<void**>(addr);
+ }
+
+ static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
+ return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
+ }
+
template <typename T>
static inline T ReadField(Object* ptr, int offset) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
@@ -4199,6 +4223,15 @@
}
+Local<String> String::Empty(Isolate* isolate) {
+ typedef internal::Object* S;
+ typedef internal::Internals I;
+ if (!I::IsInitialized(isolate)) return Empty();
+ S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex);
+ return Local<String>(reinterpret_cast<String*>(slot));
+}
+
+
String::ExternalStringResource* String::GetExternalStringResource() const {
typedef internal::Object O;
typedef internal::Internals I;
@@ -4378,6 +4411,54 @@
}
+Handle<Primitive> Undefined(Isolate* isolate) {
+ typedef internal::Object* S;
+ typedef internal::Internals I;
+ if (!I::IsInitialized(isolate)) return Undefined();
+ S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+ return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+
+Handle<Primitive> Null(Isolate* isolate) {
+ typedef internal::Object* S;
+ typedef internal::Internals I;
+ if (!I::IsInitialized(isolate)) return Null();
+ S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
+ return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+
+Handle<Boolean> True(Isolate* isolate) {
+ typedef internal::Object* S;
+ typedef internal::Internals I;
+ if (!I::IsInitialized(isolate)) return True();
+ S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
+ return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+
+Handle<Boolean> False(Isolate* isolate) {
+ typedef internal::Object* S;
+ typedef internal::Internals I;
+ if (!I::IsInitialized(isolate)) return False();
+ S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
+ return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+
+void Isolate::SetData(void* data) {
+ typedef internal::Internals I;
+ I::SetEmbedderData(this, data);
+}
+
+
+void* Isolate::GetData() {
+ typedef internal::Internals I;
+ return I::GetEmbedderData(this);
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
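
The inlined accessors declared above let an embedder fetch canonical values
without a call across the API boundary. A minimal usage sketch (hypothetical
embedder code, assuming an isolate that has been entered and initialized; on
an uninitialized isolate each accessor falls back to its uninlined twin):

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    // Fast path: reads the isolate's root array via Internals::GetRoot.
    v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
    v8::Handle<v8::Primitive> null = v8::Null(isolate);
    v8::Handle<v8::Boolean> yes = v8::True(isolate);
    v8::Local<v8::String> empty = v8::String::Empty(isolate);
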
diff --git a/src/api.cc b/src/api.cc
index 124875a..0bc93c2 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4630,7 +4630,9 @@
Local<String> v8::String::Empty() {
i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
+ if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
+ return v8::Local<String>();
+ }
LOG_API(isolate, "String::Empty()");
return Utils::ToLocal(isolate->factory()->empty_symbol());
}
@@ -5396,17 +5398,6 @@
}
-void Isolate::SetData(void* data) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->SetData(data);
-}
-
-void* Isolate::GetData() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->GetData();
-}
-
-
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 7b2a3c4..699e6aa 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -457,6 +457,8 @@
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
@@ -464,7 +466,7 @@
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
- unsigned fixed_frame_size = 7 * kPointerSize;
+ unsigned fixed_frame_size = 8 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -529,6 +531,15 @@
top_address + output_offset, output_offset, value);
}
+ // The output frame reflects a JSConstructStubGeneric frame.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(construct_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
@@ -559,8 +570,6 @@
ASSERT(0 == output_offset);
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index a5e20b2..7d0d58c 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1315,6 +1315,75 @@
}
+bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
+ uint32_t divisor_abs = abs(divisor);
+ // Dividing by 0, 1, and powers of 2 is easy.
+ // Note that IsPowerOf2(0) returns true;
+ ASSERT(IsPowerOf2(0) == true);
+ if (IsPowerOf2(divisor_abs)) return true;
+
+ // We have magic numbers for a few specific divisors.
+ // Details and proofs can be found in:
+ // - Hacker's Delight, Henry S. Warren, Jr.
+ // - The PowerPC Compiler Writer’s Guide
+ // and probably many others.
+ //
+ // We handle
+ // <divisor with magic numbers> * <power of 2>
+ // but not
+ // <divisor with magic numbers> * <other divisor with magic numbers>
+ int32_t power_of_2_factor =
+ CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ DivMagicNumbers magic_numbers =
+ DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+ if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
+
+ return false;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
+ return NULL;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ // Only optimize when we have magic numbers for the divisor.
+ // The standard integer division routine is usually slower than
+ // transitioning to VFP.
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ int32_t int32_val = constant_val->Integer32Value();
+ if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
+ return constant_val->CopyToRepresentation(Representation::Integer32());
+ }
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegisterOrConstant(right);
+ LOperand* remainder = TempRegister();
+ ASSERT(right->IsConstant() &&
+ HConstant::cast(right)->HasInteger32Value() &&
+ HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
+ return AssignEnvironment(DefineAsRegister(
+ new LMathFloorOfDiv(dividend, divisor, remainder)));
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
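
For reference, the multiply-and-shift sequence that HasMagicNumberForDivisor
guards can be spelled out in portable C++. A sketch for division by 7, with
the magic constant M = 0x92492493 and shift s = 2 taken from Hacker's Delight
(illustrative only, not part of the patch):

    #include <stdint.h>

    int32_t DivideBy7(int32_t n) {
      int64_t product =
          static_cast<int64_t>(n) * static_cast<int32_t>(0x92492493);
      int32_t high = static_cast<int32_t>(product >> 32);  // smull high word
      high += n;                               // corrective add, since M < 0
      high >>= 2;                              // arithmetic shift by s
      high += static_cast<uint32_t>(n) >> 31;  // +1 for negative dividends
      return high;                             // == n / 7, truncating
    }
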
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index fd5cb08..00e3f95 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -132,6 +132,7 @@
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathFloorOfDiv) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -579,6 +580,21 @@
};
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -2287,6 +2303,10 @@
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
private:
enum Status {
UNUSED,
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 3db7201..ba30e7a 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1034,6 +1034,100 @@
}
+void LCodeGen::EmitSignedIntegerDivisionByConstant(
+ Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment) {
+ ASSERT(!AreAliased(dividend, scratch, ip));
+ ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+
+ uint32_t divisor_abs = abs(divisor);
+
+ int32_t power_of_2_factor =
+ CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+
+ switch (divisor_abs) {
+ case 0:
+ DeoptimizeIf(al, environment);
+ return;
+
+ case 1:
+ if (divisor > 0) {
+ __ Move(result, dividend);
+ } else {
+ __ rsb(result, dividend, Operand(0), SetCC);
+ DeoptimizeIf(vs, environment);
+ }
+ // Compute the remainder.
+ __ mov(remainder, Operand(0));
+ return;
+
+ default:
+ if (IsPowerOf2(divisor_abs)) {
+ // Branch and condition free code for integer division by a power
+ // of two.
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (power > 1) {
+ __ mov(scratch, Operand(dividend, ASR, power - 1));
+ }
+ __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
+ __ mov(result, Operand(scratch, ASR, power));
+ // Negate if necessary.
+ // We don't need to check for overflow because the case '-1' is
+ // handled separately.
+ if (divisor < 0) {
+ ASSERT(divisor != -1);
+ __ rsb(result, result, Operand(0));
+ }
+ // Compute the remainder.
+ if (divisor > 0) {
+ __ sub(remainder, dividend, Operand(result, LSL, power));
+ } else {
+ __ add(remainder, dividend, Operand(result, LSL, power));
+ }
+ return;
+ } else {
+ // Use magic numbers for a few specific divisors.
+ // Details and proofs can be found in:
+ // - Hacker's Delight, Henry S. Warren, Jr.
+ // - The PowerPC Compiler Writer’s Guide
+ // and probably many others.
+ //
+ // We handle
+ // <divisor with magic numbers> * <power of 2>
+ // but not
+ // <divisor with magic numbers> * <other divisor with magic numbers>
+ DivMagicNumbers magic_numbers =
+ DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+ // Branch and condition free code for integer division by a power
+ // of two.
+ const int32_t M = magic_numbers.M;
+ const int32_t s = magic_numbers.s + power_of_2_factor;
+
+ __ mov(ip, Operand(M));
+ __ smull(ip, scratch, dividend, ip);
+ if (M < 0) {
+ __ add(scratch, scratch, Operand(dividend));
+ }
+ if (s > 0) {
+ __ mov(scratch, Operand(scratch, ASR, s));
+ }
+ __ add(result, scratch, Operand(dividend, LSR, 31));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+ // Compute the remainder.
+ __ mov(ip, Operand(divisor));
+ // This sequence could be replaced with 'mls' when
+ // it gets implemented.
+ __ mul(scratch, result, ip);
+ __ sub(remainder, dividend, scratch);
+ }
+ }
+}
+
+
void LCodeGen::DoDivI(LDivI* instr) {
class DeferredDivI: public LDeferredCode {
public:
@@ -1115,6 +1209,34 @@
}
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ const Register result = ToRegister(instr->result());
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register remainder = ToRegister(instr->TempAt(0));
+ const Register scratch = scratch0();
+
+ // We only optimize this for division by constants, because the standard
+ // integer division routine is usually slower than transitioning to VFP.
+ // This could be optimized on processors with SDIV available.
+ ASSERT(instr->InputAt(1)->IsConstantOperand());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+ if (divisor < 0) {
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ EmitSignedIntegerDivisionByConstant(result,
+ left,
+ divisor,
+ remainder,
+ scratch,
+ instr->environment());
+ // We operated a truncating division. Correct the result if necessary.
+ __ cmp(remainder, Operand(0));
+ __ teq(remainder, Operand(divisor), ne);
+ __ sub(result, result, Operand(1), LeaveCC, mi);
+}
+
+
template<int T>
void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op) {
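
The tail of DoMathFloorOfDiv turns the truncating quotient into a flooring
one: the cmp/teq/sub triple decrements the result when the remainder is
nonzero and its sign differs from the divisor's. An equivalent portable
sketch (illustrative only, assuming a nonzero divisor and none of the
overflow cases the generated code guards with deopts):

    int32_t FloorDiv(int32_t dividend, int32_t divisor) {
      int32_t q = dividend / divisor;      // truncating division
      int32_t r = dividend - q * divisor;  // remainder
      if (r != 0 && ((r ^ divisor) < 0)) q -= 1;
      return q;                            // e.g. FloorDiv(-7, 2) == -4
    }
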
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index d73581b..c6a3af7 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -323,6 +323,17 @@
Register source,
int* offset);
+ // Emit optimized code for integer division.
+ // Inputs are signed.
+ // All registers are clobbered.
+ // If 'remainder' is no_reg, it is not computed.
+ void EmitSignedIntegerDivisionByConstant(Register result,
+ Register dividend,
+ int32_t divisor,
+ Register remainder,
+ Register scratch,
+ LEnvironment* environment);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 857c2bf..42c9961 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3710,15 +3710,28 @@
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
CodePatcher::CodePatcher(byte* address, int instructions)
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 666a36f..360f4c1 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -85,7 +85,14 @@
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+#endif
// MacroAssembler implements a collection of frequently used macros.
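
The rewritten AreAliased reduces the old pairwise comparisons to a single bit
count: OR-ing the registers into a RegList loses a bit exactly when two codes
coincide, so aliasing shows up as popcount(mask) < number of valid registers.
A standalone sketch of the idea using the GCC intrinsic from
compiler-intrinsics.h (hypothetical helper, not part of the patch):

    #include <initializer_list>
    #include <stdint.h>

    bool AnyAliased(std::initializer_list<int> reg_codes) {
      uint32_t mask = 0;
      int valid = 0;
      for (int code : reg_codes) {
        mask |= 1u << code;  // duplicate codes collapse onto one bit
        ++valid;
      }
      return __builtin_popcount(mask) != valid;
    }
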
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 6178815..c65c68c 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1011,7 +1011,7 @@
proto_map->set_prototype(global_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->empty_string());
+ heap->query_colon_symbol());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
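
This is the fix for issue 1982: the source property of RegExp.prototype, and
therefore of an empty RegExp, now reads as the neutral pattern "(?:)" instead
of the empty string. A hypothetical embedder check, written against the
pre-isolate API style of this era:

    v8::HandleScope scope;
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    v8::Handle<v8::Value> result =
        v8::Script::Compile(v8::String::New("new RegExp().source"))->Run();
    v8::String::Utf8Value source(result);  // *source is now "(?:)"
    context.Dispose();
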
diff --git a/src/compiler-intrinsics.h b/src/compiler-intrinsics.h
index 3b9c59e..b73e8ac 100644
--- a/src/compiler-intrinsics.h
+++ b/src/compiler-intrinsics.h
@@ -40,6 +40,9 @@
// Returns number of zero bits following most significant 1 bit.
// Undefined for zero value.
INLINE(static int CountLeadingZeros(uint32_t value));
+
+ // Returns the number of bits set.
+ INLINE(static int CountSetBits(uint32_t value));
};
#ifdef __GNUC__
@@ -51,6 +54,10 @@
return __builtin_clz(value);
}
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+ return __builtin_popcount(value);
+}
+
#elif defined(_MSC_VER)
#pragma intrinsic(_BitScanForward)
@@ -68,6 +75,16 @@
return 31 - static_cast<int>(result);
}
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+ // Manually count set bits.
+ value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+ value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+ value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+ value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+ return value;
+}
+
#else
#error Unsupported compiler
#endif
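
The MSVC fallback is the classic SWAR population count: each step adds
neighbouring bit groups in parallel, doubling the group width until a single
32-bit sum remains. A standalone copy with a few spot checks (illustrative
only):

    #include <assert.h>
    #include <stdint.h>

    static int CountSetBits(uint32_t value) {
      value = ((value >> 1) & 0x55555555) + (value & 0x55555555);   // 2-bit
      value = ((value >> 2) & 0x33333333) + (value & 0x33333333);   // 4-bit
      value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);   // 8-bit
      value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);   // 16-bit
      value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);  // 32-bit
      return value;
    }

    int main() {
      assert(CountSetBits(0) == 0);
      assert(CountSetBits(0xF0F0000F) == 12);
      assert(CountSetBits(0xFFFFFFFF) == 32);
      return 0;
    }
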
diff --git a/src/double.h b/src/double.h
index 16a3245..fcf6906 100644
--- a/src/double.h
+++ b/src/double.h
@@ -130,12 +130,6 @@
return (d64 & kExponentMask) == kExponentMask;
}
- bool IsNan() const {
- uint64_t d64 = AsUint64();
- return ((d64 & kExponentMask) == kExponentMask) &&
- ((d64 & kSignificandMask) != 0);
- }
-
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 3f4ead8..62a9782 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -195,6 +195,8 @@
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
+DEFINE_bool(array_bounds_checks_elimination, true,
+ "perform array bounds checks elimination")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
diff --git a/src/frames.cc b/src/frames.cc
index 5911284..e265341 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1359,12 +1359,7 @@
// -------------------------------------------------------------------------
int NumRegs(RegList reglist) {
- int n = 0;
- while (reglist != 0) {
- n++;
- reglist &= reglist - 1; // clear one bit
- }
- return n;
+ return CompilerIntrinsics::CountSetBits(reglist);
}
diff --git a/src/heap.cc b/src/heap.cc
index 9081017..5ef0447 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -238,12 +238,17 @@
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
- if (space != NEW_SPACE || FLAG_gc_global) {
+ if (space != NEW_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return MARK_COMPACTOR;
}
+ if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
+ *reason = "GC in old space forced by flags";
+ return MARK_COMPACTOR;
+ }
+
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
@@ -3010,7 +3015,6 @@
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_deopt_counter(FLAG_deopt_every_n_times);
- share->set_ic_age(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -3024,6 +3028,8 @@
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_opt_count(0);
+ share->set_ic_age(0);
+ share->set_opt_reenable_tries(0);
return share;
}
@@ -5695,6 +5701,11 @@
intptr_t max_executable_size) {
if (HasBeenSetUp()) return false;
+ if (FLAG_stress_compaction) {
+ // This will cause more frequent GCs when stressing.
+ max_semispace_size_ = Page::kPageSize;
+ }
+
if (max_semispace_size > 0) {
if (max_semispace_size < Page::kPageSize) {
max_semispace_size = Page::kPageSize;
diff --git a/src/heap.h b/src/heap.h
index af86f44..10d3e3a 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -243,7 +243,8 @@
V(compare_ic_symbol, ".compare_ic") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity") \
- V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
+ V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") \
+ V(query_colon_symbol, "(?:)")
// Forward declarations.
class GCTracer;
@@ -1418,6 +1419,12 @@
kRootListLength
};
+ STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
+ STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
+ STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+ STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+ STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
+
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1449,6 +1456,8 @@
inline bool NextGCIsLikelyToBeFull() {
if (FLAG_gc_global) return true;
+ if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+
intptr_t total_promoted = PromotedTotalSize();
intptr_t adjusted_promotion_limit =
@@ -1612,6 +1621,8 @@
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
+ Object* roots_[kRootListLength];
+
intptr_t code_range_size_;
int reserved_semispace_size_;
int max_semispace_size_;
@@ -1727,8 +1738,6 @@
// last GC.
int old_gen_exhausted_;
- Object* roots_[kRootListLength];
-
Object* global_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 4684b84..f8c021c 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -416,6 +416,7 @@
SetFlag(kIsDead);
for (int i = 0; i < OperandCount(); ++i) {
HValue* operand = OperandAt(i);
+ if (operand == NULL) continue;
HUseListNode* first = operand->use_list_;
if (first != NULL && first->value() == this && first->index() == i) {
operand->use_list_ = first->tail();
@@ -612,6 +613,7 @@
HBasicBlock* cur_block = block();
for (int i = 0; i < OperandCount(); ++i) {
HValue* other_operand = OperandAt(i);
+ if (other_operand == NULL) continue;
HBasicBlock* other_block = other_operand->block();
if (cur_block == other_block) {
if (!other_operand->IsPhi()) {
@@ -931,6 +933,62 @@
}
+HValue* HUnaryMathOperation::Canonicalize() {
+ if (op() == kMathFloor) {
+ // If the input is integer32 then we replace the floor instruction
+ // with its input. This happens before the representation changes are
+ // introduced.
+ if (value()->representation().IsInteger32()) return value();
+
+#ifdef V8_TARGET_ARCH_ARM
+ if (value()->IsDiv() && (value()->UseCount() == 1)) {
+ // TODO(2038): Implement this optimization for non ARM architectures.
+ HDiv* hdiv = HDiv::cast(value());
+ HValue* left = hdiv->left();
+ HValue* right = hdiv->right();
+ // Try to simplify left and right values of the division.
+ HValue* new_left =
+ LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
+ HValue* new_right =
+ LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+
+ // Return if left or right are not optimizable.
+ if ((new_left == NULL) || (new_right == NULL)) return this;
+
+ // Insert the new values in the graph.
+ if (new_left->IsInstruction() &&
+ !HInstruction::cast(new_left)->IsLinked()) {
+ HInstruction::cast(new_left)->InsertBefore(this);
+ }
+ if (new_right->IsInstruction() &&
+ !HInstruction::cast(new_right)->IsLinked()) {
+ HInstruction::cast(new_right)->InsertBefore(this);
+ }
+ HMathFloorOfDiv* instr = new HMathFloorOfDiv(context(),
+ new_left,
+ new_right);
+ // Replace this HMathFloor instruction by the new HMathFloorOfDiv.
+ instr->InsertBefore(this);
+ ReplaceAllUsesWith(instr);
+ Kill();
+ // We know the division had no other uses than this HMathFloor. Delete it.
+ // Also delete the arguments of the division if they are not used any
+ // more.
+ hdiv->DeleteAndReplaceWith(NULL);
+ ASSERT(left->IsChange() || left->IsConstant());
+ ASSERT(right->IsChange() || right->IsConstant());
+ if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
+ if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);
+
+ // Return NULL to remove this instruction from the graph.
+ return NULL;
+ }
+#endif // V8_TARGET_ARCH_ARM
+ }
+ return this;
+}
+
+
HValue* HCheckInstanceType::Canonicalize() {
if (check_ == IS_STRING &&
!value()->type().IsUninitialized() &&
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 0c578ac..9d262fc 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -140,6 +140,7 @@
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathFloorOfDiv) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@@ -1992,15 +1993,7 @@
}
}
- virtual HValue* Canonicalize() {
- // If the input is integer32 then we replace the floor instruction
- // with its inputs. This happens before the representation changes are
- // introduced.
- if (op() == kMathFloor) {
- if (value()->representation().IsInteger32()) return value();
- }
- return this;
- }
+ virtual HValue* Canonicalize();
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2758,6 +2751,25 @@
};
+class HMathFloorOfDiv: public HBinaryOperation {
+ public:
+ HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Integer32();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
class HArithmeticBinaryOperation: public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index fecdcf0..ba6c6f5 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1552,14 +1552,100 @@
}
+SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
+ char underlying_buffer[kLastFlag * 128];
+ Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
+#if DEBUG
+ int offset = 0;
+ const char* separator = "";
+ const char* comma = ", ";
+ buffer[0] = 0;
+ uint32_t set_depends_on = 0;
+ uint32_t set_changes = 0;
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if ((flags.ToIntegral() & (1 << bit)) != 0) {
+ if (bit % 2 == 0) {
+ set_changes++;
+ } else {
+ set_depends_on++;
+ }
+ }
+ }
+ bool positive_changes = set_changes < (kLastFlag / 2);
+ bool positive_depends_on = set_depends_on < (kLastFlag / 2);
+ if (set_changes > 0) {
+ if (positive_changes) {
+ offset += OS::SNPrintF(buffer + offset, "changes [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "changes all except [");
+ }
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kChanges##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+ if (set_depends_on > 0) {
+ separator = "";
+ if (set_changes > 0) {
+ offset += OS::SNPrintF(buffer + offset, ", ");
+ }
+ if (positive_depends_on) {
+ offset += OS::SNPrintF(buffer + offset, "depends on [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "depends on all except [");
+ }
+ for (int bit = 0; bit < kLastFlag; ++bit) {
+ if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kDependsOn##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+#else
+ OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
+#endif
+ size_t string_len = strlen(underlying_buffer) + 1;
+ ASSERT(string_len <= sizeof(underlying_buffer));
+ char* result = new char[strlen(underlying_buffer) + 1];
+ memcpy(result, underlying_buffer, string_len);
+ return SmartArrayPointer<char>(result);
+}
+
+
void HGlobalValueNumberer::LoopInvariantCodeMotion() {
for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph_->blocks()->at(i);
if (block->IsLoopHeader()) {
GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
+ TraceGVN("Try loop invariant motion for block B%d %s\n",
block->block_id(),
- side_effects.ToIntegral());
+ *GetGVNFlagsString(side_effects));
GVNFlagSet accumulated_first_time_depends;
GVNFlagSet accumulated_first_time_changes;
@@ -1582,20 +1668,19 @@
GVNFlagSet* first_time_changes) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
+ TraceGVN("Loop invariant motion for B%d %s\n",
block->block_id(),
- depends_flags.ToIntegral());
+ *GetGVNFlagsString(depends_flags));
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
bool hoisted = false;
if (instr->CheckFlag(HValue::kUseGVN)) {
- TraceGVN("Checking instruction %d (%s) instruction GVN flags 0x%X, "
- "loop kills 0x%X\n",
+ TraceGVN("Checking instruction %d (%s) %s. Loop %s\n",
instr->id(),
instr->Mnemonic(),
- instr->gvn_flags().ToIntegral(),
- depends_flags.ToIntegral());
+ *GetGVNFlagsString(instr->gvn_flags()),
+ *GetGVNFlagsString(loop_kills));
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
if (instr->IsTransitionElementsKind()) {
// It's possible to hoist transitions out of a loop as long as the
@@ -1619,14 +1704,14 @@
hoist_change_blockers.Add(kChangesArrayElements);
}
TraceGVN("Checking dependencies on HTransitionElementsKind %d (%s) "
- "hoist depends blockers 0x%X, hoist change blockers 0x%X, "
- "accumulated depends 0x%X, accumulated changes 0x%X\n",
+ "hoist blockers: %s %s; "
+ "first-time accumulated: %s %s\n",
instr->id(),
instr->Mnemonic(),
- hoist_depends_blockers.ToIntegral(),
- hoist_change_blockers.ToIntegral(),
- first_time_depends->ToIntegral(),
- first_time_changes->ToIntegral());
+ *GetGVNFlagsString(hoist_depends_blockers),
+ *GetGVNFlagsString(hoist_change_blockers),
+ *GetGVNFlagsString(*first_time_depends),
+ *GetGVNFlagsString(*first_time_changes));
// It's possible to hoist transitions from the current loop only if
// they dominate all of the successor blocks in the same loop and there
// are not any instructions that have Changes/DependsOn that intervene
@@ -1661,8 +1746,18 @@
if (!hoisted) {
// If an instruction is not hoisted, we have to account for its side
// effects when hoisting later HTransitionElementsKind instructions.
+ GVNFlagSet previous_depends = *first_time_depends;
+ GVNFlagSet previous_changes = *first_time_changes;
first_time_depends->Add(instr->DependsOnFlags());
first_time_changes->Add(instr->ChangesFlags());
+ if (!(previous_depends == *first_time_depends)) {
+ TraceGVN("Updated first-time accumulated %s\n",
+ *GetGVNFlagsString(*first_time_depends));
+ }
+ if (!(previous_changes == *first_time_changes)) {
+ TraceGVN("Updated first-time accumulated %s\n",
+ *GetGVNFlagsString(*first_time_changes));
+ }
}
instr = next;
}
@@ -2632,29 +2727,328 @@
HStackCheckEliminator sce(graph());
sce.Process();
- // Replace the results of check instructions with the original value, if the
- // result is used. This is safe now, since we don't do code motion after this
- // point. It enables better register allocation since the value produced by
- // check instructions is really a copy of the original value.
- graph()->ReplaceCheckedValues();
+ graph()->EliminateRedundantBoundsChecks();
return graph();
}
-void HGraph::ReplaceCheckedValues() {
- HPhase phase("H_Replace checked values", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- HInstruction* instr = blocks()->at(i)->first();
- while (instr != NULL) {
- if (instr->IsBoundsCheck()) {
- // Replace all uses of the checked value with the original input.
- ASSERT(instr->UseCount() > 0);
- instr->ReplaceAllUsesWith(HBoundsCheck::cast(instr)->index());
+// We try to "factor up" HBoundsCheck instructions towards the root of the
+// dominator tree.
+// For now we handle checks where the index is like "exp + int32value".
+// If in the dominator tree we check "exp + v1" and later (dominated)
+// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
+// v2 > v1 we can use v2 in the 1st check and again remove the second.
+// To do so we keep a dictionary of all checks where the key is the pair
+// "exp, length".
+// The class BoundsCheckKey represents this key.
+class BoundsCheckKey : public ZoneObject {
+ public:
+ HValue* IndexBase() const { return index_base_; }
+ HValue* Length() const { return length_; }
+
+ uint32_t Hash() {
+ return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
+ }
+
+ static BoundsCheckKey* Create(Zone* zone,
+ HBoundsCheck* check,
+ int32_t* offset) {
+ HValue* index_base = NULL;
+ HConstant* constant = NULL;
+ bool is_sub = false;
+
+ if (check->index()->IsAdd()) {
+ HAdd* index = HAdd::cast(check->index());
+ if (index->left()->IsConstant()) {
+ constant = HConstant::cast(index->left());
+ index_base = index->right();
+ } else if (index->right()->IsConstant()) {
+ constant = HConstant::cast(index->right());
+ index_base = index->left();
}
- instr = instr->next();
+ } else if (check->index()->IsSub()) {
+ HSub* index = HSub::cast(check->index());
+ is_sub = true;
+ if (index->left()->IsConstant()) {
+ constant = HConstant::cast(index->left());
+ index_base = index->right();
+ } else if (index->right()->IsConstant()) {
+ constant = HConstant::cast(index->right());
+ index_base = index->left();
+ }
+ }
+
+ if (constant != NULL && constant->HasInteger32Value()) {
+ *offset = is_sub ? - constant->Integer32Value()
+ : constant->Integer32Value();
+ } else {
+ *offset = 0;
+ index_base = check->index();
+ }
+
+ return new(zone) BoundsCheckKey(index_base, check->length());
+ }
+
+ private:
+ BoundsCheckKey(HValue* index_base, HValue* length)
+ : index_base_(index_base),
+ length_(length) { }
+
+ HValue* index_base_;
+ HValue* length_;
+};
+
+
+// Data about each HBoundsCheck that can be eliminated or moved.
+// It is the "value" in the dictionary indexed by "base-index, length"
+// (the key is BoundsCheckKey).
+// We scan the code with a dominator tree traversal.
+// Traversing the dominator tree we keep a stack (implemented as a singly
+// linked list) of "data" for each basic block that contains a relevant check
+// with the same key (the dictionary holds the head of the list).
+// We also keep all the "data" created for a given basic block in a list, and
+// use it to "clean up" the dictionary when backtracking in the dominator tree
+// traversal.
+// Doing this, each dictionary entry always points directly to the check
+// that dominates the code currently being examined.
+// We also track the current "offset" of the index expression and use it to
+// decide if any check is already "covered" (so it can be removed) or not.
+class BoundsCheckBbData: public ZoneObject {
+ public:
+ BoundsCheckKey* Key() const { return key_; }
+ int32_t LowerOffset() const { return lower_offset_; }
+ int32_t UpperOffset() const { return upper_offset_; }
+ HBasicBlock* BasicBlock() const { return basic_block_; }
+ HBoundsCheck* Check() const { return check_; }
+ BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
+ BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
+
+ bool OffsetIsCovered(int32_t offset) const {
+ return offset >= LowerOffset() && offset <= UpperOffset();
+ }
+
+ // This method removes new_check and modifies the current check so that it
+ // also "covers" what new_check covered.
+ // The obvious precondition is that new_check follows Check() in the
+ // same basic block, and that new_offset is not covered (otherwise we
+ // could simply remove new_check).
+ // As a consequence LowerOffset() or UpperOffset() change (the covered
+ // range grows).
+ //
+ // In the general case the check covering the current range should be like
+ // these two checks:
+ // 0 <= Key()->IndexBase() + LowerOffset()
+ // Key()->IndexBase() + UpperOffset() < Key()->Length()
+ //
+ // We can transform the second check like this:
+ // Key()->IndexBase() + LowerOffset() <
+ // Key()->Length() + (LowerOffset() - UpperOffset())
+ // so we can handle both checks with a single unsigned comparison.
+ //
+ // The bulk of this method changes Check()->index() and Check()->length()
+ // replacing them with new HAdd instructions to perform the transformation
+ // described above.
+ void CoverCheck(HBoundsCheck* new_check,
+ int32_t new_offset) {
+ ASSERT(new_check->index()->representation().IsInteger32());
+
+ if (new_offset > upper_offset_) {
+ upper_offset_ = new_offset;
+ } else if (new_offset < lower_offset_) {
+ lower_offset_ = new_offset;
+ } else {
+ ASSERT(false);
+ }
+
+ BuildOffsetAdd(&added_index_,
+ &added_index_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ lower_offset_);
+ Check()->SetOperandAt(0, added_index_);
+ BuildOffsetAdd(&added_length_,
+ &added_length_offset_,
+ Key()->Length(),
+ new_check->length()->representation(),
+ lower_offset_ - upper_offset_);
+ Check()->SetOperandAt(1, added_length_);
+
+ new_check->DeleteAndReplaceWith(NULL);
+ }
+
+ void RemoveZeroOperations() {
+ RemoveZeroAdd(&added_index_, &added_index_offset_);
+ RemoveZeroAdd(&added_length_, &added_length_offset_);
+ }
+
+ BoundsCheckBbData(BoundsCheckKey* key,
+ int32_t lower_offset,
+ int32_t upper_offset,
+ HBasicBlock* bb,
+ HBoundsCheck* check,
+ BoundsCheckBbData* next_in_bb,
+ BoundsCheckBbData* father_in_dt)
+ : key_(key),
+ lower_offset_(lower_offset),
+ upper_offset_(upper_offset),
+ basic_block_(bb),
+ check_(check),
+ added_index_offset_(NULL),
+ added_index_(NULL),
+ added_length_offset_(NULL),
+ added_length_(NULL),
+ next_in_bb_(next_in_bb),
+ father_in_dt_(father_in_dt) { }
+
+ private:
+ BoundsCheckKey* key_;
+ int32_t lower_offset_;
+ int32_t upper_offset_;
+ HBasicBlock* basic_block_;
+ HBoundsCheck* check_;
+ HConstant* added_index_offset_;
+ HAdd* added_index_;
+ HConstant* added_length_offset_;
+ HAdd* added_length_;
+ BoundsCheckBbData* next_in_bb_;
+ BoundsCheckBbData* father_in_dt_;
+
+ void BuildOffsetAdd(HAdd** add,
+ HConstant** constant,
+ HValue* original_value,
+ Representation representation,
+ int32_t new_offset) {
+ HConstant* new_constant = new(BasicBlock()->zone())
+ HConstant(Handle<Object>(Smi::FromInt(new_offset)),
+ Representation::Integer32());
+ if (*add == NULL) {
+ new_constant->InsertBefore(Check());
+ *add = new(BasicBlock()->zone()) HAdd(NULL,
+ original_value,
+ new_constant);
+ (*add)->AssumeRepresentation(representation);
+ (*add)->InsertBefore(Check());
+ } else {
+ new_constant->InsertBefore(*add);
+ (*constant)->DeleteAndReplaceWith(new_constant);
+ }
+ *constant = new_constant;
+ }
+
+ void RemoveZeroAdd(HAdd** add, HConstant** constant) {
+ if (*add != NULL && (*constant)->Integer32Value() == 0) {
+ (*add)->DeleteAndReplaceWith((*add)->left());
+ (*constant)->DeleteAndReplaceWith(NULL);
}
}
+};
+
+
+static bool BoundsCheckKeyMatch(void* key1, void* key2) {
+ BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
+ BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
+ return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
+}
+
+
+class BoundsCheckTable : private ZoneHashMap {
+ public:
+ BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key) {
+ return reinterpret_cast<BoundsCheckBbData**>(
+ &(Lookup(key, key->Hash(), true)->value));
+ }
+
+ void Insert(BoundsCheckKey* key, BoundsCheckBbData* data) {
+ Lookup(key, key->Hash(), true)->value = data;
+ }
+
+ void Delete(BoundsCheckKey* key) {
+ Remove(key, key->Hash());
+ }
+
+ BoundsCheckTable() : ZoneHashMap(BoundsCheckKeyMatch) { }
+};
+
+
+// Eliminates checks in bb and recursively in the dominated blocks.
+// Also replace the results of check instructions with the original value, if
+// the result is used. This is safe now, since we don't do code motion after
+// this point. It enables better register allocation since the value produced
+// by check instructions is really a copy of the original value.
+void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
+ BoundsCheckTable* table) {
+ BoundsCheckBbData* bb_data_list = NULL;
+
+ for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
+ if (!i->IsBoundsCheck()) continue;
+
+ HBoundsCheck* check = HBoundsCheck::cast(i);
+ check->ReplaceAllUsesWith(check->index());
+
+ isolate()->counters()->array_bounds_checks_seen()->Increment();
+ if (!FLAG_array_bounds_checks_elimination) continue;
+
+ int32_t offset;
+ BoundsCheckKey* key =
+ BoundsCheckKey::Create(bb->zone(), check, &offset);
+ BoundsCheckBbData** data_p = table->LookupOrInsert(key);
+ BoundsCheckBbData* data = *data_p;
+ if (data == NULL) {
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ offset,
+ offset,
+ bb,
+ check,
+ bb_data_list,
+ NULL);
+ *data_p = bb_data_list;
+ } else if (data->OffsetIsCovered(offset)) {
+ check->DeleteAndReplaceWith(NULL);
+ isolate()->counters()->array_bounds_checks_removed()->Increment();
+ } else if (data->BasicBlock() == bb) {
+ data->CoverCheck(check, offset);
+ isolate()->counters()->array_bounds_checks_removed()->Increment();
+ } else {
+ int32_t new_lower_offset = offset < data->LowerOffset()
+ ? offset
+ : data->LowerOffset();
+ int32_t new_upper_offset = offset > data->UpperOffset()
+ ? offset
+ : data->UpperOffset();
+ bb_data_list = new(bb->zone()) BoundsCheckBbData(key,
+ new_lower_offset,
+ new_upper_offset,
+ bb,
+ check,
+ bb_data_list,
+ data);
+ table->Insert(key, bb_data_list);
+ }
+ }
+
+ for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
+ EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i), table);
+ }
+
+ for (BoundsCheckBbData* data = bb_data_list;
+ data != NULL;
+ data = data->NextInBasicBlock()) {
+ data->RemoveZeroOperations();
+ if (data->FatherInDominatorTree()) {
+ table->Insert(data->Key(), data->FatherInDominatorTree());
+ } else {
+ table->Delete(data->Key());
+ }
+ }
+}
+
+
+void HGraph::EliminateRedundantBoundsChecks() {
+ HPhase phase("H_Eliminate bounds checks", this);
+ AssertNoAllocation no_gc;
+ BoundsCheckTable checks_table;
+ EliminateRedundantBoundsChecks(entry_block(), &checks_table);
}
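
The widening transformation in CoverCheck can be spot-checked numerically:
the surviving HBoundsCheck compares base + LowerOffset() against
Length() + (LowerOffset() - UpperOffset()) as unsigned values, which rejects
negative indices (they wrap to large unsigned values) and upper-bound
violations with one comparison. A sketch (illustrative only, not part of the
patch):

    #include <assert.h>
    #include <stdint.h>

    // One unsigned comparison subsumes
    //   0 <= base + lower   and   base + upper < length.
    bool CombinedCheck(int32_t base, int32_t lower, int32_t upper,
                       int32_t length) {
      uint32_t index = static_cast<uint32_t>(base + lower);
      uint32_t limit = static_cast<uint32_t>(length + (lower - upper));
      return index < limit;
    }

    int main() {
      // Covers a[i-1] .. a[i+2] for i = 5, length 10 (lower = -1, upper = 2).
      assert(CombinedCheck(5, -1, 2, 10));   // 4 < 7: all accesses in bounds
      assert(!CombinedCheck(0, -1, 2, 10));  // base + lower < 0: wraps, fails
      assert(!CombinedCheck(8, -1, 2, 10));  // base + upper == 10: off the end
      return 0;
    }
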
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 7b7b922..a52bf3b 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -241,7 +241,7 @@
HStackCheck* stack_check_;
};
-
+class BoundsCheckTable;
class HGraph: public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
@@ -266,6 +266,7 @@
void OrderBlocks();
void AssignDominators();
void ReplaceCheckedValues();
+ void EliminateRedundantBoundsChecks();
void PropagateDeoptimizingMark();
// Returns false if there are phi-uses of the arguments-object
@@ -358,6 +359,7 @@
void InferTypes(ZoneList<HValue*>* worklist);
void InitializeInferredTypes(int from_inclusive, int to_inclusive);
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+ void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
Isolate* isolate_;
int next_block_id_;
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 3f10c09..73961e1 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -548,6 +548,8 @@
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
@@ -555,7 +557,7 @@
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
- unsigned fixed_frame_size = 6 * kPointerSize;
+ unsigned fixed_frame_size = 7 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -620,6 +622,15 @@
top_address + output_offset, output_offset, value);
}
+ // The output frame reflects a JSConstructStubGeneric frame.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(construct_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
@@ -641,8 +652,6 @@
ASSERT(0 == output_offset);
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 2a3eb57..aeddcd3 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1355,6 +1355,12 @@
}
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
diff --git a/src/isolate.cc b/src/isolate.cc
index e805122..0c97abd 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1430,6 +1430,7 @@
Isolate::Isolate()
: state_(UNINITIALIZED),
+ embedder_data_(NULL),
entry_stack_(NULL),
stack_trace_nesting_level_(0),
incomplete_message_(NULL),
@@ -1472,7 +1473,6 @@
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
- embedder_data_(NULL),
context_exit_happened_(false) {
TRACE_ISOLATE(constructor);
@@ -1857,6 +1857,13 @@
LOG(this, LogCompiledFunctions());
}
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
+ Internals::kIsolateStateOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
+ Internals::kIsolateEmbedderDataOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
+ Internals::kIsolateRootsOffset);
+
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
return true;
diff --git a/src/isolate.h b/src/isolate.h
index 2ff1318..f1c9b3c 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -422,7 +422,7 @@
enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
-#undef C
+#undef DECLARE_ENUM
kIsolateAddressCount
};
@@ -1038,6 +1038,18 @@
friend struct GlobalState;
friend struct InitializeGlobalState;
+ enum State {
+ UNINITIALIZED, // Some components may not have been allocated.
+ INITIALIZED // All components are fully initialized.
+ };
+
+ // These fields are accessed through the API, offsets must be kept in sync
+ // with v8::internal::Internals (in include/v8.h) constants. This is also
+ // verified in Isolate::Init() using runtime checks.
+ State state_; // Will be padded to kApiPointerSize.
+ void* embedder_data_;
+ Heap heap_;
+
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
class ThreadDataTable {
@@ -1095,14 +1107,6 @@
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- enum State {
- UNINITIALIZED, // Some components may not have been allocated.
- INITIALIZED // All components are fully initialized.
- };
-
- State state_;
- EntryStackItem* entry_stack_;
-
// Allocate and insert PerIsolateThreadData into the ThreadDataTable
// (regardless of whether such data already exists).
PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
@@ -1146,13 +1150,13 @@
// the Error object.
bool IsErrorObject(Handle<Object> obj);
+ EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
// The preallocated memory thread singleton.
PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
NoAllocationStringAllocator* preallocated_message_space_;
-
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
@@ -1161,7 +1165,6 @@
Mutex* break_access_;
Atomic32 debugger_initialized_;
Mutex* debugger_access_;
- Heap heap_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
@@ -1202,11 +1205,8 @@
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
-
DateCache* date_cache_;
-
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
- void* embedder_data_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 1e413d8..1055ad3 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -5175,45 +5175,6 @@
}
-
-// -------------------------------------------------------------------
-// Interest propagation
-
-
-RegExpNode* RegExpNode::TryGetSibling(NodeInfo* info) {
- for (int i = 0; i < siblings_.length(); i++) {
- RegExpNode* sibling = siblings_.Get(i);
- if (sibling->info()->Matches(info))
- return sibling;
- }
- return NULL;
-}
-
-
-RegExpNode* RegExpNode::EnsureSibling(NodeInfo* info, bool* cloned) {
- ASSERT_EQ(false, *cloned);
- siblings_.Ensure(this);
- RegExpNode* result = TryGetSibling(info);
- if (result != NULL) return result;
- result = this->Clone();
- NodeInfo* new_info = result->info();
- new_info->ResetCompilationState();
- new_info->AddFromPreceding(info);
- AddSibling(result);
- *cloned = true;
- return result;
-}
-
-
-template <class C>
-static RegExpNode* PropagateToEndpoint(C* node, NodeInfo* info) {
- NodeInfo full_info(*node->info());
- full_info.AddFromPreceding(info);
- bool cloned = false;
- return RegExpNode::EnsureSibling(node, &full_info, &cloned);
-}
-
-
// -------------------------------------------------------------------
// Splay tree
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 4847739..4053799 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -467,25 +467,6 @@
};
-class SiblingList {
- public:
- SiblingList() : list_(NULL) { }
- int length() {
- return list_ == NULL ? 0 : list_->length();
- }
- void Ensure(RegExpNode* parent) {
- if (list_ == NULL) {
- list_ = new ZoneList<RegExpNode*>(2);
- list_->Add(parent);
- }
- }
- void Add(RegExpNode* node) { list_->Add(node); }
- RegExpNode* Get(int index) { return list_->at(index); }
- private:
- ZoneList<RegExpNode*>* list_;
-};
-
-
// Details of a quick mask-compare check that can look ahead in the
// input stream.
class QuickCheckDetails {
@@ -540,7 +521,7 @@
class RegExpNode: public ZoneObject {
public:
- RegExpNode() : first_character_set_(NULL), trace_count_(0) {
+ RegExpNode() : trace_count_(0) {
bm_info_[0] = bm_info_[1] = NULL;
}
virtual ~RegExpNode();
@@ -609,47 +590,15 @@
NodeInfo* info() { return &info_; }
- void AddSibling(RegExpNode* node) { siblings_.Add(node); }
-
- // Static version of EnsureSibling that expresses the fact that the
- // result has the same type as the input.
- template <class C>
- static C* EnsureSibling(C* node, NodeInfo* info, bool* cloned) {
- return static_cast<C*>(node->EnsureSibling(info, cloned));
- }
-
- SiblingList* siblings() { return &siblings_; }
- void set_siblings(SiblingList* other) { siblings_ = *other; }
-
- // Get and set the cached first character set value.
- ZoneList<CharacterRange>* first_character_set() {
- return first_character_set_;
- }
- void set_first_character_set(ZoneList<CharacterRange>* character_set) {
- first_character_set_ = character_set;
- }
BoyerMooreLookahead* bm_info(bool not_at_start) {
return bm_info_[not_at_start ? 1 : 0];
}
protected:
enum LimitResult { DONE, CONTINUE };
- static const int kComputeFirstCharacterSetFail = -1;
LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
- // Returns a sibling of this node whose interests and assumptions
- // match the ones in the given node info. If no sibling exists NULL
- // is returned.
- RegExpNode* TryGetSibling(NodeInfo* info);
-
- // Returns a sibling of this node whose interests match the ones in
- // the given node info. The info must not contain any assertions.
- // If no node exists a new one will be created by cloning the current
- // node. The result will always be an instance of the same concrete
- // class as this node.
- RegExpNode* EnsureSibling(NodeInfo* info, bool* cloned);
-
// Returns a clone of this node initialized using the copy constructor
// of its concrete class. Note that the node may have to be pre-
// processed before it is in a usable state.
@@ -663,8 +612,6 @@
static const int kFirstCharBudget = 10;
Label label_;
NodeInfo info_;
- SiblingList siblings_;
- ZoneList<CharacterRange>* first_character_set_;
// This variable keeps track of how many times code has been generated for
// this node (in different traces). We don't keep track of where the
// generated code is located unless the code is generated at the start of
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index abfb0f6..4463c93 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -159,6 +159,11 @@
preview_description.stack_modified = dropped_functions_number != 0;
+ // Our current implementation requires the client to manually issue a
+ // "step in" command to restore a correct stack state.
+ preview_description.stack_update_needs_step_in =
+ preview_description.stack_modified;
+
// Start with breakpoints. Convert their line/column positions and
// temporary remove.
var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
diff --git a/src/macros.py b/src/macros.py
index 699b368..08fa82e 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -196,6 +196,7 @@
macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
# Last input and last subject of regexp matches.
+const LAST_SUBJECT_INDEX = 1;
macro LAST_SUBJECT(array) = ((array)[1]);
macro LAST_INPUT(array) = ((array)[2]);
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 51c2e46..62f3155 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -447,6 +447,8 @@
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
@@ -454,7 +456,7 @@
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
- unsigned fixed_frame_size = 7 * kPointerSize;
+ unsigned fixed_frame_size = 8 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -519,6 +521,15 @@
top_address + output_offset, output_offset, value);
}
+ // The output frame reflects a JSConstructStubGeneric frame.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(construct_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
@@ -549,8 +560,6 @@
ASSERT(0 == output_offset);
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index b69afe2..9e42e93 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1316,6 +1316,12 @@
}
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 677d567..15bdc52 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3511,7 +3511,6 @@
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -3562,6 +3561,8 @@
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_reenable_tries, kOptReenableTriesOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -3615,6 +3616,11 @@
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ic_age, kICAgeOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
+ opt_reenable_tries,
+ kOptReenableTriesOffset)
#endif
@@ -3654,6 +3660,7 @@
// it will not be counted as optimizable code.
if ((code()->kind() == Code::FUNCTION) && disable) {
code()->set_optimizable(false);
+ code()->set_profiler_ticks(0);
}
}
@@ -3689,6 +3696,23 @@
return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
}
+
+void SharedFunctionInfo::TryReenableOptimization() {
+ int tries = opt_reenable_tries();
+ if (tries == Smi::kMaxValue) {
+ tries = 0;
+ }
+ set_opt_reenable_tries(tries + 1);
+ // We reenable optimization whenever the number of tries is a large
+ // enough power of 2.
+ if (tries >= 4 && (((tries - 1) & tries) == 0)) {
+ set_optimization_disabled(false);
+ set_opt_count(0);
+ code()->set_optimizable(true);
+ }
+}
+
+
BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
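
The condition in TryReenableOptimization above is a bit trick worth spelling out: ((tries - 1) & tries) == 0 is the classic power-of-two test (subtracting 1 borrows through the lowest set bit, so the AND is zero exactly when at most one bit is set), and the tries >= 4 floor skips the first few attempts. Reenabling therefore fires on try 4, 8, 16, 32, ... -- an exponential backoff against functions that keep deoptimizing. A standalone check of which counts pass:

#include <cstdio>

// Same predicate as TryReenableOptimization: a power of two that is >= 4.
static bool ShouldReenable(int tries) {
  return tries >= 4 && ((tries - 1) & tries) == 0;
}

int main() {
  for (int tries = 0; tries <= 64; tries++) {
    if (ShouldReenable(tries)) std::printf("%d ", tries);
  }
  std::printf("\n");  // prints: 4 8 16 32 64
  return 0;
}
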
diff --git a/src/objects.cc b/src/objects.cc
index ef9d498..41f2ab6 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -7871,6 +7871,7 @@
code()->set_optimizable(true);
}
set_opt_count(0);
+ set_opt_reenable_tries(0);
}
}
diff --git a/src/objects.h b/src/objects.h
index 80d1fd4..3ec9428 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -5461,6 +5461,13 @@
inline int opt_count();
inline void set_opt_count(int opt_count);
+ // Number of times we tried to reenable optimization.
+ inline int opt_reenable_tries();
+ inline void set_opt_reenable_tries(int opt_reenable_tries);
+
+ inline void TryReenableOptimization();
+
// Source size of this function.
int SourceSize();
@@ -5517,13 +5524,10 @@
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
- // ic_age is a Smi field. It could be grouped with another Smi field into a
- // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
- static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kICAgeOffset + kPointerSize;
+ kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5543,10 +5547,10 @@
kThisPropertyAssignmentsCountOffset + kPointerSize;
static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
-
-
+ static const int kICAgeOffset = kDeoptCounterOffset + kPointerSize;
+ static const int kOptReenableTriesOffset = kICAgeOffset + kPointerSize;
// Total size.
- static const int kSize = kDeoptCounterOffset + kPointerSize;
+ static const int kSize = kOptReenableTriesOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -5558,7 +5562,7 @@
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kICAgeOffset + kPointerSize;
+ kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -5585,8 +5589,11 @@
static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
+ static const int kICAgeOffset = kDeoptCounterOffset + kIntSize;
+ static const int kOptReenableTriesOffset = kICAgeOffset + kIntSize;
+
// Total size.
- static const int kSize = kDeoptCounterOffset + kIntSize;
+ static const int kSize = kOptReenableTriesOffset + kIntSize;
#endif
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 0106e57..4248ea2 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -487,12 +487,10 @@
void Thread::Start() {
- pthread_attr_t* attr_ptr = NULL;
pthread_attr_t attr;
if (stack_size_ > 0) {
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- attr_ptr = &attr;
}
pthread_create(&data_->thread_, NULL, ThreadEntry, this);
ASSERT(data_->thread_ != kNoThread);
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index a0429f3..c91e83b 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1978,201 +1978,28 @@
bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
- // We need to reference JS global objects from snapshot's root.
- // We use JSGlobalProxy because this is what embedder (e.g. browser)
- // uses for the global object.
- JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
- Object* object = proxy->map()->prototype();
- bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- is_debug_object = object->IsGlobalObject() &&
- Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
-#endif
- if (!is_debug_object) {
- SetUserGlobalReference(object);
- }
+ ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
} else if (obj->IsJSObject()) {
- JSObject* js_obj = JSObject::cast(obj);
- ExtractClosureReferences(js_obj, entry);
- ExtractPropertyReferences(js_obj, entry);
- ExtractElementReferences(js_obj, entry);
- ExtractInternalReferences(js_obj, entry);
- SetPropertyReference(
- obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
- if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(js_obj);
- Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
- if (!proto_or_map->IsMap()) {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_symbol(), proto_or_map,
- NULL,
- JSFunction::kPrototypeOrInitialMapOffset);
- } else {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_symbol(), js_fun->prototype());
- }
- }
- SharedFunctionInfo* shared_info = js_fun->shared();
- // JSFunction has either bindings or literals and never both.
- bool bound = shared_info->bound();
- TagObject(js_fun->literals_or_bindings(),
- bound ? "(function bindings)" : "(function literals)");
- SetInternalReference(js_fun, entry,
- bound ? "bindings" : "literals",
- js_fun->literals_or_bindings(),
- JSFunction::kLiteralsOffset);
- TagObject(shared_info, "(shared function info)");
- SetInternalReference(js_fun, entry,
- "shared", shared_info,
- JSFunction::kSharedFunctionInfoOffset);
- TagObject(js_fun->unchecked_context(), "(context)");
- SetInternalReference(js_fun, entry,
- "context", js_fun->unchecked_context(),
- JSFunction::kContextOffset);
- for (int i = JSFunction::kNonWeakFieldsEndOffset;
- i < JSFunction::kSize;
- i += kPointerSize) {
- SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
- }
- } else if (obj->IsGlobalObject()) {
- GlobalObject* global_obj = GlobalObject::cast(obj);
- SetInternalReference(global_obj, entry,
- "builtins", global_obj->builtins(),
- GlobalObject::kBuiltinsOffset);
- SetInternalReference(global_obj, entry,
- "global_context", global_obj->global_context(),
- GlobalObject::kGlobalContextOffset);
- SetInternalReference(global_obj, entry,
- "global_receiver", global_obj->global_receiver(),
- GlobalObject::kGlobalReceiverOffset);
- }
- TagObject(js_obj->properties(), "(object properties)");
- SetInternalReference(obj, entry,
- "properties", js_obj->properties(),
- JSObject::kPropertiesOffset);
- TagObject(js_obj->elements(), "(object elements)");
- SetInternalReference(obj, entry,
- "elements", js_obj->elements(),
- JSObject::kElementsOffset);
+ ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
- if (obj->IsConsString()) {
- ConsString* cs = ConsString::cast(obj);
- SetInternalReference(obj, entry, 1, cs->first());
- SetInternalReference(obj, entry, 2, cs->second());
- }
- if (obj->IsSlicedString()) {
- SlicedString* ss = SlicedString::cast(obj);
- SetInternalReference(obj, entry, "parent", ss->parent());
- }
+ ExtractStringReferences(entry, String::cast(obj));
extract_indexed_refs = false;
- } else if (obj->IsGlobalContext()) {
- Context* context = Context::cast(obj);
- TagObject(context->jsfunction_result_caches(),
- "(context func. result caches)");
- TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->data(), "(context data)");
- for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::GLOBAL_CONTEXT_SLOTS;
- ++i) {
- SetWeakReference(obj, entry,
- i, context->get(i),
- FixedArray::OffsetOfElementAt(i));
- }
+ } else if (obj->IsContext()) {
+ ExtractContextReferences(entry, Context::cast(obj));
} else if (obj->IsMap()) {
- Map* map = Map::cast(obj);
- SetInternalReference(obj, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(obj, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
- if (!map->instance_descriptors()->IsEmpty()) {
- TagObject(map->instance_descriptors(), "(map descriptors)");
- SetInternalReference(obj, entry,
- "descriptors", map->instance_descriptors(),
- Map::kInstanceDescriptorsOrBitField3Offset);
- }
- TagObject(map->prototype_transitions(), "(prototype transitions)");
- SetInternalReference(obj, entry,
- "prototype_transitions", map->prototype_transitions(),
- Map::kPrototypeTransitionsOffset);
- SetInternalReference(obj, entry,
- "code_cache", map->code_cache(),
- Map::kCodeCacheOffset);
+ ExtractMapReferences(entry, Map::cast(obj));
} else if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- SetInternalReference(obj, entry,
- "name", shared->name(),
- SharedFunctionInfo::kNameOffset);
- TagObject(shared->code(), "(code)");
- SetInternalReference(obj, entry,
- "code", shared->code(),
- SharedFunctionInfo::kCodeOffset);
- TagObject(shared->scope_info(), "(function scope info)");
- SetInternalReference(obj, entry,
- "scope_info", shared->scope_info(),
- SharedFunctionInfo::kScopeInfoOffset);
- SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
- "script", shared->script(),
- SharedFunctionInfo::kScriptOffset);
- TagObject(shared->construct_stub(), "(code)");
- SetInternalReference(obj, entry,
- "construct_stub", shared->construct_stub(),
- SharedFunctionInfo::kConstructStubOffset);
- SetInternalReference(obj, entry,
- "function_data", shared->function_data(),
- SharedFunctionInfo::kFunctionDataOffset);
- SetInternalReference(obj, entry,
- "debug_info", shared->debug_info(),
- SharedFunctionInfo::kDebugInfoOffset);
- SetInternalReference(obj, entry,
- "inferred_name", shared->inferred_name(),
- SharedFunctionInfo::kInferredNameOffset);
- SetInternalReference(obj, entry,
- "this_property_assignments",
- shared->this_property_assignments(),
- SharedFunctionInfo::kThisPropertyAssignmentsOffset);
- SetWeakReference(obj, entry,
- 1, shared->initial_map(),
- SharedFunctionInfo::kInitialMapOffset);
+ ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
} else if (obj->IsScript()) {
- Script* script = Script::cast(obj);
- SetInternalReference(obj, entry,
- "source", script->source(),
- Script::kSourceOffset);
- SetInternalReference(obj, entry,
- "name", script->name(),
- Script::kNameOffset);
- SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
- "context_data", script->context_data(),
- Script::kContextOffset);
- TagObject(script->line_ends(), "(script line ends)");
- SetInternalReference(obj, entry,
- "line_ends", script->line_ends(),
- Script::kLineEndsOffset);
+ ExtractScriptReferences(entry, Script::cast(obj));
} else if (obj->IsCodeCache()) {
- CodeCache* code_cache = CodeCache::cast(obj);
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(obj, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(obj, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
+ ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- TagObject(code->unchecked_relocation_info(), "(code relocation info)");
- TagObject(code->unchecked_deoptimization_data(), "(code deopt data)");
+ ExtractCodeReferences(entry, Code::cast(obj));
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ ExtractJSGlobalPropertyCellReferences(
+ entry, JSGlobalPropertyCell::cast(obj));
+ extract_indexed_refs = false;
}
if (extract_indexed_refs) {
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
@@ -2182,14 +2009,262 @@
}
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
+ // We need to reference JS global objects from the snapshot's root.
+ // We use JSGlobalProxy because this is what the embedder (e.g. a browser)
+ // uses for the global object.
+ Object* object = proxy->map()->prototype();
+ bool is_debug_object = false;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ is_debug_object = object->IsGlobalObject() &&
+ Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
+#endif
+ if (!is_debug_object) {
+ SetUserGlobalReference(object);
+ }
+}
+
+
+void V8HeapExplorer::ExtractJSObjectReferences(
+ HeapEntry* entry, JSObject* js_obj) {
+ HeapObject* obj = js_obj;
+ ExtractClosureReferences(js_obj, entry);
+ ExtractPropertyReferences(js_obj, entry);
+ ExtractElementReferences(js_obj, entry);
+ ExtractInternalReferences(js_obj, entry);
+ SetPropertyReference(
+ obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
+ if (obj->IsJSFunction()) {
+ JSFunction* js_fun = JSFunction::cast(js_obj);
+ Object* proto_or_map = js_fun->prototype_or_initial_map();
+ if (!proto_or_map->IsTheHole()) {
+ if (!proto_or_map->IsMap()) {
+ SetPropertyReference(
+ obj, entry,
+ heap_->prototype_symbol(), proto_or_map,
+ NULL,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ } else {
+ SetPropertyReference(
+ obj, entry,
+ heap_->prototype_symbol(), js_fun->prototype());
+ }
+ }
+ SharedFunctionInfo* shared_info = js_fun->shared();
+ // JSFunction has either bindings or literals and never both.
+ bool bound = shared_info->bound();
+ TagObject(js_fun->literals_or_bindings(),
+ bound ? "(function bindings)" : "(function literals)");
+ SetInternalReference(js_fun, entry,
+ bound ? "bindings" : "literals",
+ js_fun->literals_or_bindings(),
+ JSFunction::kLiteralsOffset);
+ TagObject(shared_info, "(shared function info)");
+ SetInternalReference(js_fun, entry,
+ "shared", shared_info,
+ JSFunction::kSharedFunctionInfoOffset);
+ TagObject(js_fun->unchecked_context(), "(context)");
+ SetInternalReference(js_fun, entry,
+ "context", js_fun->unchecked_context(),
+ JSFunction::kContextOffset);
+ for (int i = JSFunction::kNonWeakFieldsEndOffset;
+ i < JSFunction::kSize;
+ i += kPointerSize) {
+ SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+ }
+ } else if (obj->IsGlobalObject()) {
+ GlobalObject* global_obj = GlobalObject::cast(obj);
+ SetInternalReference(global_obj, entry,
+ "builtins", global_obj->builtins(),
+ GlobalObject::kBuiltinsOffset);
+ SetInternalReference(global_obj, entry,
+ "global_context", global_obj->global_context(),
+ GlobalObject::kGlobalContextOffset);
+ SetInternalReference(global_obj, entry,
+ "global_receiver", global_obj->global_receiver(),
+ GlobalObject::kGlobalReceiverOffset);
+ }
+ TagObject(js_obj->properties(), "(object properties)");
+ SetInternalReference(obj, entry,
+ "properties", js_obj->properties(),
+ JSObject::kPropertiesOffset);
+ TagObject(js_obj->elements(), "(object elements)");
+ SetInternalReference(obj, entry,
+ "elements", js_obj->elements(),
+ JSObject::kElementsOffset);
+}
+
+
+void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String* string) {
+ if (string->IsConsString()) {
+ ConsString* cs = ConsString::cast(string);
+ SetInternalReference(cs, entry, "first", cs->first());
+ SetInternalReference(cs, entry, "second", cs->second());
+ } else if (string->IsSlicedString()) {
+ SlicedString* ss = SlicedString::cast(string);
+ SetInternalReference(ss, entry, "parent", ss->parent());
+ }
+}
+
+
+void V8HeapExplorer::ExtractContextReferences(
+ HeapEntry* entry, Context* context) {
+#define EXTRACT_CONTEXT_FIELD(index, type, name) \
+ SetInternalReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index));
+ EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
+ EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
+ EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
+ EXTRACT_CONTEXT_FIELD(GLOBAL_INDEX, GlobalObject, global);
+ if (context->IsGlobalContext()) {
+ TagObject(context->jsfunction_result_caches(),
+ "(context func. result caches)");
+ TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+ TagObject(context->runtime_context(), "(runtime context)");
+ TagObject(context->data(), "(context data)");
+ GLOBAL_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+#undef EXTRACT_CONTEXT_FIELD
+ for (int i = Context::FIRST_WEAK_SLOT;
+ i < Context::GLOBAL_CONTEXT_SLOTS;
+ ++i) {
+ SetWeakReference(context, entry, i, context->get(i),
+ FixedArray::OffsetOfElementAt(i));
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
+ SetInternalReference(map, entry,
+ "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(map, entry,
+ "constructor", map->constructor(),
+ Map::kConstructorOffset);
+ if (!map->instance_descriptors()->IsEmpty()) {
+ TagObject(map->instance_descriptors(), "(map descriptors)");
+ SetInternalReference(map, entry,
+ "descriptors", map->instance_descriptors(),
+ Map::kInstanceDescriptorsOrBitField3Offset);
+ }
+ TagObject(map->prototype_transitions(), "(prototype transitions)");
+ SetInternalReference(map, entry,
+ "prototype_transitions", map->prototype_transitions(),
+ Map::kPrototypeTransitionsOffset);
+ SetInternalReference(map, entry,
+ "code_cache", map->code_cache(),
+ Map::kCodeCacheOffset);
+}
+
+
+void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
+ HeapEntry* entry, SharedFunctionInfo* shared) {
+ HeapObject* obj = shared;
+ SetInternalReference(obj, entry,
+ "name", shared->name(),
+ SharedFunctionInfo::kNameOffset);
+ TagObject(shared->code(), "(code)");
+ SetInternalReference(obj, entry,
+ "code", shared->code(),
+ SharedFunctionInfo::kCodeOffset);
+ TagObject(shared->scope_info(), "(function scope info)");
+ SetInternalReference(obj, entry,
+ "scope_info", shared->scope_info(),
+ SharedFunctionInfo::kScopeInfoOffset);
+ SetInternalReference(obj, entry,
+ "instance_class_name", shared->instance_class_name(),
+ SharedFunctionInfo::kInstanceClassNameOffset);
+ SetInternalReference(obj, entry,
+ "script", shared->script(),
+ SharedFunctionInfo::kScriptOffset);
+ TagObject(shared->construct_stub(), "(code)");
+ SetInternalReference(obj, entry,
+ "construct_stub", shared->construct_stub(),
+ SharedFunctionInfo::kConstructStubOffset);
+ SetInternalReference(obj, entry,
+ "function_data", shared->function_data(),
+ SharedFunctionInfo::kFunctionDataOffset);
+ SetInternalReference(obj, entry,
+ "debug_info", shared->debug_info(),
+ SharedFunctionInfo::kDebugInfoOffset);
+ SetInternalReference(obj, entry,
+ "inferred_name", shared->inferred_name(),
+ SharedFunctionInfo::kInferredNameOffset);
+ SetInternalReference(obj, entry,
+ "this_property_assignments",
+ shared->this_property_assignments(),
+ SharedFunctionInfo::kThisPropertyAssignmentsOffset);
+ SetWeakReference(obj, entry,
+ 1, shared->initial_map(),
+ SharedFunctionInfo::kInitialMapOffset);
+}
+
+
+void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script* script) {
+ HeapObject* obj = script;
+ SetInternalReference(obj, entry,
+ "source", script->source(),
+ Script::kSourceOffset);
+ SetInternalReference(obj, entry,
+ "name", script->name(),
+ Script::kNameOffset);
+ SetInternalReference(obj, entry,
+ "data", script->data(),
+ Script::kDataOffset);
+ SetInternalReference(obj, entry,
+ "context_data", script->context_data(),
+ Script::kContextOffset);
+ TagObject(script->line_ends(), "(script line ends)");
+ SetInternalReference(obj, entry,
+ "line_ends", script->line_ends(),
+ Script::kLineEndsOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeCacheReferences(
+ HeapEntry* entry, CodeCache* code_cache) {
+ TagObject(code_cache->default_cache(), "(default code cache)");
+ SetInternalReference(code_cache, entry,
+ "default_cache", code_cache->default_cache(),
+ CodeCache::kDefaultCacheOffset);
+ TagObject(code_cache->normal_type_cache(), "(code type cache)");
+ SetInternalReference(code_cache, entry,
+ "type_cache", code_cache->normal_type_cache(),
+ CodeCache::kNormalTypeCacheOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code* code) {
+ TagObject(code->relocation_info(), "(code relocation info)");
+ SetInternalReference(code, entry,
+ "relocation_info", code->relocation_info(),
+ Code::kRelocationInfoOffset);
+ SetInternalReference(code, entry,
+ "handler_table", code->handler_table(),
+ Code::kHandlerTableOffset);
+ TagObject(code->deoptimization_data(), "(code deopt data)");
+ SetInternalReference(code, entry,
+ "deoptimization_data", code->deoptimization_data(),
+ Code::kDeoptimizationDataOffset);
+ SetInternalReference(code, entry,
+ "type_feedback_info", code->type_feedback_info(),
+ Code::kTypeFeedbackInfoOffset);
+ SetInternalReference(code, entry,
+ "gc_metadata", code->gc_metadata(),
+ Code::kGCMetadataOffset);
+}
+
+
+void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
+ HeapEntry* entry, JSGlobalPropertyCell* cell) {
+ SetInternalReference(cell, entry, "value", cell->value());
+}
+
+
void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
HeapEntry* entry) {
if (!js_obj->IsJSFunction()) return;
JSFunction* func = JSFunction::cast(js_obj);
- Context* context = func->context()->declaration_context();
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
-
if (func->shared()->bound()) {
FixedArray* bindings = func->function_bindings();
SetNativeBindReference(js_obj, entry, "bound_this",
@@ -2205,6 +2280,8 @@
bindings->get(i));
}
} else {
+ Context* context = func->context()->declaration_context();
+ ScopeInfo* scope_info = context->closure()->shared()->scope_info();
// Add context allocated locals.
int context_locals = scope_info->ContextLocalCount();
for (int i = 0; i < context_locals; ++i) {
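
EXTRACT_CONTEXT_FIELD above is an instance of the X-macro idiom: the slot table (GLOBAL_CONTEXT_FIELDS in V8) is written once as a macro that takes a per-field macro V, and each use site supplies its own V to stamp out code per field. A toy, self-contained version of the idiom, with made-up field names:

#include <cstdio>

// The field table is written once; each use site supplies its own V macro.
#define EXAMPLE_FIELDS(V) \
  V(0, closure)           \
  V(1, previous)          \
  V(2, extension)

#define PRINT_FIELD(index, name) std::printf("slot %d: %s\n", index, #name);
#define COUNT_FIELD(index, name) + 1

int main() {
  EXAMPLE_FIELDS(PRINT_FIELD)  // expansion 1: one printf per field
  const int field_count = 0 EXAMPLE_FIELDS(COUNT_FIELD);  // expansion 2: 3
  std::printf("%d fields\n", field_count);
  return 0;
}
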
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 3b748e2..e04ddbf 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -975,7 +975,20 @@
int children_count,
int retainers_count);
const char* GetSystemEntryName(HeapObject* object);
+
void ExtractReferences(HeapObject* obj);
+ void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
+ void ExtractJSObjectReferences(HeapEntry* entry, JSObject* js_obj);
+ void ExtractStringReferences(HeapEntry* entry, String* obj);
+ void ExtractContextReferences(HeapEntry* entry, Context* context);
+ void ExtractMapReferences(HeapEntry* entry, Map* map);
+ void ExtractSharedFunctionInfoReferences(HeapEntry* entry,
+ SharedFunctionInfo* shared);
+ void ExtractScriptReferences(HeapEntry* entry, Script* script);
+ void ExtractCodeCacheReferences(HeapEntry* entry, CodeCache* code_cache);
+ void ExtractCodeReferences(HeapEntry* entry, Code* code);
+ void ExtractJSGlobalPropertyCellReferences(HeapEntry* entry,
+ JSGlobalPropertyCell* cell);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
diff --git a/src/regexp.js b/src/regexp.js
index 7bcb612..a574f62 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -278,11 +278,7 @@
function RegExpToString() {
- // If this.source is an empty string, output /(?:)/.
- // http://bugzilla.mozilla.org/show_bug.cgi?id=225550
- // ecma_2/RegExp/properties-001.js.
- var src = this.source ? this.source : '(?:)';
- var result = '/' + src + '/';
+ var result = '/' + this.source + '/';
if (this.global) result += 'g';
if (this.ignoreCase) result += 'i';
if (this.multiline) result += 'm';
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 568e48e..33481d4 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -65,11 +65,17 @@
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
+// If optimization for a function was disabled due to a high deoptimization
+// count, but the function is hot and has been seen on the stack this number
+// of times, then we try to reenable optimization for it.
+static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
+STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
@@ -263,7 +269,8 @@
}
}
- Code* shared_code = function->shared()->code();
+ SharedFunctionInfo* shared = function->shared();
+ Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsMarkedForLazyRecompilation()) {
@@ -273,20 +280,32 @@
shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
- // Do not record non-optimizable functions.
- if (!function->IsOptimizable()) continue;
- if (function->shared()->optimization_disabled()) continue;
-
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
// will be executed only once.
const int kMaxToplevelSourceSize = 10 * 1024;
- if (function->shared()->is_toplevel()
- && (frame_count > 1
- || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
+ if (shared->is_toplevel() &&
+ (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
continue;
}
+ // Do not record non-optimizable functions.
+ if (shared->optimization_disabled()) {
+ if (shared->opt_count() >= Compiler::kDefaultMaxOptCount) {
+ // If optimization was disabled due to many deoptimizations,
+ // then check if the function is hot and try to reenable optimization.
+ int ticks = shared_code->profiler_ticks();
+ if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+ shared_code->set_profiler_ticks(0);
+ shared->TryReenableOptimization();
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ }
+ }
+ continue;
+ }
+ if (!function->IsOptimizable()) continue;
+
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
@@ -309,7 +328,7 @@
}
}
} else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
@@ -319,7 +338,7 @@
} else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
- int function_size = function->shared()->SourceSize();
+ int function_size = shared->SourceSize();
int threshold_size_factor = (function_size > kSizeLimit)
? sampler_threshold_size_factor_
: 1;
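
Together with TryReenableOptimization in objects-inl.h above, this branch gives a still-hot-but-disabled function a slow second chance: every 250 profiler ticks buys one reenable attempt, and only the power-of-two attempts actually flip optimization back on. A standalone sketch of that interaction, with plain ints standing in for the Code and SharedFunctionInfo counters:

#include <cstdio>

static const int kTicksBeforeReenabling = 250;  // fits the one-byte counter

int main() {
  int profiler_ticks = 0;   // stands in for Code::profiler_ticks()
  int reenable_tries = 0;   // stands in for opt_reenable_tries()
  bool disabled = true;
  int samples = 0;
  while (disabled) {
    samples++;
    if (profiler_ticks < kTicksBeforeReenabling) {
      profiler_ticks++;     // hot function: one tick per profiler sample
      continue;
    }
    profiler_ticks = 0;
    int tries = reenable_tries;  // checked before the increment, as in V8
    reenable_tries = tries + 1;
    if (tries >= 4 && ((tries - 1) & tries) == 0) disabled = false;
  }
  std::printf("reenabled after %d samples (stored tries now %d)\n",
              samples, reenable_tries);
  return 0;
}
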
diff --git a/src/runtime.cc b/src/runtime.cc
index 630d3a7..7b7a3e4 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1783,6 +1783,9 @@
ASSERT(args.length() == 5);
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_CHECKED(String, source, 1);
+ // If source is the empty string, we set it to "(?:)" instead, as
+ // suggested by ECMA-262, 5th edition, section 15.10.4.1.
+ if (source->length() == 0) source = isolate->heap()->query_colon_symbol();
Object* global = args[2];
if (!global->IsTrue()) global = isolate->heap()->false_value();
@@ -2882,12 +2885,79 @@
}
+// Two smis before and after the match, for very long strings.
+const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+
+static void SetLastMatchInfoNoCaptures(Handle<String> subject,
+ Handle<JSArray> last_match_info,
+ int match_start,
+ int match_end) {
+ // Fill last_match_info with a single capture.
+ last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
+ AssertNoAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(last_match_info->elements());
+ RegExpImpl::SetLastCaptureCount(elements, 2);
+ RegExpImpl::SetLastInput(elements, *subject);
+ RegExpImpl::SetLastSubject(elements, *subject);
+ RegExpImpl::SetCapture(elements, 0, match_start);
+ RegExpImpl::SetCapture(elements, 1, match_end);
+}
+
+
+template <typename SubjectChar, typename PatternChar>
+static bool SearchStringMultiple(Isolate* isolate,
+ Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
+ String* pattern_string,
+ FixedArrayBuilder* builder,
+ int* match_pos) {
+ int pos = *match_pos;
+ int subject_length = subject.length();
+ int pattern_length = pattern.length();
+ int max_search_start = subject_length - pattern_length;
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
+ while (pos <= max_search_start) {
+ if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+ *match_pos = pos;
+ return false;
+ }
+ // Position of end of previous match.
+ int match_end = pos + pattern_length;
+ int new_pos = search.Search(subject, match_end);
+ if (new_pos >= 0) {
+ // A match.
+ if (new_pos > match_end) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ new_pos);
+ }
+ pos = new_pos;
+ builder->Add(pattern_string);
+ } else {
+ break;
+ }
+ }
+
+ if (pos < max_search_start) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ pos + pattern_length,
+ subject_length);
+ }
+ *match_pos = pos;
+ return true;
+}
+
+
template<typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceStringWithString(
+MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> pattern_regexp,
- Handle<String> replacement) {
+ Handle<String> replacement,
+ Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
@@ -2946,6 +3016,12 @@
subject_pos,
subject_len);
}
+
+ SetLastMatchInfoNoCaptures(subject,
+ last_match_info,
+ indices.at(matches - 1),
+ indices.at(matches - 1) + pattern_len);
+
return *result;
}
@@ -2994,11 +3070,19 @@
compiled_replacement.simple_hint()) {
if (subject_handle->HasOnlyAsciiChars() &&
replacement_handle->HasOnlyAsciiChars()) {
- return StringReplaceStringWithString<SeqAsciiString>(
- isolate, subject_handle, regexp_handle, replacement_handle);
+ return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+ isolate,
+ subject_handle,
+ regexp_handle,
+ replacement_handle,
+ last_match_info_handle);
} else {
- return StringReplaceStringWithString<SeqTwoByteString>(
- isolate, subject_handle, regexp_handle, replacement_handle);
+ return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
+ isolate,
+ subject_handle,
+ regexp_handle,
+ replacement_handle,
+ last_match_info_handle);
}
}
@@ -3087,21 +3171,29 @@
Handle<String> subject_handle(subject);
Handle<JSRegExp> regexp_handle(regexp);
+ Handle<JSArray> last_match_info_handle(last_match_info);
// Shortcut for simple non-regexp global replacements
if (regexp_handle->GetFlags().is_global() &&
regexp_handle->TypeTag() == JSRegExp::ATOM) {
Handle<String> empty_string_handle(HEAP->empty_string());
if (subject_handle->HasOnlyAsciiChars()) {
- return StringReplaceStringWithString<SeqAsciiString>(
- isolate, subject_handle, regexp_handle, empty_string_handle);
+ return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+ isolate,
+ subject_handle,
+ regexp_handle,
+ empty_string_handle,
+ last_match_info_handle);
} else {
- return StringReplaceStringWithString<SeqTwoByteString>(
- isolate, subject_handle, regexp_handle, empty_string_handle);
+ return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
+ isolate,
+ subject_handle,
+ regexp_handle,
+ empty_string_handle,
+ last_match_info_handle);
}
}
- Handle<JSArray> last_match_info_handle(last_match_info);
Handle<Object> match = RegExpImpl::Exec(regexp_handle,
subject_handle,
0,
@@ -3121,6 +3213,10 @@
end = RegExpImpl::GetCapture(match_info_array, 1);
}
+ bool global = regexp_handle->GetFlags().is_global();
+
+ if (start == end && !global) return *subject_handle;
+
int length = subject_handle->length();
int new_length = length - (end - start);
if (new_length == 0) {
@@ -3136,7 +3232,7 @@
}
// If the regexp isn't global, only match once.
- if (!regexp_handle->GetFlags().is_global()) {
+ if (!global) {
if (start > 0) {
String::WriteToFlat(*subject_handle,
answer->GetChars(),
@@ -3635,70 +3731,6 @@
}
-// Two smis before and after the match, for very long strings.
-const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
-
-static void SetLastMatchInfoNoCaptures(Handle<String> subject,
- Handle<JSArray> last_match_info,
- int match_start,
- int match_end) {
- // Fill last_match_info with a single capture.
- last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_info->elements());
- RegExpImpl::SetLastCaptureCount(elements, 2);
- RegExpImpl::SetLastInput(elements, *subject);
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetCapture(elements, 0, match_start);
- RegExpImpl::SetCapture(elements, 1, match_end);
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- String* pattern_string,
- FixedArrayBuilder* builder,
- int* match_pos) {
- int pos = *match_pos;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- int max_search_start = subject_length - pattern_length;
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- // Position of end of previous match.
- int match_end = pos + pattern_length;
- int new_pos = search.Search(subject, match_end);
- if (new_pos >= 0) {
- // A match.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern_string);
- } else {
- break;
- }
- }
-
- if (pos < max_search_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- pos + pattern_length,
- subject_length);
- }
- *match_pos = pos;
- return true;
-}
-
-
static bool SearchStringMultiple(Isolate* isolate,
Handle<String> subject,
Handle<String> pattern,
@@ -3838,6 +3870,8 @@
}
+// Only called from Runtime_RegExpExecMultiple, so it doesn't need to maintain
+// separate last match info. See the comment on that function.
static RegExpImpl::IrregexpResult SearchRegExpMultiple(
Isolate* isolate,
Handle<String> subject,
@@ -3866,10 +3900,6 @@
// End of previous match. Differs from pos if match was empty.
int match_end = 0;
if (result == RegExpImpl::RE_SUCCESS) {
- // Need to keep a copy of the previous match for creating last_match_info
- // at the end, so we have two vectors that we swap between.
- OffsetsVector registers2(required_registers, isolate);
- Vector<int> prev_register_vector(registers2.vector(), registers2.length());
bool first = true;
do {
int match_start = register_vector[0];
@@ -3922,11 +3952,6 @@
elements->set(capture_count + 2, *subject);
builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
}
- // Swap register vectors, so the last successful match is in
- // prev_register_vector.
- Vector<int32_t> tmp = prev_register_vector;
- prev_register_vector = register_vector;
- register_vector = tmp;
if (match_end > match_start) {
pos = match_end;
@@ -3958,12 +3983,12 @@
last_match_array->EnsureSize(last_match_array_size);
AssertNoAllocation no_gc;
FixedArray* elements = FixedArray::cast(last_match_array->elements());
+ // We have to set this even though the rest of the last match array is
+ // ignored.
RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
+ // These are also read without consulting the override.
RegExpImpl::SetLastSubject(elements, *subject);
RegExpImpl::SetLastInput(elements, *subject);
- for (int i = 0; i < last_match_capture_count; i++) {
- RegExpImpl::SetCapture(elements, i, prev_register_vector[i]);
- }
return RegExpImpl::RE_SUCCESS;
}
}
@@ -3972,6 +3997,9 @@
}
+// This is only called for StringReplaceGlobalRegExpWithFunction. This sets
+// lastMatchInfoOverride to maintain the last match info, so we don't need to
+// set any other last match array info.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
ASSERT(args.length() == 4);
HandleScope handles(isolate);
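
SetLastMatchInfoNoCaptures above writes a fixed five-slot shape that the macros.py accessors read back: the capture count in slot 0, the last subject and input strings in slots 1 and 2 (LAST_SUBJECT and LAST_INPUT), then the implicit whole-match capture's start and end. A plain C++ mirror of that shape; the struct is illustrative only, since the real array holds tagged heap values:

#include <cassert>
#include <string>

// Illustrative mirror of the capture-free last-match-info layout.
struct LastMatchInfo {
  int capture_count;         // slot 0: always 2 here (one implicit capture)
  std::string last_subject;  // slot 1: what LAST_SUBJECT(array) reads
  std::string last_input;    // slot 2: what LAST_INPUT(array) reads
  int capture_start;         // slot 3: start of the whole match
  int capture_end;           // slot 4: end of the whole match
};

int main() {
  std::string subject = "say hello twice";
  // Equivalent of SetLastMatchInfoNoCaptures(subject, info, 4, 9).
  LastMatchInfo info = { 2, subject, subject, 4, 9 };
  assert(info.last_subject.substr(info.capture_start,
                                  info.capture_end - info.capture_start)
         == "hello");
  return 0;
}
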
diff --git a/src/serialize.cc b/src/serialize.cc
index 01d5f1c..cf8e5e1 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -244,7 +244,7 @@
"Isolate::" #hacker_name "_address",
FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
NULL
-#undef C
+#undef BUILD_NAME_LITERAL
};
for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 55bf222..ed78fc7 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -295,11 +295,27 @@
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top;
+#ifdef DEBUG
+ // If we are stressing compaction we waste some memory in new space
+ // in order to get more frequent GCs.
+ if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
+ if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+ int filler_size = size_in_bytes * 4;
+ for (int i = 0; i < filler_size; i += kPointerSize) {
+ *(reinterpret_cast<Object**>(old_top + i)) =
+ HEAP->one_pointer_filler_map();
+ }
+ old_top += filler_size;
+ allocation_info_.top += filler_size;
+ }
+ }
+#endif
+
if (allocation_info_.limit - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
}
- Object* obj = HeapObject::FromAddress(allocation_info_.top);
+ Object* obj = HeapObject::FromAddress(old_top);
allocation_info_.top += size_in_bytes;
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
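
The fast path above is a classic bump-pointer allocation: check that limit - top leaves enough room, hand out the old top as the new object's address, and advance top. The DEBUG-only block simply burns four times the request as filler first, so --stress-compaction triggers garbage collections much sooner. A toy sketch of the allocation pattern (not V8 code):

#include <cstddef>

// Toy bump-pointer space mirroring NewSpace::AllocateRaw's fast path.
struct BumpSpace {
  char* top;
  char* limit;

  void* AllocateRaw(int size_in_bytes) {
    char* old_top = top;
    if (limit - old_top < size_in_bytes) return NULL;  // V8: slow path
    top += size_in_bytes;  // reserve the space
    return old_top;        // the object starts at the pre-bump top
  }
};

int main() {
  char buffer[64];
  BumpSpace space = { buffer, buffer + sizeof(buffer) };
  void* a = space.AllocateRaw(16);
  void* b = space.AllocateRaw(16);
  return (a == buffer && b == buffer + 16) ? 0 : 1;
}
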
diff --git a/src/string.js b/src/string.js
index bd996de..1fcabc4 100644
--- a/src/string.js
+++ b/src/string.js
@@ -237,10 +237,28 @@
replace);
}
} else {
- return %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
+ if (lastMatchInfoOverride == null) {
+ return %StringReplaceRegExpWithString(subject,
+ search,
+ TO_STRING_INLINE(replace),
+ lastMatchInfo);
+ } else {
+ // We use this hack to detect whether StringReplaceRegExpWithString
+ // found at least one hit. In that case we need to remove any
+ // override.
+ var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX];
+ lastMatchInfo[LAST_SUBJECT_INDEX] = 0;
+ var answer = %StringReplaceRegExpWithString(subject,
+ search,
+ TO_STRING_INLINE(replace),
+ lastMatchInfo);
+ if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
+ lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
+ } else {
+ lastMatchInfoOverride = null;
+ }
+ return answer;
+ }
}
}
@@ -429,14 +447,22 @@
return subject;
}
var len = res.length;
- var i = 0;
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+ // If the number of captures is two then there are no explicit captures in
+ // the regexp, just the implicit capture that captures the whole match. In
+ // this case we can simplify quite a bit and end up with something faster.
+ // The builder will consist of some integers that indicate slices of the
+ // input string and some replacements that were returned from the replace
+ // function.
var match_start = 0;
var override = new InternalArray(null, 0, subject);
var receiver = %GetDefaultReceiver(replace);
- while (i < len) {
+ for (var i = 0; i < len; i++) {
var elem = res[i];
if (%_IsSmi(elem)) {
+ // Integers represent slices of the original string. Use these to
+ // get the offsets we need for the override array (so things like
+ // RegExp.leftContext work during the callback function).
if (elem > 0) {
match_start = (elem >> 11) + (elem & 0x7ff);
} else {
@@ -448,23 +474,25 @@
lastMatchInfoOverride = override;
var func_result =
%_CallFunction(receiver, elem, match_start, subject, replace);
+ // Overwrite the i'th element in the results with the string we got
+ // back from the callback function.
res[i] = TO_STRING_INLINE(func_result);
match_start += elem.length;
}
- i++;
}
} else {
var receiver = %GetDefaultReceiver(replace);
- while (i < len) {
+ for (var i = 0; i < len; i++) {
var elem = res[i];
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
lastMatchInfoOverride = elem;
var func_result = %Apply(replace, receiver, elem, 0, elem.length);
+ // Overwrite the i'th element in the results with the string we got
+ // back from the callback function.
res[i] = TO_STRING_INLINE(func_result);
}
- i++;
}
}
var resultBuilder = new ReplaceResultBuilder(subject, res);
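
The (elem >> 11) + (elem & 0x7ff) decode above implies how the results array packs subject slices: a slice short enough for an 11-bit length is stored as a single positive smi with the start position in the high bits and the length in the low 11 bits, so start plus length lands right after the slice; longer slices fall back to the two-value form (the "Two smis before and after the match" case in runtime.cc above). A sketch of the packing and the matching decode -- PackSlice is a made-up helper, and the 11-bit split is inferred from the decode expression:

#include <cassert>
#include <cstdint>

// Pack a short subject slice as one value: start in the high bits and an
// 11-bit length in the low bits, so only lengths < 2048 use this form.
static int32_t PackSlice(int32_t start, int32_t length) {
  assert(length >= 0 && length < (1 << 11));
  return (start << 11) | length;
}

int main() {
  int32_t elem = PackSlice(42, 7);
  // Mirrors the string.js decode: the position right after the slice.
  int32_t match_start = (elem >> 11) + (elem & 0x7ff);
  assert(match_start == 49);
  return 0;
}
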
diff --git a/src/utils.cc b/src/utils.cc
index 89ef4c6..7e8c088 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -89,4 +89,19 @@
return buffer_.start();
}
+
+const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
+ switch (divisor) {
+ case 3: return DivMagicNumberFor3;
+ case 5: return DivMagicNumberFor5;
+ case 7: return DivMagicNumberFor7;
+ case 9: return DivMagicNumberFor9;
+ case 11: return DivMagicNumberFor11;
+ case 25: return DivMagicNumberFor25;
+ case 125: return DivMagicNumberFor125;
+ case 625: return DivMagicNumberFor625;
+ default: return InvalidDivMagicNumber;
+ }
+}
+
} } // namespace v8::internal
diff --git a/src/utils.h b/src/utils.h
index 1d40c98..f116c14 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -85,6 +85,32 @@
}
+// Magic numbers for integer division.
+// These are a kind of two's complement reciprocal of the divisors.
+// Details and proofs can be found in:
+// - Hacker's Delight, Henry S. Warren, Jr.
+// - The PowerPC Compiler Writer's Guide
+// and probably many others.
+// See the implementation of the algorithm in
+// lithium-codegen-arm.cc: LCodeGen::TryEmitSignedIntegerDivisionByConstant().
+struct DivMagicNumbers {
+ unsigned M;
+ unsigned s;
+};
+
+const DivMagicNumbers InvalidDivMagicNumber = {0, 0};
+const DivMagicNumbers DivMagicNumberFor3 = {0x55555556, 0};
+const DivMagicNumbers DivMagicNumberFor5 = {0x66666667, 1};
+const DivMagicNumbers DivMagicNumberFor7 = {0x92492493, 2};
+const DivMagicNumbers DivMagicNumberFor9 = {0x38e38e39, 1};
+const DivMagicNumbers DivMagicNumberFor11 = {0x2e8ba2e9, 1};
+const DivMagicNumbers DivMagicNumberFor25 = {0x51eb851f, 3};
+const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
+const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
+
+const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
+
+
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
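
Each {M, s} pair encodes a divisor as a multiply-and-shift: take the high 32 bits of the 64-bit product M * n, add n back when the magic constant is negative as a signed 32-bit value (only the divisor-7 entry above), shift right by s, and add one for negative dividends to truncate toward zero. Below is a standalone sketch of that textbook recipe (Hacker's Delight, chapter 10); it assumes arithmetic right shifts, and V8's real use is the ARM code generator named in the comment above.

#include <cassert>
#include <cstdint>

struct DivMagic { unsigned M; unsigned s; };

// Signed division by a positive constant via its magic number. Assumes '>>'
// on negative values is an arithmetic shift, as on all V8 targets.
static int32_t MagicDivide(int32_t n, DivMagic mag) {
  int32_t M = static_cast<int32_t>(mag.M);  // reinterpret the magic as signed
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(M) * n) >> 32);
  if (M < 0) q += n;  // correction for "negative" magics, e.g. divisor 7
  q >>= mag.s;
  q += static_cast<uint32_t>(n) >> 31;  // round toward zero when n < 0
  return q;
}

int main() {
  const DivMagic kFor7 = { 0x92492493, 2 };  // the divisor-7 entry above
  for (int32_t n = -100; n <= 100; n++) {
    assert(MagicDivide(n, kFor7) == n / 7);
  }
  return 0;
}

This turns each division by 7 into a multiply, an add, and two shifts, which is the payoff the DoMathFloorOfDiv hooks elsewhere in this patch are after on the ports that implement them.
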
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 6db9c77..94a5264 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -236,6 +236,8 @@
SC(math_sin, V8.MathSin) \
SC(math_sqrt, V8.MathSqrt) \
SC(math_tan, V8.MathTan) \
+ SC(array_bounds_checks_seen, V8.ArrayBoundsChecksSeen) \
+ SC(array_bounds_checks_removed, V8.ArrayBoundsChecksRemoved) \
SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
diff --git a/src/version.cc b/src/version.cc
index e3dc525..74e287e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 10
-#define BUILD_NUMBER 5
+#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 40b9a1c..f3046b9 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -458,6 +458,8 @@
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
@@ -465,7 +467,7 @@
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
- unsigned fixed_frame_size = 6 * kPointerSize;
+ unsigned fixed_frame_size = 7 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -534,6 +536,16 @@
top_address + output_offset, output_offset, value);
}
+ // The output frame reflects a JSConstructStubGeneric frame.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(construct_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
@@ -557,8 +569,6 @@
ASSERT(0 == output_offset);
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
intptr_t pc = reinterpret_cast<intptr_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 11517bd..2a9d4f7 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1305,6 +1305,12 @@
}
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index d841baf..87860f4 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -16448,3 +16448,95 @@
TEST(PrimaryStubCache) {
StubCacheHelper(false);
}
+
+
+static int fatal_error_callback_counter = 0;
+static void CountingErrorCallback(const char* location, const char* message) {
+ printf("CountingErrorCallback(\"%s\", \"%s\")\n", location, message);
+ fatal_error_callback_counter++;
+}
+
+
+TEST(StaticGetters) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ i::Handle<i::Object> undefined_value = FACTORY->undefined_value();
+ CHECK(*v8::Utils::OpenHandle(*v8::Undefined()) == *undefined_value);
+ CHECK(*v8::Utils::OpenHandle(*v8::Undefined(isolate)) == *undefined_value);
+ i::Handle<i::Object> null_value = FACTORY->null_value();
+ CHECK(*v8::Utils::OpenHandle(*v8::Null()) == *null_value);
+ CHECK(*v8::Utils::OpenHandle(*v8::Null(isolate)) == *null_value);
+ i::Handle<i::Object> true_value = FACTORY->true_value();
+ CHECK(*v8::Utils::OpenHandle(*v8::True()) == *true_value);
+ CHECK(*v8::Utils::OpenHandle(*v8::True(isolate)) == *true_value);
+ i::Handle<i::Object> false_value = FACTORY->false_value();
+ CHECK(*v8::Utils::OpenHandle(*v8::False()) == *false_value);
+ CHECK(*v8::Utils::OpenHandle(*v8::False(isolate)) == *false_value);
+
+ // Test after-death behavior.
+ CHECK(i::Internals::IsInitialized(isolate));
+ CHECK_EQ(0, fatal_error_callback_counter);
+ v8::V8::SetFatalErrorHandler(CountingErrorCallback);
+ v8::Utils::ReportApiFailure("StaticGetters()", "Kill V8");
+ i::Isolate::Current()->TearDown();
+ CHECK(!i::Internals::IsInitialized(isolate));
+ CHECK_EQ(1, fatal_error_callback_counter);
+ CHECK(v8::Undefined().IsEmpty());
+ CHECK_EQ(2, fatal_error_callback_counter);
+ CHECK(v8::Undefined(isolate).IsEmpty());
+ CHECK_EQ(3, fatal_error_callback_counter);
+ CHECK(v8::Null().IsEmpty());
+ CHECK_EQ(4, fatal_error_callback_counter);
+ CHECK(v8::Null(isolate).IsEmpty());
+ CHECK_EQ(5, fatal_error_callback_counter);
+ CHECK(v8::True().IsEmpty());
+ CHECK_EQ(6, fatal_error_callback_counter);
+ CHECK(v8::True(isolate).IsEmpty());
+ CHECK_EQ(7, fatal_error_callback_counter);
+ CHECK(v8::False().IsEmpty());
+ CHECK_EQ(8, fatal_error_callback_counter);
+ CHECK(v8::False(isolate).IsEmpty());
+ CHECK_EQ(9, fatal_error_callback_counter);
+}
+
+
+TEST(IsolateEmbedderData) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ CHECK_EQ(NULL, isolate->GetData());
+ CHECK_EQ(NULL, ISOLATE->GetData());
+ static void* data1 = reinterpret_cast<void*>(0xacce55ed);
+ isolate->SetData(data1);
+ CHECK_EQ(data1, isolate->GetData());
+ CHECK_EQ(data1, ISOLATE->GetData());
+ static void* data2 = reinterpret_cast<void*>(0xdecea5ed);
+ ISOLATE->SetData(data2);
+ CHECK_EQ(data2, isolate->GetData());
+ CHECK_EQ(data2, ISOLATE->GetData());
+ ISOLATE->TearDown();
+ CHECK_EQ(data2, isolate->GetData());
+ CHECK_EQ(data2, ISOLATE->GetData());
+}
+
+
+TEST(StringEmpty) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ i::Handle<i::Object> empty_string = FACTORY->empty_symbol();
+ CHECK(*v8::Utils::OpenHandle(*v8::String::Empty()) == *empty_string);
+ CHECK(*v8::Utils::OpenHandle(*v8::String::Empty(isolate)) == *empty_string);
+
+ // Test after-death behavior.
+ CHECK(i::Internals::IsInitialized(isolate));
+ CHECK_EQ(0, fatal_error_callback_counter);
+ v8::V8::SetFatalErrorHandler(CountingErrorCallback);
+ v8::Utils::ReportApiFailure("StringEmpty()", "Kill V8");
+ i::Isolate::Current()->TearDown();
+ CHECK(!i::Internals::IsInitialized(isolate));
+ CHECK_EQ(1, fatal_error_callback_counter);
+ CHECK(v8::String::Empty().IsEmpty());
+ CHECK_EQ(2, fatal_error_callback_counter);
+ CHECK(v8::String::Empty(isolate).IsEmpty());
+ CHECK_EQ(3, fatal_error_callback_counter);
+}
diff --git a/test/cctest/test-double.cc b/test/cctest/test-double.cc
index 3594a4f..6ef42c6 100644
--- a/test/cctest/test-double.cc
+++ b/test/cctest/test-double.cc
@@ -112,21 +112,6 @@
}
-TEST(IsNan) {
- CHECK(Double(OS::nan_value()).IsNan());
- uint64_t other_nan = V8_2PART_UINT64_C(0xFFFFFFFF, 00000001);
- CHECK(Double(other_nan).IsNan());
- CHECK(!Double(V8_INFINITY).IsNan());
- CHECK(!Double(-V8_INFINITY).IsNan());
- CHECK(!Double(0.0).IsNan());
- CHECK(!Double(-0.0).IsNan());
- CHECK(!Double(1.0).IsNan());
- CHECK(!Double(-1.0).IsNan());
- uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
- CHECK(!Double(min_double64).IsNan());
-}
-
-
TEST(Sign) {
CHECK_EQ(1, Double(1.0).Sign());
CHECK_EQ(1, Double(V8_INFINITY).Sign());
diff --git a/test/mjsunit/array-bounds-check-removal.js b/test/mjsunit/array-bounds-check-removal.js
new file mode 100644
index 0000000..81064aa
--- /dev/null
+++ b/test/mjsunit/array-bounds-check-removal.js
@@ -0,0 +1,145 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var a = new Int32Array(1024);
+
+function test_base(base, cond) {
+ a[base + 1] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ if (cond) {
+ a[base + 1] = 1;
+ a[base + 2] = 2;
+ a[base + 2] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ } else {
+ a[base + 6] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base - 4;
+ }
+}
+
+function check_test_base(base, cond) {
+ if (cond) {
+ assertEquals(1, a[base + 1]);
+ assertEquals(4, a[base + 2]);
+ assertEquals(base + 4, a[base + 4]);
+ } else {
+ assertEquals(1, a[base + 6]);
+ assertEquals(3, a[base + 3]);
+ assertEquals(4, a[base + 2]);
+ assertEquals(base - 4, a[base + 4]);
+ }
+}
+
+
+function test_minus(base, cond) {
+ a[base - 1] = 1;
+ a[base - 2] = 2;
+ a[base + 4] = 3;
+ a[base] = 4;
+ a[base + 4] = base + 4;
+ if (cond) {
+ a[base - 4] = 1;
+ a[base + 5] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ } else {
+ a[base + 6] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base - 4;
+ }
+}
+
+function check_test_minus(base, cond) {
+ if (cond) {
+ assertEquals(2, a[base + 5]);
+ assertEquals(3, a[base + 3]);
+ assertEquals(4, a[base + 2]);
+ assertEquals(base + 4, a[base + 4]);
+ } else {
+ assertEquals(1, a[base + 6]);
+ assertEquals(3, a[base + 3]);
+ assertEquals(4, a[base + 2]);
+ assertEquals(base - 4, a[base + 4]);
+ }
+}
+
+test_base(1, true);
+test_base(2, true);
+test_base(1, false);
+test_base(2, false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(3, true);
+check_test_base(3, true);
+test_base(3, false);
+check_test_base(3, false);
+
+test_minus(5, true);
+test_minus(6, true);
+%OptimizeFunctionOnNextCall(test_minus);
+test_minus(7, true);
+check_test_minus(7, true);
+test_minus(7, false);
+check_test_minus(7, false);
+
+// Optimization status (%GetOptimizationStatus return codes):
+// YES: 1
+// NO: 2
+// ALWAYS: 3
+// NEVER: 4
+
+if (false) {
+  test_base(5, true);
+  test_base(6, true);
+  test_base(5, false);
+  test_base(6, false);
+  %OptimizeFunctionOnNextCall(test_base);
+  test_base(-2, true);
+  assertTrue(%GetOptimizationStatus(test_base) != 1);
+
+  test_base(5, true);
+  test_base(6, true);
+  test_base(5, false);
+  test_base(6, false);
+  %OptimizeFunctionOnNextCall(test_base);
+  test_base(2048, true);
+  assertTrue(%GetOptimizationStatus(test_base) != 1);
+}
+
+gc();
+
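[Editor's note] The pattern above, a run of stores at small constant offsets
from the same base, is presumably what the bounds-check elimination targets:
checks covering a contiguous offset range can be reduced to checking only its
extremes. A minimal sketch of that idea (our own example, not part of this
change; run in d8 with --allow-natives-syntax):

var buf = new Int32Array(64);
function fill(base) {
  buf[base] = 1;      // lower extreme of the offset range
  buf[base + 2] = 2;  // covered once base and base + 4 are checked
  buf[base + 3] = 3;  // likewise
  buf[base + 4] = 4;  // upper extreme
}
fill(0);
fill(8);
%OptimizeFunctionOnNextCall(fill);
fill(16);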
diff --git a/test/mjsunit/math-floor-of-div.js b/test/mjsunit/math-floor-of-div.js
new file mode 100644
index 0000000..e917182
--- /dev/null
+++ b/test/mjsunit/math-floor-of-div.js
@@ -0,0 +1,216 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse_inlining
+
+// Use this function as a reference. Make sure it is not inlined.
+function div(a, b) {
+ return a / b;
+}
+
+var limit = 0x1000000;
+var exhaustive_limit = 100;
+var step = 10;
+var values = [0x10000001,
+ 0x12345678,
+ -0x789abcdf, // 0x87654321
+ 0x01234567,
+ 0x76543210,
+ -0x80000000, // 0x80000000
+ 0x7fffffff,
+ -0x0fffffff, // 0xf0000001
+ 0x00000010,
+ -0x01000000 // 0xff000000
+ ];
+
+function test_div() {
+ var c = 0;
+ for (var k = 0; k <= limit; k++) {
+ if (k > exhaustive_limit) { c += step; k += c; }
+ assertEquals(Math.floor(div(k, 1)), Math.floor(k / 1));
+ assertEquals(Math.floor(div(k, -1)), Math.floor(k / -1));
+ assertEquals(Math.floor(div(k, 2)), Math.floor(k / 2));
+ assertEquals(Math.floor(div(k, -2)), Math.floor(k / -2));
+ assertEquals(Math.floor(div(k, 3)), Math.floor(k / 3));
+ assertEquals(Math.floor(div(k, -3)), Math.floor(k / -3));
+ assertEquals(Math.floor(div(k, 4)), Math.floor(k / 4));
+ assertEquals(Math.floor(div(k, -4)), Math.floor(k / -4));
+ assertEquals(Math.floor(div(k, 5)), Math.floor(k / 5));
+ assertEquals(Math.floor(div(k, -5)), Math.floor(k / -5));
+ assertEquals(Math.floor(div(k, 6)), Math.floor(k / 6));
+ assertEquals(Math.floor(div(k, -6)), Math.floor(k / -6));
+ assertEquals(Math.floor(div(k, 7)), Math.floor(k / 7));
+ assertEquals(Math.floor(div(k, -7)), Math.floor(k / -7));
+ assertEquals(Math.floor(div(k, 8)), Math.floor(k / 8));
+ assertEquals(Math.floor(div(k, -8)), Math.floor(k / -8));
+ assertEquals(Math.floor(div(k, 9)), Math.floor(k / 9));
+ assertEquals(Math.floor(div(k, -9)), Math.floor(k / -9));
+ assertEquals(Math.floor(div(k, 10)), Math.floor(k / 10));
+ assertEquals(Math.floor(div(k, -10)), Math.floor(k / -10));
+ assertEquals(Math.floor(div(k, 11)), Math.floor(k / 11));
+ assertEquals(Math.floor(div(k, -11)), Math.floor(k / -11));
+ assertEquals(Math.floor(div(k, 12)), Math.floor(k / 12));
+ assertEquals(Math.floor(div(k, -12)), Math.floor(k / -12));
+ assertEquals(Math.floor(div(k, 13)), Math.floor(k / 13));
+ assertEquals(Math.floor(div(k, -13)), Math.floor(k / -13));
+ assertEquals(Math.floor(div(k, 14)), Math.floor(k / 14));
+ assertEquals(Math.floor(div(k, -14)), Math.floor(k / -14));
+ assertEquals(Math.floor(div(k, 15)), Math.floor(k / 15));
+ assertEquals(Math.floor(div(k, -15)), Math.floor(k / -15));
+ assertEquals(Math.floor(div(k, 16)), Math.floor(k / 16));
+ assertEquals(Math.floor(div(k, -16)), Math.floor(k / -16));
+ assertEquals(Math.floor(div(k, 17)), Math.floor(k / 17));
+ assertEquals(Math.floor(div(k, -17)), Math.floor(k / -17));
+ assertEquals(Math.floor(div(k, 18)), Math.floor(k / 18));
+ assertEquals(Math.floor(div(k, -18)), Math.floor(k / -18));
+ assertEquals(Math.floor(div(k, 19)), Math.floor(k / 19));
+ assertEquals(Math.floor(div(k, -19)), Math.floor(k / -19));
+ assertEquals(Math.floor(div(k, 20)), Math.floor(k / 20));
+ assertEquals(Math.floor(div(k, -20)), Math.floor(k / -20));
+ assertEquals(Math.floor(div(k, 21)), Math.floor(k / 21));
+ assertEquals(Math.floor(div(k, -21)), Math.floor(k / -21));
+ assertEquals(Math.floor(div(k, 22)), Math.floor(k / 22));
+ assertEquals(Math.floor(div(k, -22)), Math.floor(k / -22));
+ assertEquals(Math.floor(div(k, 23)), Math.floor(k / 23));
+ assertEquals(Math.floor(div(k, -23)), Math.floor(k / -23));
+ assertEquals(Math.floor(div(k, 24)), Math.floor(k / 24));
+ assertEquals(Math.floor(div(k, -24)), Math.floor(k / -24));
+ assertEquals(Math.floor(div(k, 25)), Math.floor(k / 25));
+ assertEquals(Math.floor(div(k, -25)), Math.floor(k / -25));
+ assertEquals(Math.floor(div(k, 125)), Math.floor(k / 125));
+ assertEquals(Math.floor(div(k, -125)), Math.floor(k / -125));
+ assertEquals(Math.floor(div(k, 625)), Math.floor(k / 625));
+ assertEquals(Math.floor(div(k, -625)), Math.floor(k / -625));
+ }
+ c = 0;
+ for (var k = 0; k <= limit; k++) {
+ if (k > exhaustive_limit) { c += step; k += c; }
+ assertEquals(Math.floor(div(-k, 1)), Math.floor(-k / 1));
+ assertEquals(Math.floor(div(-k, -1)), Math.floor(-k / -1));
+ assertEquals(Math.floor(div(-k, 2)), Math.floor(-k / 2));
+ assertEquals(Math.floor(div(-k, -2)), Math.floor(-k / -2));
+ assertEquals(Math.floor(div(-k, 3)), Math.floor(-k / 3));
+ assertEquals(Math.floor(div(-k, -3)), Math.floor(-k / -3));
+ assertEquals(Math.floor(div(-k, 4)), Math.floor(-k / 4));
+ assertEquals(Math.floor(div(-k, -4)), Math.floor(-k / -4));
+ assertEquals(Math.floor(div(-k, 5)), Math.floor(-k / 5));
+ assertEquals(Math.floor(div(-k, -5)), Math.floor(-k / -5));
+ assertEquals(Math.floor(div(-k, 6)), Math.floor(-k / 6));
+ assertEquals(Math.floor(div(-k, -6)), Math.floor(-k / -6));
+ assertEquals(Math.floor(div(-k, 7)), Math.floor(-k / 7));
+ assertEquals(Math.floor(div(-k, -7)), Math.floor(-k / -7));
+ assertEquals(Math.floor(div(-k, 8)), Math.floor(-k / 8));
+ assertEquals(Math.floor(div(-k, -8)), Math.floor(-k / -8));
+ assertEquals(Math.floor(div(-k, 9)), Math.floor(-k / 9));
+ assertEquals(Math.floor(div(-k, -9)), Math.floor(-k / -9));
+ assertEquals(Math.floor(div(-k, 10)), Math.floor(-k / 10));
+ assertEquals(Math.floor(div(-k, -10)), Math.floor(-k / -10));
+ assertEquals(Math.floor(div(-k, 11)), Math.floor(-k / 11));
+ assertEquals(Math.floor(div(-k, -11)), Math.floor(-k / -11));
+ assertEquals(Math.floor(div(-k, 12)), Math.floor(-k / 12));
+ assertEquals(Math.floor(div(-k, -12)), Math.floor(-k / -12));
+ assertEquals(Math.floor(div(-k, 13)), Math.floor(-k / 13));
+ assertEquals(Math.floor(div(-k, -13)), Math.floor(-k / -13));
+ assertEquals(Math.floor(div(-k, 14)), Math.floor(-k / 14));
+ assertEquals(Math.floor(div(-k, -14)), Math.floor(-k / -14));
+ assertEquals(Math.floor(div(-k, 15)), Math.floor(-k / 15));
+ assertEquals(Math.floor(div(-k, -15)), Math.floor(-k / -15));
+ assertEquals(Math.floor(div(-k, 16)), Math.floor(-k / 16));
+ assertEquals(Math.floor(div(-k, -16)), Math.floor(-k / -16));
+ assertEquals(Math.floor(div(-k, 17)), Math.floor(-k / 17));
+ assertEquals(Math.floor(div(-k, -17)), Math.floor(-k / -17));
+ assertEquals(Math.floor(div(-k, 18)), Math.floor(-k / 18));
+ assertEquals(Math.floor(div(-k, -18)), Math.floor(-k / -18));
+ assertEquals(Math.floor(div(-k, 19)), Math.floor(-k / 19));
+ assertEquals(Math.floor(div(-k, -19)), Math.floor(-k / -19));
+ assertEquals(Math.floor(div(-k, 20)), Math.floor(-k / 20));
+ assertEquals(Math.floor(div(-k, -20)), Math.floor(-k / -20));
+ assertEquals(Math.floor(div(-k, 21)), Math.floor(-k / 21));
+ assertEquals(Math.floor(div(-k, -21)), Math.floor(-k / -21));
+ assertEquals(Math.floor(div(-k, 22)), Math.floor(-k / 22));
+ assertEquals(Math.floor(div(-k, -22)), Math.floor(-k / -22));
+ assertEquals(Math.floor(div(-k, 23)), Math.floor(-k / 23));
+ assertEquals(Math.floor(div(-k, -23)), Math.floor(-k / -23));
+ assertEquals(Math.floor(div(-k, 24)), Math.floor(-k / 24));
+ assertEquals(Math.floor(div(-k, -24)), Math.floor(-k / -24));
+ assertEquals(Math.floor(div(-k, 25)), Math.floor(-k / 25));
+ assertEquals(Math.floor(div(-k, -25)), Math.floor(-k / -25));
+ assertEquals(Math.floor(div(-k, 125)), Math.floor(-k / 125));
+ assertEquals(Math.floor(div(-k, -125)), Math.floor(-k / -125));
+ assertEquals(Math.floor(div(-k, 625)), Math.floor(-k / 625));
+ assertEquals(Math.floor(div(-k, -625)), Math.floor(-k / -625));
+ }
+ // Test for edge cases.
+ // Use (values[key] | 0) to force the integer type.
+ for (var i = 0; i < values.length; i++) {
+ for (var j = 0; j < values.length; j++) {
+ assertEquals(Math.floor(div((values[i] | 0), (values[j] | 0))),
+ Math.floor((values[i] | 0) / (values[j] | 0)));
+ assertEquals(Math.floor(div(-(values[i] | 0), (values[j] | 0))),
+ Math.floor(-(values[i] | 0) / (values[j] | 0)));
+ assertEquals(Math.floor(div((values[i] | 0), -(values[j] | 0))),
+ Math.floor((values[i] | 0) / -(values[j] | 0)));
+ assertEquals(Math.floor(div(-(values[i] | 0), -(values[j] | 0))),
+ Math.floor(-(values[i] | 0) / -(values[j] | 0)));
+ }
+ }
+}
+
+test_div();
+%OptimizeFunctionOnNextCall(test_div);
+test_div();
+
+// Test for negative zero and overflow.
+// The tests are kept in separate functions so that a deoptimization in one
+// does not disable the optimized code of the other.
+
+function IsNegativeZero(x) {
+ assertTrue(x == 0); // Is 0 or -0.
+ var y = 1 / x;
+ assertFalse(isFinite(y));
+ return y < 0;
+}
+
+function test_div_deopt_minus_zero() {
+ var zero_in_array = [0];
+ assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+}
+
+function test_div_deopt_overflow() {
+ // We box the value in an array to avoid constant propagation.
+ var min_int_in_array = [-2147483648];
+ // We use '| 0' to force the representation to int32.
+ assertEquals(-min_int_in_array[0],
+ Math.floor((min_int_in_array[0] | 0) / -1));
+}
+
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow);
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
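[Editor's note] For reference, a sketch of the flooring int32 division that
test_div validates (the helper and its name are ours, not code from this
change; assumes b != 0):

function flooringDiv(a, b) {
  var q = (a / b) | 0;                          // truncating int32 division
  if (q * b !== a && (a < 0) !== (b < 0)) q--;  // round toward -infinity
  return q;
}
// flooringDiv(7, -2) === -4 === Math.floor(7 / -2)
// The two deopt hazards, neither representable as an int32 result:
//   Math.floor((0 | 0) / -1) is -0
//   Math.floor((-2147483648 | 0) / -1) is 2147483648 (wraps in int32)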
diff --git a/test/mjsunit/regexp-capture-3.js b/test/mjsunit/regexp-capture-3.js
index 2662d72..b8f1cfc 100644
--- a/test/mjsunit/regexp-capture-3.js
+++ b/test/mjsunit/regexp-capture-3.js
@@ -86,3 +86,73 @@
for (var i = 3; i < 10; i++) {
assertEquals("", RegExp['$' + i]);
}
+
+
+function Override() {
+  // Sets the internal lastMatchInfoOverride. After calling this, we perform
+  // a normal match and verify that the override was cleared and that the
+  // new captures were recorded.
+ "abcdabcd".replace(/(b)(c)/g, function() { });
+}
+
+
+function TestOverride(input, expect, property, re_src) {
+ var re = new RegExp(re_src);
+ var re_g = new RegExp(re_src, "g");
+
+ function OverrideCase(fn) {
+ Override();
+ fn();
+ assertEquals(expect, RegExp[property]);
+ }
+
+ OverrideCase(function() { return input.replace(re, "x"); });
+ OverrideCase(function() { return input.replace(re_g, "x"); });
+ OverrideCase(function() { return input.replace(re, ""); });
+ OverrideCase(function() { return input.replace(re_g, ""); });
+ OverrideCase(function() { return input.match(re); });
+ OverrideCase(function() { return input.match(re_g); });
+ OverrideCase(function() { return re.test(input); });
+ OverrideCase(function() { return re_g.test(input); });
+}
+
+var input = "bar.foo baz......";
+var re_str = "(ba.).*?f";
+TestOverride(input, "bar", "$1", re_str);
+
+input = "foo bar baz";
+re_str = "bar";
+TestOverride(input, "bar", "$&", re_str);
+
+
+function no_last_match(fn) {
+ fn();
+ assertEquals("hestfisk", RegExp.$1);
+}
+
+/(hestfisk)/.test("There's no such thing as a hestfisk!");
+
+no_last_match(function() { "foo".replace("f", ""); });
+no_last_match(function() { "foo".replace("f", "f"); });
+no_last_match(function() { "foo".split("o"); });
+
+var base = "In the music. In the music. ";
+var cons = base + base + base + base;
+no_last_match(function() { cons.replace("x", "y"); });
+no_last_match(function() { cons.replace("e", "E"); });
+
+
+// Here's one that matches once, then tries to match again, but fails.
+// Verify that the last match info is from the last match, not from the
+// failure that came after.
+"bar.foo baz......".replace(/(ba.).*?f/g, function() { return "x";});
+assertEquals("bar", RegExp.$1);
+
+
+// A test that initially does a zero-width match, but later does a
+// non-zero-width match.
+var a = "foo bar baz".replace(/^|bar/g, "");
+assertEquals("foo baz", a);
+
+a = "foo bar baz".replace(/^|bar/g, "*");
+assertEquals("*foo * baz", a);
diff --git a/test/mjsunit/regress/regress-1217.js b/test/mjsunit/regress/regress-1217.js
index 6530549..e00d537 100644
--- a/test/mjsunit/regress/regress-1217.js
+++ b/test/mjsunit/regress/regress-1217.js
@@ -30,7 +30,7 @@
var proto = RegExp.prototype;
assertEquals("[object RegExp]", Object.prototype.toString.call(proto));
-assertEquals("", proto.source);
+assertEquals("(?:)", proto.source);
assertEquals(false, proto.global);
assertEquals(false, proto.multiline);
assertEquals(false, proto.ignoreCase);
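[Editor's note] The updated expectation reflects that source must round-trip
through a regexp literal. A short sketch of the observable behavior (our own
example):

var empty = new RegExp("");
// empty.source === "(?:)"  -- a bare "//" would parse as a line comment
var slash = new RegExp("/");
// slash.source === "\\/"   -- so "/" + slash.source + "/" reparses correctly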
diff --git a/test/mjsunit/regress/regress-124594.js b/test/mjsunit/regress/regress-124594.js
new file mode 100644
index 0000000..d51e1f6
--- /dev/null
+++ b/test/mjsunit/regress/regress-124594.js
@@ -0,0 +1,50 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// Test that a GC inside a constructor frame is correctly handled right
+// after we deoptimize from an inlined constructor to a constructor stub
+// stack frame.
+
+function f(deopt) {
+ var x = 1;
+ if (deopt) {
+ x = x + "foo";
+ gc();
+ }
+ this.x = x;
+}
+
+function g(deopt) {
+ return new f(deopt);
+}
+
+assertEquals({x:1}, g(false));
+assertEquals({x:1}, g(false));
+%OptimizeFunctionOnNextCall(g);
+assertEquals({x:"1foo"}, g(true));
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 9eafb4b..c30be5e 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -600,6 +600,12 @@
js1_2/regexp/hexadecimal: FAIL_OK
+# The source field of RegExp objects is properly escaped. We match JSC.
+ecma_2/RegExp/constructor-001: FAIL_OK
+ecma_2/RegExp/function-001: FAIL_OK
+ecma_2/RegExp/properties-001: FAIL_OK
+
+
##################### FAILING TESTS #####################
# This section is for tests that fail in V8 and pass in JSC.
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index 5cda6fd..52d126e 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -124,6 +124,16 @@
S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos
S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos
+# The source field of RegExp objects is properly escaped. We match JSC.
+S15.10.4.1_A3_T1: FAIL_OK
+S15.10.4.1_A3_T2: FAIL_OK
+S15.10.4.1_A3_T3: FAIL_OK
+S15.10.4.1_A3_T4: FAIL_OK
+S15.10.4.1_A3_T5: FAIL_OK
+S15.10.4.1_A4_T2: FAIL_OK
+S15.10.4.1_A4_T3: FAIL_OK
+S15.10.4.1_A4_T5: FAIL_OK
+
##################### ES3 TESTS #########################
# These tests check for ES3 semantics, and differ from ES5.
# When we follow ES5 semantics, it's ok to fail the test.
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 98e27ba..c755289 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -33,9 +33,15 @@
# '__proto__' should be treated as a normal property in JSON.
S15.12.2_A1: FAIL
+# The sequencing of getter side effects on the receiver and on argument
+# properties is wrong: the receiver's getter should be invoked before
+# any arguments are evaluated.
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
11.2.3-3_3: FAIL
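[Editor's note] A sketch of the ordering the test expects (our own example):

var log = [];
var o = {};
Object.defineProperty(o, "f", {
  get: function () { log.push("get f"); return function () {}; }
});
o.f(log.push("arg"));
// Per spec, log is ["get f", "arg"]; the bug tracked above reportedly
// evaluates the argument expression first.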
+# Prototypal inheritance of properties does not maintain accessibility.
+# The [[CanPut]] operation should traverse the prototype chain to
+# determine whether a given property is writable.
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1475
8.14.4-8-b_1: FAIL
8.14.4-8-b_2: FAIL
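[Editor's note] And a sketch of the assignment behavior those entries cover
(our own example):

var proto = {};
Object.defineProperty(proto, "x", { value: 1, writable: false });
var child = Object.create(proto);
child.x = 2;  // must fail: "x" is non-writable up the prototype chain
// Per spec, child.x === 1 and child gets no own "x"; the bug lets the
// assignment create one.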
@@ -44,9 +50,6 @@
15.2.3.6-4-415: FAIL
15.2.3.6-4-420: FAIL
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1982
-15.10.4.1-5: FAIL
-
##################### DELIBERATE INCOMPATIBILITIES #####################
# We deliberately treat arguments to parseInt() with a leading zero as