Revision 2.4.4.
Fix a bug that caused hangs on very large sparse arrays.
Try harder to free up memory when running out of space.
Add heap snapshot serialization in JSON format to the API.
Recalibrate benchmarks.
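
The new HeapSnapshot::Serialize entry point (src/api.cc below) streams a
snapshot as ASCII JSON through an embedder-supplied v8::OutputStream. A
minimal usage sketch, assuming the OutputStream virtuals (GetChunkSize,
WriteAsciiChunk, EndOfStream) declared in v8-profiler.h at this
revision; FileStream and DumpSnapshot are illustrative names:

    #include <v8-profiler.h>
    #include <cstdio>

    class FileStream : public v8::OutputStream {
     public:
      explicit FileStream(FILE* file) : file_(file) {}
      virtual void EndOfStream() { fflush(file_); }
      virtual int GetChunkSize() { return 4096; }  // Must be > 0 (ApiCheck).
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        size_t written = fwrite(data, 1, size, file_);
        return written == static_cast<size_t>(size) ? kContinue : kAbort;
      }
     private:
      FILE* file_;
    };

    void DumpSnapshot(const v8::HeapSnapshot* snapshot, FILE* out) {
      FileStream stream(out);
      snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
    }
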
Review URL: http://codereview.chromium.org/3421009
git-svn-id: http://v8.googlecode.com/svn/trunk@5462 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 0d01fcc..e09d4c9 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4739,6 +4739,23 @@
}
+void HeapSnapshot::Serialize(OutputStream* stream,
+ HeapSnapshot::SerializationFormat format) const {
+ IsDeadCheck("v8::HeapSnapshot::Serialize");
+ ApiCheck(format == kJSON,
+ "v8::HeapSnapshot::Serialize",
+ "Unknown serialization format");
+ ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
+ "v8::HeapSnapshot::Serialize",
+ "Unsupported output encoding");
+ ApiCheck(stream->GetChunkSize() > 0,
+ "v8::HeapSnapshot::Serialize",
+ "Invalid stream chunk size");
+ i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
+ serializer.Serialize(stream);
+}
+
+
int HeapProfiler::GetSnapshotsCount() {
IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index fa93030..8f801cf 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -930,6 +930,24 @@
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_two_smis);
+ __ sub(r0, r1, r0);
+ __ b(vc, &smi_done);
+ // Correct the sign in case of overflow.
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+ __ bind(&smi_done);
+ __ Ret();
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ orr(r2, r1, r0);
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(nz, "CompareStub: unexpected smi operands.");
+ }
+
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -2288,7 +2306,7 @@
__ push(r0);
__ TailCallRuntime(Runtime::kStackGuard, 1, 1);
- __ StubReturn(1);
+ __ Ret();
}
@@ -2299,32 +2317,37 @@
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &try_float);
+ if (include_smi_code_) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- if (negative_zero_ == kStrictNegativeZero) {
- // If we have to check for zero, then we can check for the max negative
- // smi while we are at it.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, &slow);
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ StubReturn(1);
- } else {
- // The value of the expression is a smi and 0 is OK for -0. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
- __ StubReturn(1, vc);
- // We don't have to reverse the optimistic neg since the only case
- // where we fall through is the minimum negative Smi, which is the case
- // where the neg leaves the register unchanged.
- __ jmp(&slow); // Go slow on max negative Smi.
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ if (negative_zero_ == kStrictNegativeZero) {
+ // If we have to check for zero, then we can check for the max negative
+ // smi while we are at it.
+ __ bic(ip, r0, Operand(0x80000000), SetCC);
+ __ b(eq, &slow);
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+ __ Ret();
+ } else {
+ // The value of the expression is a smi and 0 is OK for -0. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
+ __ Ret(vc);
+ // We don't have to reverse the optimistic neg since the only case
+ // where we fall through is the minimum negative Smi, which is the case
+ // where the neg leaves the register unchanged.
+ __ jmp(&slow); // Go slow on max negative Smi.
+ }
+ __ bind(&try_float);
+ } else if (FLAG_debug_code) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected smi operand.");
}
- __ bind(&try_float);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r1, heap_number_map);
@@ -2344,6 +2367,19 @@
__ mov(r0, Operand(r1));
}
} else if (op_ == Token::BIT_NOT) {
+ if (include_smi_code_) {
+ Label non_smi;
+ __ BranchOnNotSmi(r0, &non_smi);
+ __ mvn(r0, Operand(r0));
+ // Bit-clear inverted smi-tag.
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ __ Ret();
+ __ bind(&non_smi);
+ } else if (FLAG_debug_code) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected smi operand.");
+ }
+
// Check if the operand is a heap number.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@@ -2391,7 +2427,7 @@
}
__ bind(&done);
- __ StubReturn(1);
+ __ Ret();
// Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
@@ -3499,6 +3535,11 @@
include_number_compare_name = "_NO_NUMBER";
}
+ const char* include_smi_compare_name = "";
+ if (!include_smi_compare_) {
+ include_smi_compare_name = "_NO_SMI";
+ }
+
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"CompareStub_%s%s%s%s%s%s",
cc_name,
@@ -3506,7 +3547,8 @@
rhs_name,
strict_name,
never_nan_nan_name,
- include_number_compare_name);
+ include_number_compare_name,
+ include_smi_compare_name);
return name_;
}
@@ -3522,7 +3564,8 @@
| RegisterField::encode(lhs_.is(r0))
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
}
@@ -4144,17 +4187,21 @@
// Check bounds and smi-ness.
- __ ldr(r7, MemOperand(sp, kToOffset));
- __ ldr(r6, MemOperand(sp, kFromOffset));
+ Register to = r6;
+ Register from = r7;
+ __ Ldrd(to, from, MemOperand(sp, kToOffset));
+ STATIC_ASSERT(kFromOffset == kToOffset + 4);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r7, ASR, 1), SetCC);
- __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
- // If either r2 or r6 had the smi tag bit set, then carry is set now.
+ __ mov(r2, Operand(to, ASR, 1), SetCC);
+ __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then carry is set now.
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
__ b(mi, &runtime); // From is negative.
+ // Both to and from are smis.
+
__ sub(r2, r2, Operand(r3), SetCC);
__ b(mi, &runtime); // Fail if from > to.
// Special handling of sub-strings of length 1 and 2. One character strings
@@ -4165,8 +4212,8 @@
// r2: length
// r3: from index (untagged smi)
- // r6: from (smi)
- // r7: to (smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
// Make sure first argument is a sequential (or flat) string.
__ ldr(r5, MemOperand(sp, kStringOffset));
@@ -4178,10 +4225,10 @@
// r1: instance type
// r2: length
- // r3: from index (untaged smi)
+ // r3: from index (untagged smi)
// r5: string
- // r6: from (smi)
- // r7: to (smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
Label seq_string;
__ and_(r4, r1, Operand(kStringRepresentationMask));
STATIC_ASSERT(kSeqStringTag < kConsStringTag);
@@ -4207,17 +4254,18 @@
// r2: length
// r3: from index (untagged smi)
// r5: string
- // r6: from (smi)
- // r7: to (smi)
+ // r6 (a.k.a. to): to (smi)
+ // r7 (a.k.a. from): from offset (smi)
__ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
- __ cmp(r4, Operand(r7));
+ __ cmp(r4, Operand(to));
__ b(lt, &runtime); // Fail if to > length.
+ to = no_reg;
// r1: instance type.
// r2: result string length.
// r3: from index (untagged smi)
// r5: string.
- // r6: from offset (smi)
+ // r7 (a.k.a. from): from offset (smi)
// Check for flat ascii string.
Label non_ascii_flat;
__ tst(r1, Operand(kStringEncodingMask));
@@ -4259,12 +4307,12 @@
// r0: result string.
// r2: result string length.
// r5: string.
- // r6: from offset (smi)
+ // r7 (a.k.a. from): from offset (smi)
// Locate first character of result.
__ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Locate 'from' character of string.
__ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r6, ASR, 1));
+ __ add(r5, r5, Operand(from, ASR, 1));
// r0: result string.
// r1: first character of result string.
@@ -4280,7 +4328,7 @@
__ bind(&non_ascii_flat);
// r2: result string length.
// r5: string.
- // r6: from offset (smi)
+ // r7 (a.k.a. from): from offset (smi)
// Check for flat two byte string.
// Allocate the result.
@@ -4292,18 +4340,19 @@
// Locate first character of result.
__ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// As "from" is a smi it is 2 times the value which matches the size of a two
// byte character.
- __ add(r5, r5, Operand(r6));
+ __ add(r5, r5, Operand(from));
+ from = no_reg;
// r0: result string.
// r1: first character of result.
// r2: result length.
// r5: first character of string to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -4379,8 +4428,7 @@
// Stack frame on entry.
// sp[0]: right string
// sp[4]: left string
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
+ __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1.
Label not_same;
__ cmp(r0, r1);
@@ -4395,12 +4443,12 @@
__ bind(&not_same);
// Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+ __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
// Compare flat ascii strings natively. Remove arguments from stack first.
__ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+ GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
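
The smi fast path added to CompareStub::Generate above returns the
difference of the two tagged smis, whose sign encodes the comparison
result; the conditional rsb corrects the sign when the 32-bit
subtraction overflows. A hedged, self-contained C++ restatement, with
the CPU's wrapping arithmetic modeled on unsigned to stay well-defined:

    #include <cstdint>

    int32_t SmiCompareSketch(int32_t lhs, int32_t rhs) {  // Tagged smis.
      uint32_t raw = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
      int32_t diff = static_cast<int32_t>(raw);        // '__ sub(r0, r1, r0)'
      int64_t exact = static_cast<int64_t>(lhs) - rhs;
      if (exact == diff) return diff;                  // '__ b(vc, &smi_done)'
      // Overflow: the truncated difference has the wrong sign, so
      // negate it ('__ rsb(r0, r0, Operand(0, RelocInfo::NONE))').
      return static_cast<int32_t>(0u - raw);
    }

Callers only test the sign of the returned register (cmp r0, #0), so
the magnitude after the correction is irrelevant.
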
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index f985fb4..6ba166f 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1651,7 +1651,7 @@
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
- CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
+ CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump();
@@ -5985,6 +5985,7 @@
GenericUnaryOpStub stub(
Token::SUB,
overwrite,
+ NO_UNARY_FLAGS,
no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
frame_->CallStub(&stub, 0);
frame_->EmitPush(r0); // r0 has result
@@ -6009,7 +6010,9 @@
not_smi_label.Bind();
frame_->SpillAll();
__ Move(r0, tos);
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ GenericUnaryOpStub stub(Token::BIT_NOT,
+ overwrite,
+ NO_UNARY_SMI_CODE_IN_STUB);
frame_->CallStub(&stub, 0);
frame_->EmitPush(r0);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 162d97f..d4c3522 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -271,10 +271,6 @@
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- // If the name is an inline runtime function call return the number of
- // expected arguments. Otherwise return -1.
- static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
@@ -290,6 +286,12 @@
}
private:
+ // Type of a member function that generates inline code for a native function.
+ typedef void (CodeGenerator::*InlineFunctionGenerator)
+ (ZoneList<Expression*>*);
+
+ static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -447,13 +449,9 @@
void Branch(bool if_true, JumpTarget* target);
void CheckStack();
- struct InlineRuntimeLUT {
- void (CodeGenerator::*method)(ZoneList<Expression*>*);
- const char* name;
- int nargs;
- };
+ static InlineFunctionGenerator FindInlineFunctionGenerator(
+ Runtime::FunctionId function_id);
- static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static Handle<Code> ComputeLazyCompile(int argc);
@@ -599,8 +597,6 @@
// Size of inlined write barriers generated by EmitNamedStore.
static int inlined_write_barrier_size_;
- static InlineRuntimeLUT kInlineRuntimeLUT[];
-
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0f8f6d4..c776d67 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -672,7 +672,8 @@
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
- if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
@@ -684,7 +685,10 @@
__ bind(&slow_case);
}
- CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
+ CompareFlags flags = inline_smi_code
+ ? NO_SMI_COMPARE_IN_STUB
+ : NO_COMPARE_FLAGS;
+ CompareStub stub(eq, true, flags, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &next_test);
@@ -2888,7 +2892,9 @@
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite);
+ GenericUnaryOpStub stub(Token::SUB,
+ overwrite,
+ NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.
VisitForValue(expr->expression(), kAccumulator);
@@ -2903,7 +2909,8 @@
// in the accumulator register r0.
VisitForValue(expr->expression(), kAccumulator);
Label done;
- if (ShouldInlineSmiCase(expr->op())) {
+ bool inline_smi_code = ShouldInlineSmiCase(expr->op());
+ if (inline_smi_code) {
Label call_stub;
__ BranchOnNotSmi(r0, &call_stub);
__ mvn(r0, Operand(r0));
@@ -2913,9 +2920,12 @@
__ bind(&call_stub);
}
bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOpFlags flags = inline_smi_code
+ ? NO_UNARY_SMI_CODE_IN_STUB
+ : NO_UNARY_FLAGS;
UnaryOverwriteMode mode =
overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode);
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
__ CallStub(&stub);
__ bind(&done);
Apply(context_, r0);
@@ -3292,7 +3302,8 @@
UNREACHABLE();
}
- if (ShouldInlineSmiCase(op)) {
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
__ BranchOnNotSmi(r2, &slow_case);
@@ -3300,8 +3311,10 @@
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
-
- CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
+ CompareFlags flags = inline_smi_code
+ ? NO_SMI_COMPARE_IN_STUB
+ : NO_COMPARE_FLAGS;
+ CompareStub stub(cc, strict, flags, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0, RelocInfo::NONE));
Split(cc, if_true, if_false, fall_through);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 3554431..0e2c49e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1242,15 +1242,6 @@
}
-void MacroAssembler::StubReturn(int argc, Condition cond) {
- ASSERT(argc >= 1 && generating_stub());
- if (argc > 1) {
- add(sp, sp, Operand((argc - 1) * kPointerSize), LeaveCC, cond);
- }
- Ret(cond);
-}
-
-
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index febd87e..48a8059 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -531,9 +531,6 @@
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc, Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 98a5cf6..912d43d 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -340,27 +340,40 @@
};
+enum UnaryOpFlags {
+ NO_UNARY_FLAGS = 0,
+ NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
+};
+
+
class GenericUnaryOpStub : public CodeStub {
public:
GenericUnaryOpStub(Token::Value op,
UnaryOverwriteMode overwrite,
+ UnaryOpFlags flags,
NegativeZeroHandling negative_zero = kStrictNegativeZero)
- : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
+ : op_(op),
+ overwrite_(overwrite),
+ include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
+ negative_zero_(negative_zero) { }
private:
Token::Value op_;
UnaryOverwriteMode overwrite_;
+ bool include_smi_code_;
NegativeZeroHandling negative_zero_;
class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
- class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
- class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
+ class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
+ class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
+ class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) |
- OverwriteField::encode(overwrite_) |
- NegativeZeroField::encode(negative_zero_);
+ OverwriteField::encode(overwrite_) |
+ IncludeSmiCodeField::encode(include_smi_code_) |
+ NegativeZeroField::encode(negative_zero_);
}
void Generate(MacroAssembler* masm);
@@ -375,22 +388,43 @@
};
+// Flags that control the compare stub code generation.
+enum CompareFlags {
+ NO_COMPARE_FLAGS = 0,
+ NO_SMI_COMPARE_IN_STUB = 1 << 0,
+ NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
+ CANT_BOTH_BE_NAN = 1 << 2
+};
+
+
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc,
bool strict,
- NaNInformation nan_info = kBothCouldBeNaN,
- bool include_number_compare = true,
- Register lhs = no_reg,
- Register rhs = no_reg) :
+ CompareFlags flags,
+ Register lhs,
+ Register rhs) :
cc_(cc),
strict_(strict),
- never_nan_nan_(nan_info == kCantBothBeNaN),
- include_number_compare_(include_number_compare),
+ never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+ include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+ include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(lhs),
rhs_(rhs),
name_(NULL) { }
+ CompareStub(Condition cc,
+ bool strict,
+ CompareFlags flags) :
+ cc_(cc),
+ strict_(strict),
+ never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+ include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+ include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
+ lhs_(no_reg),
+ rhs_(no_reg),
+ name_(NULL) { }
+
void Generate(MacroAssembler* masm);
private:
@@ -406,6 +440,10 @@
// comparison code is used when the number comparison has been inlined, and
// the stub will be called if one of the operands is not a number.
bool include_number_compare_;
+
+ // Generate the comparison code for two smi operands in the stub.
+ bool include_smi_compare_;
+
// Register holding the left hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register lhs_;
@@ -413,12 +451,13 @@
// a choice, no_reg otherwise.
Register rhs_;
- // Encoding of the minor key CCCCCCCCCCCCRCNS.
+ // Encoding of the minor key in 16 bits.
class StrictField: public BitField<bool, 0, 1> {};
class NeverNanNanField: public BitField<bool, 1, 1> {};
class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class RegisterField: public BitField<bool, 3, 1> {};
- class ConditionField: public BitField<int, 4, 12> {};
+ class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
+ class RegisterField: public BitField<bool, 4, 1> {};
+ class ConditionField: public BitField<int, 5, 11> {};
Major MajorKey() { return Compare; }
@@ -436,11 +475,13 @@
const char* GetName();
#ifdef DEBUG
void Print() {
- PrintF("CompareStub (cc %d), (strict %s), "
- "(never_nan_nan %s), (number_compare %s) ",
+ PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
+ "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
+ MinorKey(),
static_cast<int>(cc_),
strict_ ? "true" : "false",
never_nan_nan_ ? "true" : "false",
+ include_smi_compare_ ? "included" : "not included",
include_number_compare_ ? "included" : "not included");
if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
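
The minor key now packs six fields into 16 bits. A hedged, minimal
stand-in for the BitField template this relies on (V8's real definition
lives in utils.h) shows how the new IncludeSmiCompareField bit is
encoded and decoded:

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    // Mirrors the CompareStub layout after this patch: bit 0 strict,
    // bit 1 never-NaN-NaN, bit 2 number compare, bit 3 smi compare,
    // bit 4 register choice, bits 5..15 condition code.
    typedef BitField<bool, 3, 1> IncludeSmiCompareField;
    // IncludeSmiCompareField::encode(true) == 8;
    // IncludeSmiCompareField::decode(8) == true.
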
diff --git a/src/codegen.cc b/src/codegen.cc
index 148cefc..daf1c0d 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -344,40 +344,35 @@
}
-// List of special runtime calls which are generated inline. For some of these
-// functions the code will be generated inline, and for others a call to a code
-// stub will be inlined.
+// Lookup table for code generators for special runtime calls which are
+// generated inline.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
+ &CodeGenerator::Generate##Name,
-#define INLINE_RUNTIME_ENTRY(Name, argc, ressize) \
- {&CodeGenerator::Generate##Name, "_" #Name, argc}, \
-
-CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY)
+const CodeGenerator::InlineFunctionGenerator
+ CodeGenerator::kInlineFunctionGenerators[] = {
+ INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+ INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-#undef INLINE_RUNTIME_ENTRY
-CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
- Handle<String> name) {
- const int entries_count =
- sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
- for (int i = 0; i < entries_count; i++) {
- InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
- if (name->IsEqualTo(CStrVector(entry->name))) {
- return entry;
- }
- }
- return NULL;
+CodeGenerator::InlineFunctionGenerator
+ CodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
+ return kInlineFunctionGenerators[
+ static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
}
bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
Handle<String> name = node->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
- if (entry != NULL) {
- ((*this).*(entry->method))(args);
+ Runtime::Function* function = node->function();
+ if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
+ InlineFunctionGenerator generator =
+ FindInlineFunctionGenerator(function->function_id);
+ if (generator != NULL) {
+ ((*this).*(generator))(args);
return true;
}
}
@@ -385,14 +380,6 @@
}
-int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
- CodeGenerator::InlineRuntimeLUT* f =
- CodeGenerator::FindInlineRuntimeLUT(name);
- if (f != NULL) return f->nargs;
- return -1;
-}
-
-
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
diff --git a/src/codegen.h b/src/codegen.h
index aa2d442..2a4d9d4 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -71,48 +71,6 @@
// CodeForDoWhileConditionPosition
// CodeForSourcePosition
-
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
- F(IsSmi, 1, 1) \
- F(IsNonNegativeSmi, 1, 1) \
- F(IsArray, 1, 1) \
- F(IsRegExp, 1, 1) \
- F(CallFunction, -1 /* receiver + n args + function */, 1) \
- F(IsConstructCall, 0, 1) \
- F(ArgumentsLength, 0, 1) \
- F(Arguments, 1, 1) \
- F(ClassOf, 1, 1) \
- F(ValueOf, 1, 1) \
- F(SetValueOf, 2, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(ObjectEquals, 2, 1) \
- F(Log, 3, 1) \
- F(RandomHeapNumber, 0, 1) \
- F(IsObject, 1, 1) \
- F(IsFunction, 1, 1) \
- F(IsUndetectableObject, 1, 1) \
- F(IsSpecObject, 1, 1) \
- F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
- F(StringAdd, 2, 1) \
- F(SubString, 3, 1) \
- F(StringCompare, 2, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(RegExpCloneResult, 1, 1) \
- F(GetFromCache, 2, 1) \
- F(NumberToString, 1, 1) \
- F(SwapElements, 3, 1) \
- F(MathPow, 2, 1) \
- F(MathSin, 1, 1) \
- F(MathCos, 1, 1) \
- F(MathSqrt, 1, 1) \
- F(IsRegExpEquivalent, 2, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1)
-
-
#if V8_TARGET_ARCH_IA32
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a63088d..263a2a4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -174,6 +174,10 @@
DEFINE_int(max_stack_trace_source_length, 300,
"maximum length of function source code printed in a stack trace.")
+// full-codegen.cc
+DEFINE_bool(always_inline_smi_code, false,
+ "always inline smi code in non-opt code")
+
// heap.cc
DEFINE_int(max_new_space_size, 0, "max size of the new generation")
DEFINE_int(max_old_space_size, 0, "max size of the old generation")
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 7de4a00..a1c5ec3 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -324,15 +324,11 @@
bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
- // TODO(kasperl): Once the compare stub allows leaving out the
- // inlined smi case, we should get rid of this check.
- if (Token::IsCompareOp(op)) return true;
- // TODO(kasperl): Once the unary bit not stub allows leaving out
- // the inlined smi case, we should get rid of this check.
- if (op == Token::BIT_NOT) return true;
// Inline smi case inside loops, but not division and modulo which
// are too complicated and take up too much space.
- return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0);
+ if (op == Token::DIV || op == Token::MOD) return false;
+ if (FLAG_always_inline_smi_code) return true;
+ return loop_depth_ > 0;
}
@@ -505,18 +501,36 @@
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- SmartPointer<char> cstring = name->ToCString();
+// Lookup table for code generators for special runtime calls which are
+// generated inline.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
+ &FullCodeGenerator::Emit##Name,
-#define CHECK_EMIT_INLINE_CALL(name, x, y) \
- if (strcmp("_"#name, *cstring) == 0) { \
- Emit##name(expr->arguments()); \
- return; \
- }
- INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
-#undef CHECK_EMIT_INLINE_CALL
- UNREACHABLE();
+const FullCodeGenerator::InlineFunctionGenerator
+ FullCodeGenerator::kInlineFunctionGenerators[] = {
+ INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+ INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+ };
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+
+
+FullCodeGenerator::InlineFunctionGenerator
+ FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
+ return kInlineFunctionGenerators[
+ static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
+}
+
+
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
+ ZoneList<Expression*>* args = node->arguments();
+ Handle<String> name = node->name();
+ Runtime::Function* function = node->function();
+ ASSERT(function != NULL);
+ ASSERT(function->intrinsic_type == Runtime::INLINE);
+ InlineFunctionGenerator generator =
+ FindInlineFunctionGenerator(function->function_id);
+ ASSERT(generator != NULL);
+ ((*this).*(generator))(args);
}
diff --git a/src/full-codegen.h b/src/full-codegen.h
index ab0fd36..9db233c 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -243,6 +243,12 @@
kRightConstant
};
+ // Type of a member function that generates inline code for a native function.
+ typedef void (FullCodeGenerator::*InlineFunctionGenerator)
+ (ZoneList<Expression*>*);
+
+ static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
// Compute the frame pointer relative offset for a given local or
// parameter slot.
int SlotOffset(Slot* slot);
@@ -373,10 +379,13 @@
void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
// Platform-specific code for inline runtime calls.
+ InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
+
void EmitInlineRuntimeCall(CallRuntime* expr);
#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
void Emit##name(ZoneList<Expression*>* arguments);
+ INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
diff --git a/src/global-handles.cc b/src/global-handles.cc
index f089b85..a909caf 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -372,13 +372,14 @@
int post_gc_processing_count = 0;
-void GlobalHandles::PostGarbageCollectionProcessing() {
+bool GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
// At the same time deallocate all DESTROYED nodes.
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count;
+ bool weak_callback_invoked = false;
Node** p = &head_;
while (*p != NULL) {
if ((*p)->PostGarbageCollectionProcessing()) {
@@ -389,6 +390,7 @@
// restart the processing).
break;
}
+ weak_callback_invoked = true;
}
if ((*p)->state_ == Node::DESTROYED) {
// Delete the link.
@@ -407,6 +409,7 @@
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
+ return weak_callback_invoked;
}
diff --git a/src/global-handles.h b/src/global-handles.h
index 659f86e..c4c59fd 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -95,8 +95,9 @@
// Tells whether global handle is weak.
static bool IsWeak(Object** location);
- // Process pending weak handles.
- static void PostGarbageCollectionProcessing();
+ // Process pending weak handles. Returns true if any weak handle
+ // callback has been invoked.
+ static bool PostGarbageCollectionProcessing();
// Iterates over all strong handles.
static void IterateStrongRoots(ObjectVisitor* v);
diff --git a/src/handles.cc b/src/handles.cc
index 0146401..655254c 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -31,7 +31,6 @@
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
-#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "execution.h"
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 0d1ad5a..8f7dd3b 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -35,6 +35,16 @@
namespace v8 {
namespace internal {
+void Heap::UpdateOldSpaceLimits() {
+ int old_gen_size = PromotedSpaceSize();
+ old_gen_promotion_limit_ =
+ old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+ old_gen_allocation_limit_ =
+ old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+ old_gen_exhausted_ = false;
+}
+
+
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
@@ -403,7 +413,7 @@
} \
if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
Counters::gc_last_resort_from_handles.Increment(); \
- Heap::CollectAllGarbage(false); \
+ Heap::CollectAllAvailableGarbage(); \
{ \
AlwaysAllocateScope __scope__; \
__object__ = FUNCTION_CALL; \
diff --git a/src/heap.cc b/src/heap.cc
index 443c926..650800f 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -55,7 +55,6 @@
String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
-
NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
@@ -64,9 +63,6 @@
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
-static const int kMinimumPromotionLimit = 2*MB;
-static const int kMinimumAllocationLimit = 8*MB;
-
int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
@@ -405,17 +401,26 @@
}
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(bool force_compaction,
+ CollectionPolicy collectionPolicy) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
MarkCompactCollector::SetForceCompaction(force_compaction);
- CollectGarbage(0, OLD_POINTER_SPACE);
+ CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
MarkCompactCollector::SetForceCompaction(false);
}
-bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
+void Heap::CollectAllAvailableGarbage() {
+ CompilationCache::Clear();
+ CollectAllGarbage(true, AGGRESSIVE);
+}
+
+
+bool Heap::CollectGarbage(int requested_size,
+ AllocationSpace space,
+ CollectionPolicy collectionPolicy) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@@ -442,7 +447,7 @@
? &Counters::gc_scavenger
: &Counters::gc_compactor;
rate->Start();
- PerformGarbageCollection(space, collector, &tracer);
+ PerformGarbageCollection(collector, &tracer, collectionPolicy);
rate->Stop();
GarbageCollectionEpilogue();
@@ -475,7 +480,7 @@
void Heap::PerformScavenge() {
GCTracer tracer;
- PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
+ PerformGarbageCollection(SCAVENGER, &tracer, NORMAL);
}
@@ -664,9 +669,9 @@
survival_rate_ = survival_rate;
}
-void Heap::PerformGarbageCollection(AllocationSpace space,
- GarbageCollector collector,
- GCTracer* tracer) {
+void Heap::PerformGarbageCollection(GarbageCollector collector,
+ GCTracer* tracer,
+ CollectionPolicy collectionPolicy) {
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
@@ -696,25 +701,45 @@
UpdateSurvivalRateTrend(start_new_space_size);
- int old_gen_size = PromotedSpaceSize();
- old_gen_promotion_limit_ =
- old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
- old_gen_allocation_limit_ =
- old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+ UpdateOldSpaceLimits();
- if (high_survival_rate_during_scavenges &&
- IsStableOrIncreasingSurvivalTrend()) {
- // Stable high survival rates of young objects both during partial and
- // full collection indicate that mutator is either building or modifying
- // a structure with a long lifetime.
- // In this case we aggressively raise old generation memory limits to
- // postpone subsequent mark-sweep collection and thus trade memory
- // space for the mutation speed.
- old_gen_promotion_limit_ *= 2;
- old_gen_allocation_limit_ *= 2;
+ // Major GC invokes weak handle callbacks on weakly reachable
+ // handles, but does not collect the weakly reachable objects
+ // themselves until the next major GC. Therefore, when collecting
+ // aggressively, if a weak handle callback has been invoked we rerun
+ // major GC to release the objects that have become garbage.
+ if (collectionPolicy == AGGRESSIVE) {
+ // Note: as weak callbacks can execute arbitrary code, we cannot
+ // hope that eventually there will be no weak callback invocations.
+ // Therefore we stop recollecting after several attempts.
+ const int kMaxNumberOfAttempts = 7;
+ for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
+ { DisableAssertNoAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
+ }
+ MarkCompact(tracer);
+ // Weak handle callbacks can allocate data, so keep limits correct.
+ UpdateOldSpaceLimits();
+ }
+ } else {
+ if (high_survival_rate_during_scavenges &&
+ IsStableOrIncreasingSurvivalTrend()) {
+ // Stable high survival rates of young objects both during partial and
+ // full collection indicate that mutator is either building or modifying
+ // a structure with a long lifetime.
+ // In this case we aggressively raise old generation memory limits to
+ // postpone subsequent mark-sweep collection and thus trade memory
+ // space for the mutation speed.
+ old_gen_promotion_limit_ *= 2;
+ old_gen_allocation_limit_ *= 2;
+ }
}
- old_gen_exhausted_ = false;
+ { DisableAssertNoAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ GlobalHandles::PostGarbageCollectionProcessing();
+ }
} else {
tracer_ = tracer;
Scavenge();
@@ -725,12 +750,6 @@
Counters::objs_since_last_young.Set(0);
- if (collector == MARK_COMPACTOR) {
- DisableAssertNoAllocation allow_allocation;
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- GlobalHandles::PostGarbageCollectionProcessing();
- }
-
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing();
@@ -1834,6 +1853,13 @@
CreateFixedStubs();
+ // Allocate the dictionary of intrinsic function names.
+ obj = StringDictionary::Allocate(Runtime::kNumFunctions);
+ if (obj->IsFailure()) return false;
+ obj = Runtime::InitializeIntrinsicFunctionNames(obj);
+ if (obj->IsFailure()) return false;
+ set_intrinsic_function_names(StringDictionary::cast(obj));
+
if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character ASCII strings.
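
The AGGRESSIVE loop above exists because a weak handle callback runs
after the collection that discovered the handle, so anything it
releases needs one more mark-compact before it can be reclaimed. A
hedged embedder-side illustration, assuming the Persistent::MakeWeak
API of this era (Buffer, CreateWrapper and the size are illustrative):

    #include <v8.h>

    struct Buffer {
      explicit Buffer(size_t n) : data(new char[n]) {}
      ~Buffer() { delete[] data; }
      char* data;
    };

    v8::Handle<v8::Object> CreateWrapper();  // Hypothetical factory.

    // Runs inside GlobalHandles::PostGarbageCollectionProcessing().
    static void CleanupWrapper(v8::Persistent<v8::Value> object,
                               void* parameter) {
      delete static_cast<Buffer*>(parameter);  // Frees external memory.
      object.Dispose();  // The wrapper becomes garbage that only a
                         // subsequent mark-compact can reclaim.
    }

    void Register() {
      v8::Persistent<v8::Object> wrapper =
          v8::Persistent<v8::Object>::New(CreateWrapper());
      wrapper.MakeWeak(new Buffer(16 * 1024 * 1024), CleanupWrapper);
    }
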
diff --git a/src/heap.h b/src/heap.h
index 484cd22..cfb3b6a 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -114,6 +114,7 @@
V(Object, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
+ V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
@@ -686,13 +687,21 @@
static void GarbageCollectionPrologue();
static void GarbageCollectionEpilogue();
+ enum CollectionPolicy { NORMAL, AGGRESSIVE };
+
// Performs garbage collection operation.
// Returns whether required_space bytes are available after the collection.
- static bool CollectGarbage(int required_space, AllocationSpace space);
+ static bool CollectGarbage(int required_space,
+ AllocationSpace space,
+ CollectionPolicy collectionPolicy = NORMAL);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
- static void CollectAllGarbage(bool force_compaction);
+ static void CollectAllGarbage(bool force_compaction,
+ CollectionPolicy collectionPolicy = NORMAL);
+
+ // Last hope GC, should try to squeeze as much as possible.
+ static void CollectAllAvailableGarbage();
// Notify the heap that a context has been disposed.
static int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -1213,9 +1222,14 @@
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Performs garbage collection
- static void PerformGarbageCollection(AllocationSpace space,
- GarbageCollector collector,
- GCTracer* tracer);
+ static void PerformGarbageCollection(GarbageCollector collector,
+ GCTracer* tracer,
+ CollectionPolicy collectionPolicy);
+
+ static const int kMinimumPromotionLimit = 2 * MB;
+ static const int kMinimumAllocationLimit = 8 * MB;
+
+ inline static void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 366b91e..dccf36b 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1879,36 +1879,36 @@
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
+ Label slow, done, undo;
if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
+ if (include_smi_code_) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
- if (negative_zero_ == kStrictNegativeZero) {
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
+ if (negative_zero_ == kStrictNegativeZero) {
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
+ }
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(overflow, &undo, not_taken);
+ __ StubReturn(1);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ } else if (FLAG_debug_code) {
+ __ AbortIfSmi(eax);
}
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- Label undo;
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(no_overflow, &done, taken);
-
- // Restore eax and go slow case.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
- __ jmp(&slow);
-
- // Try floating point case.
- __ bind(&try_float);
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow);
@@ -1928,6 +1928,18 @@
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
}
} else if (op_ == Token::BIT_NOT) {
+ if (include_smi_code_) {
+ Label non_smi;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &non_smi);
+ __ not_(eax);
+ __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
+ __ ret(0);
+ __ bind(&non_smi);
+ } else if (FLAG_debug_code) {
+ __ AbortIfSmi(eax);
+ }
+
// Check if the operand is a heap number.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
@@ -1978,6 +1990,10 @@
__ bind(&done);
__ StubReturn(1);
+ // Restore eax and go slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+
// Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(ecx); // pop return address.
@@ -2613,6 +2629,27 @@
Label check_unequal_objects, done;
+ // Compare two smis if required.
+ if (include_smi_compare_) {
+ Label non_smi, smi_done;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &non_smi, not_taken);
+ __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done);
+ __ neg(edx); // Correct sign in case of overflow.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
+ } else if (FLAG_debug_code) {
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected smi operands.");
+ }
+
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -3501,7 +3538,8 @@
| RegisterField::encode(false) // lhs_ and rhs_ are not used
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
}
@@ -3541,12 +3579,18 @@
include_number_compare_name = "_NO_NUMBER";
}
+ const char* include_smi_compare_name = "";
+ if (!include_smi_compare_) {
+ include_smi_compare_name = "_NO_SMI";
+ }
+
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
+ "CompareStub_%s%s%s%s%s",
cc_name,
strict_name,
never_nan_nan_name,
- include_number_compare_name);
+ include_number_compare_name,
+ include_smi_compare_name);
return name_;
}
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 854052a..86f3877 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -2646,6 +2646,19 @@
}
+static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
+ bool inline_number_compare) {
+ CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
+ if (nan_info == kCantBothBeNaN) {
+ flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
+ }
+ if (inline_number_compare) {
+ flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
+ }
+ return flags;
+}
+
+
void CodeGenerator::Comparison(AstNode* node,
Condition cc,
bool strict,
@@ -2773,7 +2786,9 @@
// Setup and call the compare stub.
is_not_string.Bind(&left_side);
- CompareStub stub(cc, strict, kCantBothBeNaN);
+ CompareFlags flags =
+ static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
+ CompareStub stub(cc, strict, flags);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ cmp(result.reg(), 0);
@@ -2867,7 +2882,8 @@
// End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined.
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
+ CompareStub stub(cc, strict, flags);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ test(answer.reg(), Operand(answer.reg()));
answer.Unuse();
@@ -2900,7 +2916,9 @@
// End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined.
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ CompareFlags flags =
+ ComputeCompareFlags(nan_info, inline_number_compare);
+ CompareStub stub(cc, strict, flags);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ test(answer.reg(), Operand(answer.reg()));
answer.Unuse();
@@ -2994,7 +3012,6 @@
dest->false_target()->Branch(zero);
} else {
// Do the smi check, then the comparison.
- JumpTarget is_not_smi;
__ test(left_reg, Immediate(kSmiTagMask));
is_smi.Branch(zero, left_side, right_side);
}
@@ -3031,7 +3048,9 @@
}
// Setup and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
+ CompareFlags flags =
+ static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
+ CompareStub stub(cc, strict, flags);
Result result = frame_->CallStub(&stub, left_side, right_side);
result.ToRegister();
__ test(result.reg(), Operand(result.reg()));
@@ -8146,6 +8165,7 @@
GenericUnaryOpStub stub(
Token::SUB,
overwrite,
+ NO_UNARY_FLAGS,
no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -8173,7 +8193,9 @@
__ test(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand, taken);
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ GenericUnaryOpStub stub(Token::BIT_NOT,
+ overwrite,
+ NO_UNARY_SMI_CODE_IN_STUB);
Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index adc0005..c4a03d1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -345,10 +345,6 @@
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
- // If the name is an inline runtime function call return the number of
- // expected arguments. Otherwise return -1.
- static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
// Return a position of the element at |index_as_smi| + |additional_offset|
// in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi.
static Operand FixedArrayElementOperand(Register array,
@@ -363,6 +359,12 @@
}
private:
+ // Type of a member function that generates inline code for a native function.
+ typedef void (CodeGenerator::*InlineFunctionGenerator)
+ (ZoneList<Expression*>*);
+
+ static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -624,13 +626,9 @@
void CheckStack();
- struct InlineRuntimeLUT {
- void (CodeGenerator::*method)(ZoneList<Expression*>*);
- const char* name;
- int nargs;
- };
+ static InlineFunctionGenerator FindInlineFunctionGenerator(
+ Runtime::FunctionId function_id);
- static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -792,8 +790,6 @@
// in a spilled state.
bool in_spilled_code_;
- static InlineRuntimeLUT kInlineRuntimeLUT[];
-
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 3d1653c..1e65c4b 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -684,7 +684,8 @@
// Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
- if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
@@ -697,7 +698,10 @@
__ bind(&slow_case);
}
- CompareStub stub(equal, true);
+ CompareFlags flags = inline_smi_code
+ ? NO_SMI_COMPARE_IN_STUB
+ : NO_COMPARE_FLAGS;
+ CompareStub stub(equal, true, flags);
__ CallStub(&stub);
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
@@ -988,6 +992,7 @@
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
__ call(ic, mode);
+ __ nop(); // Signal no inlined code.
}
@@ -3138,7 +3143,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
__ call(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
+ // Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
// Call the C runtime function.
@@ -3257,7 +3262,7 @@
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register eax.
VisitForValue(expr->expression(), kAccumulator);
@@ -3272,7 +3277,8 @@
// in the accumulator register eax.
VisitForValue(expr->expression(), kAccumulator);
Label done;
- if (ShouldInlineSmiCase(expr->op())) {
+ bool inline_smi_case = ShouldInlineSmiCase(expr->op());
+ if (inline_smi_case) {
Label call_stub;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &call_stub);
@@ -3284,7 +3290,10 @@
bool overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode mode =
overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode);
+ UnaryOpFlags flags = inline_smi_case
+ ? NO_UNARY_SMI_CODE_IN_STUB
+ : NO_UNARY_FLAGS;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
__ CallStub(&stub);
__ bind(&done);
Apply(context_, eax);
@@ -3483,6 +3492,7 @@
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
if (where == kStack) __ push(eax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
@@ -3672,7 +3682,8 @@
UNREACHABLE();
}
- if (ShouldInlineSmiCase(op)) {
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ if (inline_smi_code) {
Label slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
@@ -3683,7 +3694,10 @@
__ bind(&slow_case);
}
- CompareStub stub(cc, strict);
+ CompareFlags flags = inline_smi_code
+ ? NO_SMI_COMPARE_IN_STUB
+ : NO_COMPARE_FLAGS;
+ CompareStub stub(cc, strict, flags);
__ CallStub(&stub);
__ test(eax, Operand(eax));
Split(cc, if_true, if_false, fall_through);
diff --git a/src/parser.cc b/src/parser.cc
index 11e2eb5..856c474 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -996,21 +996,23 @@
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
if (id == 0) {
// Put (symbol_id_ + 1) into entry and increment it.
- symbol_id_++;
- entry->value = reinterpret_cast<void*>(symbol_id_);
+ id = ++symbol_id_;
+ entry->value = reinterpret_cast<void*>(id);
Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
entry->key = &symbol[0];
- } else {
- // Log a reuse of an earlier seen symbol.
- symbol_store_.Add(start);
- symbol_store_.Add(id - 1);
}
+ symbol_store_.Add(id - 1);
}
virtual Vector<unsigned> ExtractData() {
int function_size = function_store_.size();
+ // Add terminator to symbols, then pad to unsigned size.
int symbol_size = symbol_store_.size();
- int total_size = ScriptDataImpl::kHeaderSize + function_size + symbol_size;
+ int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned));
+ symbol_store_.AddBlock(padding, ScriptDataImpl::kNumberTerminator);
+ symbol_size += padding;
+ int total_size = ScriptDataImpl::kHeaderSize + function_size
+ + (symbol_size / sizeof(unsigned));
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size;
preamble_[ScriptDataImpl::kSymbolCountOffset] = symbol_id_;
@@ -1020,8 +1022,9 @@
function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize,
symbol_start));
}
- if (symbol_size > 0) {
- symbol_store_.WriteTo(data.SubVector(symbol_start, total_size));
+ if (!has_error()) {
+ symbol_store_.WriteTo(
+ Vector<byte>::cast(data.SubVector(symbol_start, total_size)));
}
return data;
}
@@ -1029,12 +1032,7 @@
virtual int symbol_position() { return symbol_store_.size(); }
virtual int symbol_ids() { return symbol_id_; }
private:
- Collector<unsigned> symbol_store_;
- Collector<Vector<const char> > symbol_entries_;
- HashMap symbol_table_;
- int symbol_id_;
-
- static int vector_hash(Vector<const char> string) {
+ static int vector_hash(Vector<const char> string) {
int hash = 0;
for (int i = 0; i < string.length(); i++) {
int c = string[i];
@@ -1052,6 +1050,14 @@
if (string2->length() != length) return false;
return memcmp(string1->start(), string2->start(), length) == 0;
}
+
+ // Write a non-negative number to the symbol store.
+ void WriteNumber(int number);
+
+ Collector<byte> symbol_store_;
+ Collector<Vector<const char> > symbol_entries_;
+ HashMap symbol_table_;
+ int symbol_id_;
};
@@ -1076,18 +1082,11 @@
}
-int ScriptDataImpl::GetSymbolIdentifier(int start) {
- int next = symbol_index_ + 2;
- if (next <= store_.length()
- && static_cast<int>(store_[symbol_index_]) == start) {
- symbol_index_ = next;
- return store_[next - 1];
- }
- return symbol_id_++;
+int ScriptDataImpl::GetSymbolIdentifier() {
+ return ReadNumber(&symbol_data_);
}
-
bool ScriptDataImpl::SanityCheck() {
// Check that the header data is valid and doesn't specify
// point to positions outside the store.
@@ -1118,7 +1117,7 @@
int symbol_count =
static_cast<int>(store_[ScriptDataImpl::kSymbolCountOffset]);
if (symbol_count < 0) return false;
- // Check that the total size has room both function entries.
+ // Check that the total size has room for header and function entries.
int minimum_size =
ScriptDataImpl::kHeaderSize + functions_size;
if (store_.length() < minimum_size) return false;
@@ -1158,6 +1157,22 @@
}
+void CompleteParserRecorder::WriteNumber(int number) {
+ ASSERT(number >= 0);
+
+ int mask = (1 << 28) - 1;
+ for (int i = 28; i > 0; i -= 7) {
+ if (number > mask) {
+ symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
+ number &= mask;
+ }
+ mask >>= 7;
+ }
+ symbol_store_.Add(static_cast<byte>(number));
+}
+
+
const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
int length = start[0];
char* result = NewArray<char>(length + 1);
@@ -1206,7 +1221,8 @@
Vector<const char*> ScriptDataImpl::BuildArgs() {
int arg_count = Read(kMessageArgCountPos);
const char** array = NewArray<const char*>(arg_count);
- // Position after the string starting at position 3.
+ // Position after the message text: skip the length word and the
+ // words holding the text itself.
int pos = kMessageTextPos + 1 + Read(kMessageTextPos);
for (int i = 0; i < arg_count; i++) {
int count = 0;
@@ -1287,7 +1303,8 @@
public:
CompletePreParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension)
- : PreParser(script, allow_natives_syntax, extension, &recorder_) { }
+ : PreParser(script, allow_natives_syntax, extension, &recorder_),
+ recorder_() { }
virtual PartialParserRecorder* recorder() { return &recorder_; }
private:
CompleteParserRecorder recorder_;
@@ -1298,7 +1315,8 @@
public:
PartialPreParser(Handle<Script> script, bool allow_natives_syntax,
v8::Extension* extension)
- : PreParser(script, allow_natives_syntax, extension, &recorder_) { }
+ : PreParser(script, allow_natives_syntax, extension, &recorder_),
+ recorder_() { }
virtual PartialParserRecorder* recorder() { return &recorder_; }
private:
PartialParserRecorder recorder_;
@@ -1639,17 +1657,12 @@
Handle<String> Parser::GetSymbol(bool* ok) {
- if (pre_data() != NULL) {
- int symbol_id =
- pre_data()->GetSymbolIdentifier(scanner_.location().beg_pos);
- if (symbol_id < 0) {
- ReportInvalidPreparseData(Factory::empty_symbol(), ok);
- return Handle<String>::null();
- }
- return factory()->LookupSymbol(symbol_id, scanner_.literal());
- }
log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
- return factory()->LookupSymbol(-1, scanner_.literal());
+ int symbol_id = -1;
+ if (pre_data() != NULL) {
+ symbol_id = pre_data()->GetSymbolIdentifier();
+ }
+ return factory()->LookupSymbol(symbol_id, scanner_.literal());
}
@@ -4176,8 +4189,7 @@
Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
scanner_.SeekForward(end_pos);
pre_data()->Skip(entry.predata_function_skip(),
- entry.predata_symbol_skip(),
- entry.symbol_id_skip());
+ entry.predata_symbol_skip());
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
@@ -4191,7 +4203,6 @@
FunctionEntry entry = log()->LogFunction(function_block_pos);
int predata_function_position_before = log()->function_position();
int predata_symbol_position_before = log()->symbol_position();
- int symbol_ids_before = log()->symbol_ids();
ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
@@ -4209,8 +4220,6 @@
log()->function_position() - predata_function_position_before);
entry.set_predata_symbol_skip(
log()->symbol_position() - predata_symbol_position_before);
- entry.set_symbol_id_skip(
- log()->symbol_ids() - symbol_ids_before);
}
}
@@ -4243,58 +4252,43 @@
Expect(Token::MOD, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
- Runtime::Function* function =
- Runtime::FunctionForName(scanner_.literal());
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- if (function == NULL && extension_ != NULL) {
+ if (is_pre_parsing_) return NULL;
+
+ if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time, not when reparsing because of lazy compilation.
top_scope_->ForceEagerCompilation();
}
- // Check for built-in macros.
- if (!is_pre_parsing_) {
- if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
- // %IS_VAR(x)
- // evaluates to x if x is a variable,
- // leads to a parse error otherwise
- if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
- return args->at(0);
- }
- *ok = false;
- // Check here for other macros.
- // } else if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
- // ...
- }
+ Runtime::Function* function = Runtime::FunctionForSymbol(name);
- if (!*ok) {
- // We found a macro but it failed.
+ // Check for built-in IS_VAR macro.
+ if (function != NULL &&
+ function->intrinsic_type == Runtime::RUNTIME &&
+ function->function_id == Runtime::kIS_VAR) {
+ // %IS_VAR(x) evaluates to x if x is a variable,
+ // leads to a parse error otherwise. Could be implemented as an
+ // inline function %_IS_VAR(x) to eliminate this special case.
+ if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
+ return args->at(0);
+ } else {
ReportMessage("unable_to_parse", Vector<const char*>::empty());
- return NULL;
- }
- }
-
- // Check that the expected number arguments are passed to runtime functions.
- if (!is_pre_parsing_) {
- if (function != NULL
- && function->nargs != -1
- && function->nargs != args->length()) {
- ReportMessage("illegal_access", Vector<const char*>::empty());
*ok = false;
return NULL;
- } else if (function == NULL && !name.is_null()) {
- // If this is not a runtime function implemented in C++ it might be an
- // inlined runtime function.
- int argc = CodeGenerator::InlineRuntimeCallArgumentsCount(name);
- if (argc != -1 && argc != args->length()) {
- ReportMessage("illegal_access", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
}
}
- // Otherwise we have a valid runtime call.
+ // Check that the function is called with the expected number of arguments.
+ if (function != NULL &&
+ function->nargs != -1 &&
+ function->nargs != args->length()) {
+ ReportMessage("illegal_access", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
+ // We have a valid intrinsics call or a call to a builtin.
return NEW(CallRuntime(name, function, args));
}
@@ -5497,6 +5491,47 @@
}
+void ScriptDataImpl::Initialize() {
+ if (store_.length() >= kHeaderSize) {
+ int symbol_data_offset = kHeaderSize + store_[kFunctionsSizeOffset];
+ if (store_.length() > symbol_data_offset) {
+ symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
+ } else {
+ // A partial preparse records no symbol information.
+ symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
+ }
+ symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
+ }
+}
+
+
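For orientation, a hedged sketch of the preparse data layout that Initialize() decodes. The word offsets follow the constants in the parser.h diff below; header fields not named in this patch are elided:

// store_ layout (preparse data, version 3):
//
//   words [0, kHeaderSize):  header -- magic (offset 0), version (1), ...,
//                            size (offset 5); kHeaderSize == 6
//   words [kHeaderSize, kHeaderSize + store_[kFunctionsSizeOffset]):
//                            function entries, FunctionEntry::kSize == 6
//                            words each
//   bytes [symbol_data_, symbol_data_end_):
//                            base-128 encoded symbol ids, terminated and
//                            padded with kNumberTerminator (0x80)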
+int ScriptDataImpl::ReadNumber(byte** source) {
+ // Reads a number from symbol_data_ in base 128. The most significant
+ // bit marks that there are more digits.
+ // If the first byte is 0x80 (kNumberTerminator), it would normally
+ // represent a leading zero. Since that is useless, and therefore won't
+ // appear as the first digit of any actual value, it is used to
+ // mark the end of the input stream.
+ byte* data = *source;
+ if (data >= symbol_data_end_) return -1;
+ byte input = *data;
+ if (input == kNumberTerminator) {
+ // End of stream marker.
+ return -1;
+ }
+ int result = input & 0x7f;
+ data++;
+ while ((input & 0x80u) != 0) {
+ if (data >= symbol_data_end_) return -1;
+ input = *data;
+ result = (result << 7) | (input & 0x7f);
+ data++;
+ }
+ *source = data;
+ return result;
+}
+
+
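Since the writer (WriteNumber, in the parser.cc hunk above) and the reader (ReadNumber, here) sit far apart, the following is a minimal self-contained sketch of the same variable-length scheme: base 128, most significant group first, the high bit set on every byte except the last, and a lone 0x80 first byte (kNumberTerminator) reserved as the end-of-stream marker. EncodeNumber/DecodeNumber and the buffer handling are illustrative names, not part of this patch; for example, 300 round-trips through the two bytes 0x82 0x2C.

#include <cassert>
#include <cstdint>
#include <vector>

typedef uint8_t byte;

// Emit |number| in base 128, most significant non-zero group first;
// every byte except the last carries the continuation bit 0x80.
static void EncodeNumber(int number, std::vector<byte>* out) {
  assert(number >= 0);
  int mask = (1 << 28) - 1;
  for (int i = 28; i > 0; i -= 7) {
    if (number > mask) {
      out->push_back(static_cast<byte>(number >> i) | 0x80);
      number &= mask;
    }
    mask >>= 7;
  }
  out->push_back(static_cast<byte>(number));
}

// Mirror of ReadNumber above: returns -1 at end of data or when the
// first byte is the 0x80 terminator.
static int DecodeNumber(const byte** source, const byte* end) {
  const byte* data = *source;
  if (data >= end) return -1;
  byte input = *data++;
  if (input == 0x80) return -1;  // kNumberTerminator.
  int result = input & 0x7f;
  while ((input & 0x80) != 0) {
    if (data >= end) return -1;
    input = *data++;
    result = (result << 7) | (input & 0x7f);
  }
  *source = data;
  return result;
}

int main() {
  std::vector<byte> buffer;
  EncodeNumber(300, &buffer);   // buffer is now {0x82, 0x2C}.
  buffer.push_back(0x80);       // Terminate the stream.
  const byte* pos = &buffer[0];
  const byte* end = pos + buffer.size();
  assert(DecodeNumber(&pos, end) == 300);
  assert(DecodeNumber(&pos, end) == -1);  // Hit the terminator.
  return 0;
}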
ScriptDataImpl* PreParse(Handle<String> source,
unibrow::CharacterStream* stream,
v8::Extension* extension) {
diff --git a/src/parser.h b/src/parser.h
index c3e947f..8bab92d 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -82,15 +82,9 @@
backing_[kPredataSymbolSkipOffset] = value;
}
- int symbol_id_skip() { return backing_[kSymbolIdSkipOffset]; }
- void set_symbol_id_skip(int value) {
- backing_[kSymbolIdSkipOffset] = value;
- }
-
-
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 7;
+ static const int kSize = 6;
private:
Vector<unsigned> backing_;
@@ -100,7 +94,6 @@
static const int kPropertyCountOffset = 3;
static const int kPredataFunctionSkipOffset = 4;
static const int kPredataSymbolSkipOffset = 5;
- static const int kSymbolIdSkipOffset = 6;
};
@@ -109,18 +102,10 @@
explicit ScriptDataImpl(Vector<unsigned> store)
: store_(store),
function_index_(kHeaderSize),
- symbol_id_(0),
owns_store_(true) {
Initialize();
}
- void Initialize() {
- if (store_.length() >= kHeaderSize) {
- // Otherwise we won't satisfy the SanityCheck.
- symbol_index_ = kHeaderSize + store_[kFunctionsSizeOffset];
- }
- }
-
// Create an empty ScriptDataImpl that is guaranteed to not satisfy
// a SanityCheck.
ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
@@ -130,8 +115,11 @@
virtual const char* Data();
virtual bool HasError();
+ void Initialize();
+ void ReadNextSymbolPosition();
+
FunctionEntry GetFunctionEntry(int start);
- int GetSymbolIdentifier(int start);
+ int GetSymbolIdentifier();
void SkipFunctionEntry(int start);
bool SanityCheck();
@@ -149,19 +137,27 @@
unsigned version() { return store_[kVersionOffset]; }
// Skip forward in the preparser data by the given number
- // of unsigned ints.
- virtual void Skip(int function_entries, int symbol_entries, int symbol_ids) {
+ // of unsigned ints of function entry data, and by the given number of
+ // bytes of symbol id data.
+ void Skip(int function_entries, int symbol_entries) {
ASSERT(function_entries >= 0);
ASSERT(function_entries
<= (static_cast<int>(store_[kFunctionsSizeOffset])
- (function_index_ - kHeaderSize)));
- function_index_ += function_entries;
- symbol_index_ += symbol_entries;
- symbol_id_ += symbol_ids;
+ ASSERT(symbol_entries >= 0);
+ ASSERT(symbol_entries <= symbol_data_end_ - symbol_data_);
+
+ unsigned max_function_skip = store_[kFunctionsSizeOffset] -
+ static_cast<unsigned>(function_index_ - kHeaderSize);
+ function_index_ +=
+ Min(static_cast<unsigned>(function_entries), max_function_skip);
+ symbol_data_ +=
+ Min(static_cast<unsigned>(symbol_entries),
+ static_cast<unsigned>(symbol_data_end_ - symbol_data_));
}
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 2;
+ static const unsigned kCurrentVersion = 3;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
@@ -171,26 +167,30 @@
static const int kSizeOffset = 5;
static const int kHeaderSize = 6;
+ // If encoding a message, the following positions are fixed.
static const int kMessageStartPos = 0;
static const int kMessageEndPos = 1;
static const int kMessageArgCountPos = 2;
static const int kMessageTextPos = 3;
+ static const byte kNumberTerminator = 0x80u;
+
private:
Vector<unsigned> store_;
+ unsigned char* symbol_data_;
+ unsigned char* symbol_data_end_;
int function_index_;
- int symbol_index_;
- int symbol_id_;
bool owns_store_;
unsigned Read(int position);
unsigned* ReadAddress(int position);
+ // Reads a base-128 encoded number from the current position in the
+ // symbol data.
+ int ReadNumber(byte** source);
ScriptDataImpl(const char* backing_store, int length)
: store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
length / sizeof(unsigned)),
function_index_(kHeaderSize),
- symbol_id_(0),
owns_store_(false) {
ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
Initialize();
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 2de7a2f..f8fa23d 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -31,6 +31,7 @@
#include "global-handles.h"
#include "scopeinfo.h"
#include "top.h"
+#include "unicode.h"
#include "zone-inl.h"
#include "profile-generator-inl.h"
@@ -2132,6 +2133,357 @@
return diff;
}
+
+class OutputStreamWriter {
+ public:
+ explicit OutputStreamWriter(v8::OutputStream* stream)
+ : stream_(stream),
+ chunk_size_(stream->GetChunkSize()),
+ chunk_(chunk_size_),
+ chunk_pos_(0),
+ aborted_(false) {
+ ASSERT(chunk_size_ > 0);
+ }
+ bool aborted() { return aborted_; }
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(chunk_pos_ < chunk_size_);
+ chunk_[chunk_pos_++] = c;
+ MaybeWriteChunk();
+ }
+ void AddString(const char* s) {
+ AddSubstring(s, StrLength(s));
+ }
+ void AddSubstring(const char* s, int n) {
+ if (n <= 0) return;
+ ASSERT(static_cast<size_t>(n) <= strlen(s));
+ const char* s_end = s + n;
+ while (s < s_end) {
+ int s_chunk_size = Min(
+ chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
+ ASSERT(s_chunk_size > 0);
+ memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ s += s_chunk_size;
+ chunk_pos_ += s_chunk_size;
+ MaybeWriteChunk();
+ }
+ }
+ void AddNumber(int n) { AddNumberImpl<int>(n, "%d"); }
+ void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
+ void AddNumber(uint64_t n) { AddNumberImpl<uint64_t>(n, "%llu"); }
+ void Finalize() {
+ if (aborted_) return;
+ ASSERT(chunk_pos_ < chunk_size_);
+ if (chunk_pos_ != 0) {
+ WriteChunk();
+ }
+ stream_->EndOfStream();
+ }
+
+ private:
+ template<typename T>
+ void AddNumberImpl(T n, const char* format) {
+ ScopedVector<char> buffer(32);
+ int result = OS::SNPrintF(buffer, format, n);
+ USE(result);
+ ASSERT(result != -1);
+ AddString(buffer.start());
+ }
+ void MaybeWriteChunk() {
+ ASSERT(chunk_pos_ <= chunk_size_);
+ if (chunk_pos_ == chunk_size_) {
+ WriteChunk();
+ chunk_pos_ = 0;
+ }
+ }
+ void WriteChunk() {
+ if (aborted_) return;
+ if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
+ v8::OutputStream::kAbort) aborted_ = true;
+ }
+
+ v8::OutputStream* stream_;
+ int chunk_size_;
+ ScopedVector<char> chunk_;
+ int chunk_pos_;
+ bool aborted_;
+};
+
+void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+ ASSERT(writer_ == NULL);
+ writer_ = new OutputStreamWriter(stream);
+
+ // Since the node graph is cyclic, a first pass is needed to enumerate
+ // the nodes. Strings can be serialized in a single pass.
+ EnumerateNodes();
+ SerializeImpl();
+
+ delete writer_;
+ writer_ = NULL;
+}
+
+
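For context, Serialize() above is the internal half of the v8::HeapSnapshot::Serialize entry point added in api.cc. A hedged sketch of how an embedder might drive it; StringOutputStream and its buffer are illustrative, and only the v8::OutputStream virtuals, TakeSnapshot, and Serialize are public API:

#include <v8.h>
#include <v8-profiler.h>
#include <string>

// Accumulates the ASCII JSON chunks produced by the serializer.
class StringOutputStream : public v8::OutputStream {
 public:
  virtual void EndOfStream() { }
  virtual int GetChunkSize() { return 4096; }  // ApiCheck requires > 0.
  virtual WriteResult WriteAsciiChunk(char* data, int size) {
    json_.append(data, size);
    return kContinue;  // Returning kAbort stops serialization early.
  }
  const std::string& json() const { return json_; }
 private:
  std::string json_;
};

// Usage (error handling omitted):
//   const v8::HeapSnapshot* snapshot =
//       v8::HeapProfiler::TakeSnapshot(v8::String::New("dump"));
//   StringOutputStream stream;
//   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
//   // stream.json() now holds the complete JSON document.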
+void HeapSnapshotJSONSerializer::SerializeImpl() {
+ writer_->AddCharacter('{');
+ writer_->AddString("\"snapshot\":{");
+ SerializeSnapshot();
+ if (writer_->aborted()) return;
+ writer_->AddString("},\n");
+ writer_->AddString("\"nodes\":[");
+ SerializeNodes();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"strings\":[");
+ SerializeStrings();
+ if (writer_->aborted()) return;
+ writer_->AddCharacter(']');
+ writer_->AddCharacter('}');
+ writer_->Finalize();
+}
+
+
+class HeapSnapshotJSONSerializerEnumerator {
+ public:
+ explicit HeapSnapshotJSONSerializerEnumerator(HeapSnapshotJSONSerializer* s)
+ : s_(s) {
+ }
+ void Apply(HeapEntry** entry) {
+ s_->GetNodeId(*entry);
+ }
+ private:
+ HeapSnapshotJSONSerializer* s_;
+};
+
+void HeapSnapshotJSONSerializer::EnumerateNodes() {
+ GetNodeId(snapshot_->root()); // Make sure root gets the first id.
+ HeapSnapshotJSONSerializerEnumerator iter(this);
+ snapshot_->IterateEntries(&iter);
+}
+
+
+int HeapSnapshotJSONSerializer::GetNodeId(HeapEntry* entry) {
+ HashMap::Entry* cache_entry = nodes_.Lookup(entry, ObjectHash(entry), true);
+ if (cache_entry->value == NULL) {
+ cache_entry->value = reinterpret_cast<void*>(next_node_id_++);
+ }
+ return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
+ HashMap::Entry* cache_entry = strings_.Lookup(
+ const_cast<char*>(s), ObjectHash(s), true);
+ if (cache_entry->value == NULL) {
+ cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
+ }
+ return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
+ writer_->AddCharacter(',');
+ writer_->AddNumber(edge->type());
+ writer_->AddCharacter(',');
+ if (edge->type() == HeapGraphEdge::kElement) {
+ writer_->AddNumber(edge->index());
+ } else {
+ writer_->AddNumber(GetStringId(edge->name()));
+ }
+ writer_->AddCharacter(',');
+ writer_->AddNumber(GetNodeId(edge->to()));
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+ writer_->AddCharacter('\n');
+ writer_->AddCharacter(',');
+ writer_->AddNumber(entry->type());
+ writer_->AddCharacter(',');
+ writer_->AddNumber(GetStringId(entry->name()));
+ writer_->AddCharacter(',');
+ writer_->AddNumber(entry->id());
+ writer_->AddCharacter(',');
+ writer_->AddNumber(entry->self_size());
+ Vector<HeapGraphEdge> children = entry->children();
+ writer_->AddCharacter(',');
+ writer_->AddNumber(children.length());
+ for (int i = 0; i < children.length(); ++i) {
+ SerializeEdge(&children[i]);
+ if (writer_->aborted()) return;
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNodes() {
+ // The first (zeroth) item of the nodes array is a JSON-ified object
+ // describing the node serialization layout.
+ // We use a set of macros to improve readability.
+#define JSON_A(s) "["s"]"
+#define JSON_O(s) "{"s"}"
+#define JSON_S(s) "\\\""s"\\\""
+ writer_->AddString("\"" JSON_O(
+ JSON_S("fields") ":" JSON_A(
+ JSON_S("type")
+ "," JSON_S("name")
+ "," JSON_S("id")
+ "," JSON_S("self_size")
+ "," JSON_S("children_count")
+ "," JSON_S("children"))
+ "," JSON_S("types") ":" JSON_A(
+ JSON_A(
+ JSON_S("internal")
+ "," JSON_S("array")
+ "," JSON_S("string")
+ "," JSON_S("object")
+ "," JSON_S("code")
+ "," JSON_S("closure"))
+ "," JSON_S("string")
+ "," JSON_S("number")
+ "," JSON_S("number")
+ "," JSON_S("number")
+ "," JSON_O(
+ JSON_S("fields") ":" JSON_A(
+ JSON_S("type")
+ "," JSON_S("name_or_index")
+ "," JSON_S("to_node"))
+ "," JSON_S("types") ":" JSON_A(
+ JSON_A(
+ JSON_S("context")
+ "," JSON_S("element")
+ "," JSON_S("property")
+ "," JSON_S("internal"))
+ "," JSON_S("string_or_number")
+ "," JSON_S("node"))))) "\"");
+#undef JSON_S
+#undef JSON_O
+#undef JSON_A
+
+ const int node_fields_count = 5; // type,name,id,self_size,children_count.
+ const int edge_fields_count = 3; // type,name|index,to_node.
+ List<HashMap::Entry*> sorted_nodes;
+ SortHashMap(&nodes_, &sorted_nodes);
+ // Rewrite node ids, so they refer to actual array positions.
+ if (sorted_nodes.length() > 1) {
+ // Nodes start from array index 1.
+ int prev_value = 1;
+ sorted_nodes[0]->value = reinterpret_cast<void*>(prev_value);
+ for (int i = 1; i < sorted_nodes.length(); ++i) {
+ HeapEntry* prev_heap_entry =
+ reinterpret_cast<HeapEntry*>(sorted_nodes[i-1]->key);
+ prev_value += node_fields_count +
+ prev_heap_entry->children().length() * edge_fields_count;
+ sorted_nodes[i]->value = reinterpret_cast<void*>(prev_value);
+ }
+ }
+ for (int i = 0; i < sorted_nodes.length(); ++i) {
+ SerializeNode(reinterpret_cast<HeapEntry*>(sorted_nodes[i]->key));
+ if (writer_->aborted()) return;
+ }
+}
+
+
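Combined with SerializeImpl and SerializeStrings, the emitted stream looks roughly like this (values invented for illustration; the zeroth nodes entry is the escaped layout string built above, and string index 0 is the "<dummy>" placeholder):

{"snapshot":{"title":"dump","uid":1},
"nodes":["{\"fields\":[\"type\",\"name\",\"id\",\"self_size\",\"children_count\",\"children\"],\"types\":[...]}"
,3,1,1,24,1,1,2,7],
"strings":["<dummy>",
"Object",
"x"]}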
+void HeapSnapshotJSONSerializer::SerializeSnapshot() {
+ writer_->AddString("\"title\":\"");
+ writer_->AddString(snapshot_->title());
+ writer_->AddString("\"");
+ writer_->AddString(",\"uid\":");
+ writer_->AddNumber(snapshot_->uid());
+}
+
+
+static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
+ static const char hex_chars[] = "0123456789ABCDEF";
+ w->AddString("\\u");
+ w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
+ w->AddCharacter(hex_chars[u & 0xf]);
+}
+
+void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
+ writer_->AddCharacter('\n');
+ writer_->AddCharacter('\"');
+ for ( ; *s != '\0'; ++s) {
+ switch (*s) {
+ case '\b':
+ writer_->AddString("\\b");
+ continue;
+ case '\f':
+ writer_->AddString("\\f");
+ continue;
+ case '\n':
+ writer_->AddString("\\n");
+ continue;
+ case '\r':
+ writer_->AddString("\\r");
+ continue;
+ case '\t':
+ writer_->AddString("\\t");
+ continue;
+ case '\"':
+ case '\\':
+ writer_->AddCharacter('\\');
+ writer_->AddCharacter(*s);
+ continue;
+ default:
+ if (*s > 31 && *s < 128) {
+ writer_->AddCharacter(*s);
+ } else if (*s <= 31) {
+ // Special character with no dedicated literal.
+ WriteUChar(writer_, *s);
+ } else {
+ // Convert UTF-8 into \u UTF-16 literal.
+ unsigned length = 1, cursor = 0;
+ for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
+ unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
+ if (c != unibrow::Utf8::kBadChar) {
+ WriteUChar(writer_, c);
+ ASSERT(cursor != 0);
+ s += cursor - 1;
+ } else {
+ writer_->AddCharacter('?');
+ }
+ }
+ }
+ }
+ writer_->AddCharacter('\"');
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeStrings() {
+ List<HashMap::Entry*> sorted_strings;
+ SortHashMap(&strings_, &sorted_strings);
+ writer_->AddString("\"<dummy>\"");
+ for (int i = 0; i < sorted_strings.length(); ++i) {
+ writer_->AddCharacter(',');
+ SerializeString(
+ reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
+ if (writer_->aborted()) return;
+ }
+}
+
+
+template<typename T>
+inline static int SortUsingEntryValue(const T* x, const T* y) {
+ uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
+ uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
+ if (x_uint > y_uint) {
+ return 1;
+ } else if (x_uint == y_uint) {
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SortHashMap(
+ HashMap* map, List<HashMap::Entry*>* sorted_entries) {
+ for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
+ sorted_entries->Add(p);
+ sorted_entries->Sort(SortUsingEntryValue);
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/profile-generator.h b/src/profile-generator.h
index c6d6f4c..c2bc4ce 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -976,6 +976,54 @@
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
};
+class OutputStreamWriter;
+
+class HeapSnapshotJSONSerializer {
+ public:
+ explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ nodes_(ObjectsMatch),
+ strings_(ObjectsMatch),
+ next_node_id_(1),
+ next_string_id_(1),
+ writer_(NULL) {
+ }
+ void Serialize(v8::OutputStream* stream);
+
+ private:
+ INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+
+ INLINE(static uint32_t ObjectHash(const void* key)) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(key));
+ }
+
+ void EnumerateNodes();
+ int GetNodeId(HeapEntry* entry);
+ int GetStringId(const char* s);
+ void SerializeEdge(HeapGraphEdge* edge);
+ void SerializeImpl();
+ void SerializeNode(HeapEntry* entry);
+ void SerializeNodes();
+ void SerializeSnapshot();
+ void SerializeString(const unsigned char* s);
+ void SerializeStrings();
+ void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
+
+ HeapSnapshot* snapshot_;
+ HashMap nodes_;
+ HashMap strings_;
+ int next_node_id_;
+ int next_string_id_;
+ OutputStreamWriter* writer_;
+
+ friend class HeapSnapshotJSONSerializerEnumerator;
+ friend class HeapSnapshotJSONSerializerIterator;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
+};
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/runtime.cc b/src/runtime.cc
index a1f6810..627ea12 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4519,7 +4519,6 @@
RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
double value = StringToInt(s, radix);
return Heap::NumberFromDouble(value);
- return Heap::nan_value();
}
@@ -10064,6 +10063,7 @@
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
inline_runtime_functions = true;
+ INLINE_FUNCTION_LIST(ADD_ENTRY)
INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
return *result;
@@ -10090,35 +10090,55 @@
// ----------------------------------------------------------------------------
// Implementation of Runtime
-#define F(name, nargs, ressize) \
- { #name, FUNCTION_ADDR(Runtime_##name), nargs, \
- static_cast<int>(Runtime::k##name), ressize },
+#define F(name, number_of_args, result_size) \
+ { Runtime::k##name, Runtime::RUNTIME, #name, \
+ FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
-static Runtime::Function Runtime_functions[] = {
+
+#define I(name, number_of_args, result_size) \
+ { Runtime::kInline##name, Runtime::INLINE, \
+ "_" #name, NULL, number_of_args, result_size },
+
+Runtime::Function kIntrinsicFunctions[] = {
RUNTIME_FUNCTION_LIST(F)
- { NULL, NULL, 0, -1, 0 }
+ INLINE_FUNCTION_LIST(I)
+ INLINE_RUNTIME_FUNCTION_LIST(I)
};
-#undef F
-
-Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
- ASSERT(0 <= fid && fid < kNofFunctions);
- return &Runtime_functions[fid];
+Object* Runtime::InitializeIntrinsicFunctionNames(Object* dictionary) {
+ ASSERT(dictionary != NULL);
+ ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
+ for (int i = 0; i < kNumFunctions; ++i) {
+ Object* name_symbol = Heap::LookupAsciiSymbol(kIntrinsicFunctions[i].name);
+ if (name_symbol->IsFailure()) return name_symbol;
+ StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
+ dictionary = string_dictionary->Add(String::cast(name_symbol),
+ Smi::FromInt(i),
+ PropertyDetails(NONE, NORMAL));
+ // Non-recoverable failure. Calling code must restart heap initialization.
+ if (dictionary->IsFailure()) return dictionary;
+ }
+ return dictionary;
}
-Runtime::Function* Runtime::FunctionForName(Vector<const char> name) {
- for (Function* f = Runtime_functions; f->name != NULL; f++) {
- if (strncmp(f->name, name.start(), name.length()) == 0
- && f->name[name.length()] == 0) {
- return f;
- }
+Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
+ int entry = Heap::intrinsic_function_names()->FindEntry(*name);
+ if (entry != kNotFound) {
+ Object* smi_index = Heap::intrinsic_function_names()->ValueAt(entry);
+ int function_index = Smi::cast(smi_index)->value();
+ return &(kIntrinsicFunctions[function_index]);
}
return NULL;
}
+Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
+ return &(kIntrinsicFunctions[static_cast<int>(id)]);
+}
+
+
void Runtime::PerformGC(Object* result) {
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
diff --git a/src/runtime.h b/src/runtime.h
index 312907a..8a3671a 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -389,6 +389,59 @@
RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
// ----------------------------------------------------------------------------
+// INLINE_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_FUNCTION_LIST(F) \
+ F(IsSmi, 1, 1) \
+ F(IsNonNegativeSmi, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(IsRegExp, 1, 1) \
+ F(CallFunction, -1 /* receiver + n args + function */, 1) \
+ F(ArgumentsLength, 0, 1) \
+ F(Arguments, 1, 1) \
+ F(ValueOf, 1, 1) \
+ F(SetValueOf, 2, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringCharAt, 2, 1) \
+ F(ObjectEquals, 2, 1) \
+ F(RandomHeapNumber, 0, 1) \
+ F(IsObject, 1, 1) \
+ F(IsFunction, 1, 1) \
+ F(IsUndetectableObject, 1, 1) \
+ F(IsSpecObject, 1, 1) \
+ F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
+ F(MathPow, 2, 1) \
+ F(MathSin, 1, 1) \
+ F(MathCos, 1, 1) \
+ F(MathSqrt, 1, 1) \
+ F(IsRegExpEquivalent, 2, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1)
+
+
+// ----------------------------------------------------------------------------
+// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code that also
+// have a corresponding runtime function that is called for slow cases.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+ F(IsConstructCall, 0, 1) \
+ F(ClassOf, 1, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(Log, 3, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringCompare, 2, 1) \
+ F(RegExpExec, 4, 1) \
+ F(RegExpConstructResult, 3, 1) \
+ F(RegExpCloneResult, 1, 1) \
+ F(GetFromCache, 2, 1) \
+ F(NumberToString, 1, 1) \
+ F(SwapElements, 3, 1)
+
+
+// ----------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
class Runtime : public AllStatic {
@@ -396,33 +449,52 @@
enum FunctionId {
#define F(name, nargs, ressize) k##name,
RUNTIME_FUNCTION_LIST(F)
- kNofFunctions
#undef F
+#define F(name, nargs, ressize) kInline##name,
+ INLINE_FUNCTION_LIST(F)
+ INLINE_RUNTIME_FUNCTION_LIST(F)
+#undef F
+ kNumFunctions,
+ kFirstInlineFunction = kInlineIsSmi
};
- // Runtime function descriptor.
+ enum IntrinsicType {
+ RUNTIME,
+ INLINE
+ };
+
+ // Intrinsic function descriptor.
struct Function {
+ FunctionId function_id;
+ IntrinsicType intrinsic_type;
// The JS name of the function.
const char* name;
- // The C++ (native) entry point.
+ // The C++ (native) entry point. NULL if the function is inlined.
byte* entry;
- // The number of arguments expected; nargs < 0 if variable no. of
- // arguments.
+ // The number of arguments expected. nargs is -1 if the function takes
+ // a variable number of arguments.
int nargs;
- int stub_id;
- // Size of result, if complex (larger than a single pointer),
- // otherwise zero.
+ // Size of result. Most functions return a single pointer, size 1.
int result_size;
};
- // Get the runtime function with the given function id.
- static Function* FunctionForId(FunctionId fid);
+ static const int kNotFound = -1;
- // Get the runtime function with the given name.
- static Function* FunctionForName(Vector<const char> name);
+ // Add symbols for all the intrinsic function names to a StringDictionary.
+ // Returns failure if an allocation fails. In this case, it must be
+ // retried with a new, empty StringDictionary, not with the same one.
+ // Alternatively, heap initialization can be completely restarted.
+ static Object* InitializeIntrinsicFunctionNames(Object* dictionary);
+ // Get the intrinsic function with the given name, which must be a symbol.
+ static Function* FunctionForSymbol(Handle<String> name);
+
+ // Get the intrinsic function with the given FunctionId.
+ static Function* FunctionForId(FunctionId id);
+
+ // General-purpose helper functions for the runtime system.
static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
static bool IsUpperCaseChar(uint16_t ch);
diff --git a/src/unicode.h b/src/unicode.h
index a3b799e..9d1d683 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -120,6 +120,9 @@
static inline unsigned Encode(char* out, uchar c);
static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
unsigned capacity, unsigned* chars_read, unsigned* offset);
+ static uchar CalculateValue(const byte* str,
+ unsigned length,
+ unsigned* cursor);
static const uchar kBadChar = 0xFFFD;
static const unsigned kMaxEncodedSize = 4;
static const unsigned kMaxOneByteChar = 0x7f;
@@ -133,9 +136,6 @@
static inline uchar ValueOf(const byte* str,
unsigned length,
unsigned* cursor);
- static uchar CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor);
};
// --- C h a r a c t e r S t r e a m ---
diff --git a/src/utils.h b/src/utils.h
index d605891..fefbfe9 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -391,6 +391,12 @@
// Factory method for creating empty vectors.
static Vector<T> empty() { return Vector<T>(NULL, 0); }
+ template<typename S>
+ static Vector<T> cast(Vector<S> input) {
+ return Vector<T>(reinterpret_cast<T*>(input.start()),
+ input.length() * sizeof(S) / sizeof(T));
+ }
+
protected:
void set_start(T* start) { start_ = start; }
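The parser change above relies on this helper to write byte-granular symbol data into the unsigned-word store (Vector<byte>::cast(data.SubVector(...)) in parser.cc). A small illustrative use, assuming the v8::internal environment; the six-element vector is made up:

// The element count scales by the size ratio: reinterpreting six
// unsigned words as bytes yields 6 * sizeof(unsigned) byte elements.
Vector<unsigned> words = Vector<unsigned>::New(6);
Vector<byte> bytes = Vector<byte>::cast(words);
ASSERT(bytes.length() == 6 * static_cast<int>(sizeof(unsigned)));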
diff --git a/src/version.cc b/src/version.cc
index f105cbd..c144ade 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 4
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index c75b945..b480412 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1404,33 +1404,35 @@
Label slow, done;
if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
+ if (include_smi_code_) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+ if (negative_zero_ == kIgnoreNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(equal, &done);
+ }
+ __ SmiNeg(rax, rax, &done);
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
+ // Either zero or Smi::kMinValue, neither of which becomes a smi when
+ // negated. We handle negative zero here if required. We always enter
+ // the runtime system if we have Smi::kMinValue.
+ if (negative_zero_ == kStrictNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+ } else {
+ __ SmiCompare(rax, Smi::FromInt(Smi::kMinValue));
+ __ j(equal, &slow);
+ __ jmp(&done);
+ }
+ // Try floating point case.
+ __ bind(&try_float);
+ } else if (FLAG_debug_code) {
+ __ AbortIfSmi(rax);
}
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
-
- // Either zero or Smi::kMinValue, neither of which become a smi when
- // negated.
- if (negative_zero_ == kStrictNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
- } else {
- __ jmp(&slow);
- }
-
- // Try floating point case.
- __ bind(&try_float);
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow);
@@ -1449,6 +1451,17 @@
__ movq(rax, rcx);
}
} else if (op_ == Token::BIT_NOT) {
+ if (include_smi_code_) {
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+ __ SmiNot(rax, rax);
+ __ jmp(&done);
+ // Try floating point case.
+ __ bind(&try_float);
+ } else if (FLAG_debug_code) {
+ __ AbortIfSmi(rax);
+ }
+
// Check if the operand is a heap number.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
@@ -2115,6 +2128,26 @@
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
Label check_unequal_objects, done;
+
+ // Compare two smis if required.
+ if (include_smi_compare_) {
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ neg(rdx); // Correct sign in case of overflow.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
+ } else if (FLAG_debug_code) {
+ Label ok;
+ __ JumpIfNotSmi(rdx, &ok);
+ __ JumpIfNotSmi(rax, &ok);
+ __ Abort("CompareStub: smi operands");
+ __ bind(&ok);
+ }
+
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -3001,7 +3034,8 @@
| RegisterField::encode(false) // lhs_ and rhs_ are not used
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
}
@@ -3041,12 +3075,18 @@
include_number_compare_name = "_NO_NUMBER";
}
+ const char* include_smi_compare_name = "";
+ if (!include_smi_compare_) {
+ include_smi_compare_name = "_NO_SMI";
+ }
+
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"CompareStub_%s%s%s%s",
cc_name,
strict_name,
never_nan_nan_name,
- include_number_compare_name);
+ include_number_compare_name,
+ include_smi_compare_name);
return name_;
}
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b1dd45e..91d9ff0 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1940,6 +1940,19 @@
}
+static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
+ bool inline_number_compare) {
+ CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
+ if (nan_info == kCantBothBeNaN) {
+ flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
+ }
+ if (inline_number_compare) {
+ flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
+ }
+ return flags;
+}
+
+
void CodeGenerator::Comparison(AstNode* node,
Condition cc,
bool strict,
@@ -2070,7 +2083,9 @@
// Setup and call the compare stub.
is_not_string.Bind(&left_side);
- CompareStub stub(cc, strict, kCantBothBeNaN);
+ CompareFlags flags =
+ static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
+ CompareStub stub(cc, strict, flags);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ testq(result.reg(), result.reg());
@@ -2174,7 +2189,8 @@
// End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined.
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
+ CompareStub stub(cc, strict, flags);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
answer.Unuse();
@@ -2207,7 +2223,9 @@
// End of in-line compare, call out to the compare stub. Don't include
// number comparison in the stub if it was inlined.
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ CompareFlags flags =
+ ComputeCompareFlags(nan_info, inline_number_compare);
+ CompareStub stub(cc, strict, flags);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
answer.Unuse();
@@ -2332,7 +2350,9 @@
}
// Setup and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
+ CompareFlags flags =
+ static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
+ CompareStub stub(cc, strict, flags);
Result result = frame_->CallStub(&stub, left_side, right_side);
result.ToRegister();
__ testq(result.reg(), result.reg());
@@ -7395,6 +7415,7 @@
GenericUnaryOpStub stub(
Token::SUB,
overwrite,
+ NO_UNARY_FLAGS,
no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -7413,7 +7434,9 @@
Condition is_smi = masm_->CheckSmi(operand.reg());
smi_label.Branch(is_smi, &operand);
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ GenericUnaryOpStub stub(Token::BIT_NOT,
+ overwrite,
+ NO_UNARY_SMI_CODE_IN_STUB);
Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 07bdadf..6e1dd72 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -343,15 +343,17 @@
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
- // If the name is an inline runtime function call return the number of
- // expected arguments. Otherwise return -1.
- static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
static Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
private:
+ // Type of a member function that generates inline code for a native function.
+ typedef void (CodeGenerator::*InlineFunctionGenerator)
+ (ZoneList<Expression*>*);
+
+ static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -584,12 +586,9 @@
void CheckStack();
- struct InlineRuntimeLUT {
- void (CodeGenerator::*method)(ZoneList<Expression*>*);
- const char* name;
- int nargs;
- };
- static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+ static InlineFunctionGenerator FindInlineFunctionGenerator(
+ Runtime::FunctionId function_id);
+
bool CheckForInlineRuntimeCall(CallRuntime* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -742,8 +741,6 @@
// in a spilled state.
bool in_spilled_code_;
- static InlineRuntimeLUT kInlineRuntimeLUT[];
-
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 40e1e35..e4faafc 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -677,9 +677,10 @@
VisitForValue(clause->label(), kAccumulator);
// Perform the comparison as if via '==='.
- if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ if (inline_smi_code) {
Label slow_case;
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
__ JumpIfNotBothSmi(rdx, rax, &slow_case);
__ SmiCompare(rdx, rax);
__ j(not_equal, &next_test);
@@ -688,7 +689,10 @@
__ bind(&slow_case);
}
- CompareStub stub(equal, true);
+ CompareFlags flags = inline_smi_code
+ ? NO_SMI_COMPARE_IN_STUB
+ : NO_COMPARE_FLAGS;
+ CompareStub stub(equal, true, flags);
__ CallStub(&stub);
__ testq(rax, rax);
__ j(not_equal, &next_test);
@@ -938,6 +942,7 @@
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
__ call(ic, mode);
+ __ nop(); // Signal no inlined code.
}
@@ -2954,7 +2959,7 @@
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register rax.
VisitForValue(expr->expression(), kAccumulator);
@@ -2969,7 +2974,8 @@
// in the accumulator register rax.
VisitForValue(expr->expression(), kAccumulator);
Label done;
- if (ShouldInlineSmiCase(expr->op())) {
+ bool inline_smi_case = ShouldInlineSmiCase(expr->op());
+ if (inline_smi_case) {
Label call_stub;
__ JumpIfNotSmi(rax, &call_stub);
__ SmiNot(rax, rax);
@@ -2979,7 +2985,10 @@
bool overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode mode =
overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode);
+ UnaryOpFlags flags = inline_smi_case
+ ? NO_UNARY_SMI_CODE_IN_STUB
+ : NO_UNARY_FLAGS;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
__ CallStub(&stub);
__ bind(&done);
Apply(context_, rax);
@@ -3174,6 +3183,7 @@
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop(); // Signal no inlined code.
if (where == kStack) __ push(rax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
@@ -3361,7 +3371,8 @@
UNREACHABLE();
}
- if (ShouldInlineSmiCase(op)) {
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ if (inline_smi_code) {
Label slow_case;
__ JumpIfNotBothSmi(rax, rdx, &slow_case);
__ SmiCompare(rdx, rax);
@@ -3369,7 +3380,10 @@
__ bind(&slow_case);
}
- CompareStub stub(cc, strict);
+ CompareFlags flags = inline_smi_code
+ ? NO_SMI_COMPARE_IN_STUB
+ : NO_COMPARE_FLAGS;
+ CompareStub stub(cc, strict, flags);
__ CallStub(&stub);
__ testq(rax, rax);
Split(cc, if_true, if_false, fall_through);