Push version 3.0.3 to trunk.
Reapplied all changes for version 3.0.1.
Improved debugger protocol for remote debugging.
Added experimental support for using gyp to generate build files for V8.
Fixed implementation of String::Write in the API (issue 975).
git-svn-id: http://v8.googlecode.com/svn/trunk@6061 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/allocation.h b/src/allocation.h
index 6f4bd2f..394366e 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -28,6 +28,9 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
+#include "checks.h"
+#include "globals.h"
+
namespace v8 {
namespace internal {
diff --git a/src/api.cc b/src/api.cc
index 0ec8cf1..b85d658 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1165,14 +1165,22 @@
ScriptData* ScriptData::PreCompile(const char* input, int length) {
- unibrow::Utf8InputBuffer<> buf(input, length);
- return i::ParserApi::PreParse(i::Handle<i::String>(), &buf, NULL);
+ i::Utf8ToUC16CharacterStream stream(
+ reinterpret_cast<const unsigned char*>(input), length);
+ return i::ParserApi::PreParse(&stream, NULL);
}
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
- return i::ParserApi::PreParse(str, NULL, NULL);
+ if (str->IsExternalTwoByteString()) {
+ i::ExternalTwoByteStringUC16CharacterStream stream(
+ i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
+ return i::ParserApi::PreParse(&stream, NULL);
+ } else {
+ i::GenericStringUC16CharacterStream stream(str, 0, str->length());
+ return i::ParserApi::PreParse(&stream, NULL);
+ }
}
@@ -3119,14 +3127,15 @@
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
}
- int end = length;
- if ( (length == -1) || (length > str->length() - start) )
- end = str->length() - start;
+ int end = start + length;
+  if ((length == -1) || (length > str->length() - start))
+ end = str->length();
if (end < 0) return 0;
i::String::WriteToFlat(*str, buffer, start, end);
- if (length == -1 || end < length)
- buffer[end] = '\0';
- return end;
+ if (length == -1 || end - start < length) {
+ buffer[end - start] = '\0';
+ }
+ return end - start;
}
@@ -4939,7 +4948,8 @@
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
- HeapSnapshot::Type type) {
+ HeapSnapshot::Type type,
+ ActivityControl* control) {
IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
switch (type) {
@@ -4953,7 +4963,8 @@
UNREACHABLE();
}
return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type));
+ i::HeapProfiler::TakeSnapshot(
+ *Utils::OpenHandle(*title), internal_type, control));
}
#endif // ENABLE_LOGGING_AND_PROFILING
@@ -4968,6 +4979,7 @@
}
int Testing::GetStressRuns() {
+ if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
#ifdef DEBUG
// In debug mode the code runs much slower so stressing will only make two
// runs.
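
The String::Write fix above (issue 975) changes the meaning of the
length argument when start is non-zero: length now counts characters
copied beginning at start, clamped to the end of the string, and the
return value is that count. A minimal calling sketch against the v8.h
API of this revision (variable names are illustrative):

    v8::HandleScope scope;
    v8::Handle<v8::String> str = v8::String::New("abcdef");
    uint16_t buffer[4];
    // Copies "de" (2 characters starting at index 3); the buffer is
    // exactly filled, so no '\0' is appended. Returns 2.
    int copied = str->Write(buffer, 3, 2);
    // length == -1 copies the remainder ("def"), appends '\0', and
    // returns 3.
    int rest = str->Write(buffer, 3, -1);
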
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 2b0b324..59bc14e 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -5592,6 +5592,12 @@
__ tst(tmp2, Operand(kSmiTagMask));
deferred->Branch(nz);
+ // Check that both indices are valid.
+ __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
+ __ cmp(tmp2, index1);
+ __ cmp(tmp2, index2, hi);
+ deferred->Branch(ls);
+
// Bring the offsets into the fixed array in tmp1 into index1 and
// index2.
__ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -6463,7 +6469,7 @@
case Token::INSTANCEOF: {
Load(left);
Load(right);
- InstanceofStub stub;
+ InstanceofStub stub(InstanceofStub::kNoFlags);
frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
__ tst(r0, Operand(r0));
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 7e4a280..d254918 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -206,6 +206,11 @@
}
+void FullCodeGenerator::ClearAccumulator() {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+}
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
@@ -890,7 +895,9 @@
__ bind(&update_each);
__ mov(result_register(), r3);
// Perform the assignment as if via '='.
- EmitAssignment(stmt->each());
+ { EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->AssignmentId());
+ }
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1444,7 +1451,7 @@
// For property compound assignments we need another deoptimization
// point after the property load.
if (property != NULL) {
- PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
}
Token::Value op = expr->binary_op();
@@ -1487,6 +1494,8 @@
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r0);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1536,7 +1545,7 @@
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1584,6 +1593,8 @@
break;
}
}
+ PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+ context()->Plug(r0);
}
@@ -1657,8 +1668,6 @@
}
__ bind(&done);
}
-
- context()->Plug(result_register());
}
@@ -1701,10 +1710,10 @@
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
- context()->DropAndPlug(1, r0);
- } else {
- context()->Plug(r0);
+ __ Drop(1);
}
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r0);
}
@@ -1745,10 +1754,10 @@
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
- context()->DropAndPlug(1, r0);
- } else {
- context()->Plug(r0);
+ __ Drop(1);
}
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r0);
}
@@ -3200,6 +3209,8 @@
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(r0);
}
// For all contexts except EffectContext we have the result on
// top of the stack.
@@ -3209,6 +3220,8 @@
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r0);
}
break;
case NAMED_PROPERTY: {
@@ -3216,6 +3229,7 @@
__ pop(r1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3230,6 +3244,7 @@
__ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3415,7 +3430,7 @@
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub;
+ InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
// The stub returns 0 for true.
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a75d96b..e5a1bae 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -2360,10 +2360,8 @@
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
-#ifdef DEBUG
State previous_state = GetState();
-#endif
- State state = TargetState(x, y);
+ State state = TargetState(previous_state, false, x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
rewritten = stub.GetCode();
@@ -2383,6 +2381,12 @@
#endif
}
+
+void PatchInlinedSmiCode(Address address) {
+ UNIMPLEMENTED();
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 682c448..9c79231 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -206,6 +206,13 @@
}
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+ stream->Add("if is_object(");
+ input()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
stream->Add("if is_smi(");
input()->PrintTo(stream);
@@ -460,12 +467,6 @@
}
-int LChunk::NearestNextGapPos(int index) const {
- while (!IsGapAt(index)) index++;
- return index;
-}
-
-
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
}
@@ -1244,6 +1245,17 @@
temp,
first_id,
second_id);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+ temp1,
+ temp2,
+ first_id,
+ second_id);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1347,7 +1359,7 @@
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- MathFunctionId op = instr->op();
+ BuiltinFunctionId op = instr->op();
LOperand* input = UseRegisterAtStart(instr->value());
LInstruction* result = new LUnaryMathOperation(input);
switch (op) {
@@ -1357,6 +1369,12 @@
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
+ case kMathPowHalf:
+ Abort("MathPowHalf LUnaryMathOperation not implemented");
+ return NULL;
+ case kMathLog:
+ Abort("MathLog LUnaryMathOperation not implemented");
+ return NULL;
default:
UNREACHABLE();
return NULL;
@@ -1554,6 +1572,12 @@
}
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ Abort("LPower instruction not implemented on ARM");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
if (instr->left()->representation().IsInteger32()) {
@@ -1594,6 +1618,14 @@
}
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsObject(value, TempRegister()));
+}
+
+
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
@@ -1688,11 +1720,13 @@
} else if (from.IsDouble()) {
if (to.IsTagged()) {
LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
- // Make sure that temp and result_temp are different registers.
+ // Make sure that the temp and result_temp registers are
+ // different.
LUnallocated* result_temp = TempRegister();
- LInstruction* result = new LNumberTagD(value, temp);
+ LInstruction* result = new LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else {
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 0d5ba0f..41209c6 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -121,6 +121,8 @@
// LInteger32ToDouble
// LIsNull
// LIsNullAndBranch
+// LIsObject
+// LIsObjectAndBranch
// LIsSmi
// LIsSmiAndBranch
// LLoadNamedField
@@ -203,6 +205,8 @@
V(Integer32ToDouble) \
V(IsNull) \
V(IsNullAndBranch) \
+ V(IsObject) \
+ V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(HasInstanceType) \
@@ -665,7 +669,7 @@
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream) const;
- MathFunctionId op() const { return hydrogen()->op(); }
+ BuiltinFunctionId op() const { return hydrogen()->op(); }
};
@@ -742,6 +746,48 @@
};
+class LIsObject: public LUnaryOperation {
+ public:
+ LIsObject(LOperand* value, LOperand* temp)
+ : LUnaryOperation(value), temp_(temp) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+
+ LOperand* temp() const { return temp_; }
+
+ private:
+ LOperand* temp_;
+};
+
+
+class LIsObjectAndBranch: public LIsObject {
+ public:
+ LIsObjectAndBranch(LOperand* value,
+ LOperand* temp,
+ LOperand* temp2,
+ int true_block_id,
+ int false_block_id)
+ : LIsObject(value, temp),
+ temp2_(temp2),
+ true_block_id_(true_block_id),
+ false_block_id_(false_block_id) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ virtual void PrintDataTo(StringStream* stream) const;
+ virtual bool IsControl() const { return true; }
+
+ int true_block_id() const { return true_block_id_; }
+ int false_block_id() const { return false_block_id_; }
+
+ LOperand* temp2() const { return temp2_; }
+
+ private:
+ LOperand* temp2_;
+ int true_block_id_;
+ int false_block_id_;
+};
+
+
class LIsSmi: public LUnaryOperation {
public:
explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
@@ -1395,15 +1441,17 @@
class LNumberTagD: public LUnaryOperation {
public:
- explicit LNumberTagD(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2)
+ : LUnaryOperation(value), temp1_(temp1), temp2_(temp2) { }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- LOperand* temp() const { return temp_; }
+ LOperand* temp1() const { return temp1_; }
+ LOperand* temp2() const { return temp2_; }
private:
- LOperand* temp_;
+ LOperand* temp1_;
+ LOperand* temp2_;
};
@@ -1887,7 +1935,6 @@
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
- int NearestNextGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
LLabel* GetLabel(int block_id) const {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index db8037a..533d32c 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -136,7 +136,7 @@
Label loop;
__ bind(&loop);
__ push(r2);
- __ sub(r0, r0, Operand(1));
+ __ sub(r0, r0, Operand(1), SetCC);
__ b(ne, &loop);
} else {
__ sub(sp, sp, Operand(slots * kPointerSize));
@@ -1213,6 +1213,26 @@
}
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object) {
+ Abort("EmitIsObject unimplemented.");
+ return ne;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+ Abort("DoIsObject unimplemented.");
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Abort("DoIsObjectAndBranch unimplemented.");
+}
+
+
void LCodeGen::DoIsSmi(LIsSmi* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Register result = ToRegister(instr->result());
@@ -1733,13 +1753,14 @@
DoubleRegister input_reg = ToDoubleRegister(instr->input());
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
Register scratch = r9;
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, tmp, ip, scratch, deferred->entry());
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ jmp(deferred->entry());
}
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 846acac..541a699 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -208,6 +208,15 @@
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+  // Returns the condition on which a final split into the true and
+  // false labels should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6ad8918..6effec1 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1060,9 +1060,14 @@
return;
}
+ // Assert that the register arguments are different and that none of
+ // them are ip. ip is used explicitly in the code generated below.
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
+ ASSERT(!result.is(ip));
+ ASSERT(!scratch1.is(ip));
+ ASSERT(!scratch2.is(ip));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 74ffd3b..c2a9796 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -2112,8 +2112,8 @@
// -- lr : return address
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasCustomCallGenerator()) {
- const int id = function_info->custom_call_generator_id();
+ if (function_info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, NULL, function, name);
Object* result;
@@ -2323,8 +2323,8 @@
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasCustomCallGenerator()) {
- const int id = function_info->custom_call_generator_id();
+ if (function_info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, cell, function, name);
Object* result;
diff --git a/src/array.js b/src/array.js
index c5ff505..a805157 100644
--- a/src/array.js
+++ b/src/array.js
@@ -159,9 +159,11 @@
}
-function ConvertToString(e) {
- if (e == null) return '';
- else return ToString(e);
+function ConvertToString(x) {
+ if (IS_STRING(x)) return x;
+ if (IS_NUMBER(x)) return %_NumberToString(x);
+ if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
+ return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
}
@@ -365,14 +367,13 @@
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
- separator = ToString(separator);
+ separator = NonStringToString(separator);
}
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
- var length = TO_UINT32(this.length);
- return Join(this, length, separator, ConvertToString);
+ return Join(this, TO_UINT32(this.length), separator, ConvertToString);
}
diff --git a/src/assembler.cc b/src/assembler.cc
index d71a35a..3b44efa 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -66,6 +66,7 @@
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
+const double DoubleConstant::negative_infinity = -V8_INFINITY;
// -----------------------------------------------------------------------------
@@ -722,6 +723,12 @@
}
+ExternalReference ExternalReference::address_of_negative_infinity() {
+ return ExternalReference(reinterpret_cast<void*>(
+ const_cast<double*>(&DoubleConstant::negative_infinity)));
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {
@@ -793,6 +800,51 @@
}
+// Helper function to compute x^y, where y is known to be an
+// integer. Uses binary decomposition to limit the number of
+// multiplications; see the discussion in "Hacker's Delight" by Henry
+// S. Warren, Jr., figure 11-6, page 213.
+double power_double_int(double x, int y) {
+ double m = (y < 0) ? 1 / x : x;
+ unsigned n = (y < 0) ? -y : y;
+ double p = 1;
+ while (n != 0) {
+ if ((n & 1) != 0) p *= m;
+ m *= m;
+ if ((n & 2) != 0) p *= m;
+ m *= m;
+ n >>= 2;
+ }
+ return p;
+}
+
+
+double power_double_double(double x, double y) {
+ int y_int = static_cast<int>(y);
+ if (y == y_int) {
+ return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
+ }
+ if (!isinf(x)) {
+ if (y == 0.5) return sqrt(x);
+ if (y == -0.5) return 1.0 / sqrt(x);
+ }
+ if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+ return OS::nan_value();
+ }
+ return pow(x, y);
+}
+
+
+ExternalReference ExternalReference::power_double_double_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double)));
+}
+
+
+ExternalReference ExternalReference::power_double_int_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int)));
+}
+
+
static int native_compare_doubles(double y, double x) {
if (x == y) return EQUAL;
return x < y ? LESS : GREATER;
diff --git a/src/assembler.h b/src/assembler.h
index 82c9fc2..72a9b15 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -50,6 +50,7 @@
public:
static const double min_int;
static const double one_half;
+ static const double negative_infinity;
};
@@ -539,6 +540,8 @@
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
+ static ExternalReference power_double_double_function();
+ static ExternalReference power_double_int_function();
static ExternalReference handle_scope_next_address();
static ExternalReference handle_scope_limit_address();
@@ -549,6 +552,7 @@
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
+ static ExternalReference address_of_negative_infinity();
Address address() const {return reinterpret_cast<Address>(address_);}
@@ -710,6 +714,10 @@
return num_bits_set;
}
+// Computes pow(x, y) with the special cases in the spec for Math.pow.
+double power_double_int(double x, int y);
+double power_double_double(double x, double y);
+
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
diff --git a/src/ast-inl.h b/src/ast-inl.h
index e88156d..eb81c3a 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -71,14 +71,16 @@
: IterationStatement(labels),
cond_(NULL),
condition_position_(-1),
- next_id_(GetNextId()) {
+ continue_id_(GetNextId()),
+ back_edge_id_(GetNextId()) {
}
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
- may_have_function_literal_(true) {
+ may_have_function_literal_(true),
+ body_id_(GetNextId()) {
}
@@ -89,12 +91,14 @@
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
- next_id_(GetNextId()) {
+ continue_id_(GetNextId()),
+ body_id_(GetNextId()) {
}
ForInStatement::ForInStatement(ZoneStringList* labels)
- : IterationStatement(labels), each_(NULL), enumerable_(NULL) {
+ : IterationStatement(labels), each_(NULL), enumerable_(NULL),
+ assignment_id_(GetNextId()) {
}
diff --git a/src/ast.cc b/src/ast.cc
index c1ea0a8..895ab67 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -32,6 +32,7 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -125,17 +126,18 @@
target_(target),
value_(value),
pos_(pos),
- compound_bailout_id_(kNoNumber),
+ binary_operation_(NULL),
+ compound_load_id_(kNoNumber),
+ assignment_id_(GetNextId()),
block_start_(false),
block_end_(false),
is_monomorphic_(false),
receiver_types_(NULL) {
ASSERT(Token::IsAssignmentOp(op));
- binary_operation_ = is_compound()
- ? new BinaryOperation(binary_op(), target, value, pos + 1)
- : NULL;
if (is_compound()) {
- compound_bailout_id_ = GetNextId();
+ binary_operation_ =
+ new BinaryOperation(binary_op(), target, value, pos + 1);
+ compound_load_id_ = GetNextId();
}
}
@@ -558,16 +560,18 @@
static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
+ SharedFunctionInfo* info = target->shared();
if (target->NeedsArgumentsAdaption()) {
// If the number of formal parameters of the target function
// does not match the number of arguments we're passing, we
// don't want to deal with it.
- return target->shared()->formal_parameter_count() == arity;
+ return info->formal_parameter_count() == arity;
} else {
// If the target doesn't need arguments adaption, we can call
// it directly, but we avoid to do so if it has a custom call
// generator, because that is likely to generate better code.
- return !target->shared()->HasCustomCallGenerator();
+ return !info->HasBuiltinFunctionId() ||
+ !CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
}
}
diff --git a/src/ast.h b/src/ast.h
index cdf456f..ed447e3 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -435,7 +435,6 @@
virtual IterationStatement* AsIterationStatement() { return this; }
Statement* body() const { return body_; }
- void set_body(Statement* stmt) { body_ = stmt; }
// Bailout support.
int OsrEntryId() const { return osr_entry_id_; }
@@ -477,12 +476,14 @@
void set_condition_position(int pos) { condition_position_ = pos; }
// Bailout support.
- virtual int ContinueId() const { return next_id_; }
+ virtual int ContinueId() const { return continue_id_; }
+ int BackEdgeId() const { return back_edge_id_; }
private:
Expression* cond_;
int condition_position_;
- int next_id_;
+ int continue_id_;
+ int back_edge_id_;
};
@@ -507,11 +508,13 @@
// Bailout support.
virtual int ContinueId() const { return EntryId(); }
+ int BodyId() const { return body_id_; }
private:
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
+ int body_id_;
};
@@ -532,11 +535,8 @@
}
Statement* init() const { return init_; }
- void set_init(Statement* stmt) { init_ = stmt; }
Expression* cond() const { return cond_; }
- void set_cond(Expression* expr) { cond_ = expr; }
Statement* next() const { return next_; }
- void set_next(Statement* stmt) { next_ = stmt; }
bool may_have_function_literal() const {
return may_have_function_literal_;
@@ -546,7 +546,8 @@
}
// Bailout support.
- virtual int ContinueId() const { return next_id_; }
+ virtual int ContinueId() const { return continue_id_; }
+ int BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
@@ -559,7 +560,8 @@
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
- int next_id_;
+ int continue_id_;
+ int body_id_;
};
@@ -579,11 +581,13 @@
Expression* enumerable() const { return enumerable_; }
// Bailout support.
+ int AssignmentId() const { return assignment_id_; }
virtual int ContinueId() const { return EntryId(); }
private:
Expression* each_;
Expression* enumerable_;
+ int assignment_id_;
};
@@ -737,7 +741,10 @@
Statement* else_statement)
: condition_(condition),
then_statement_(then_statement),
- else_statement_(else_statement) { }
+ else_statement_(else_statement),
+ then_id_(GetNextId()),
+ else_id_(GetNextId()) {
+ }
DECLARE_NODE_TYPE(IfStatement)
@@ -748,14 +755,17 @@
Expression* condition() const { return condition_; }
Statement* then_statement() const { return then_statement_; }
- void set_then_statement(Statement* stmt) { then_statement_ = stmt; }
Statement* else_statement() const { return else_statement_; }
- void set_else_statement(Statement* stmt) { else_statement_ = stmt; }
+
+ int ThenId() const { return then_id_; }
+ int ElseId() const { return else_id_; }
private:
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
+ int then_id_;
+ int else_id_;
};
@@ -1380,6 +1390,9 @@
int pos)
: op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
ASSERT(Token::IsBinaryOp(op));
+ right_id_ = (op == Token::AND || op == Token::OR)
+ ? GetNextId()
+ : AstNode::kNoNumber;
}
// Create the binary operation corresponding to a compound assignment.
@@ -1400,12 +1413,18 @@
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiOnly() const { return is_smi_only_; }
+ // Bailout support.
+ int RightId() const { return right_id_; }
+
private:
Token::Value op_;
Expression* left_;
Expression* right_;
int pos_;
bool is_smi_only_;
+ // The short-circuit logical operations have an AST ID for their
+ // right-hand subexpression.
+ int right_id_;
};
@@ -1432,7 +1451,9 @@
class CountOperation: public Expression {
public:
CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
- : is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
+ : is_prefix_(is_prefix), increment_(increment), pos_(pos),
+ assignment_id_(GetNextId()) {
+ }
DECLARE_NODE_TYPE(CountOperation)
@@ -1452,10 +1473,14 @@
virtual bool IsInlineable() const;
+ // Bailout support.
+ int AssignmentId() const { return assignment_id_; }
+
private:
bool is_prefix_;
IncrementOperation* increment_;
int pos_;
+ int assignment_id_;
};
@@ -1524,7 +1549,10 @@
then_expression_(then_expression),
else_expression_(else_expression),
then_expression_position_(then_expression_position),
- else_expression_position_(else_expression_position) { }
+ else_expression_position_(else_expression_position),
+ then_id_(GetNextId()),
+ else_id_(GetNextId()) {
+ }
DECLARE_NODE_TYPE(Conditional)
@@ -1534,8 +1562,11 @@
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
- int then_expression_position() { return then_expression_position_; }
- int else_expression_position() { return else_expression_position_; }
+ int then_expression_position() const { return then_expression_position_; }
+ int else_expression_position() const { return else_expression_position_; }
+
+ int ThenId() const { return then_id_; }
+ int ElseId() const { return else_id_; }
private:
Expression* condition_;
@@ -1543,6 +1574,8 @@
Expression* else_expression_;
int then_expression_position_;
int else_expression_position_;
+ int then_id_;
+ int else_id_;
};
@@ -1585,7 +1618,8 @@
}
// Bailout support.
- int compound_bailout_id() const { return compound_bailout_id_; }
+ int CompoundLoadId() const { return compound_load_id_; }
+ int AssignmentId() const { return assignment_id_; }
private:
Token::Value op_;
@@ -1593,7 +1627,8 @@
Expression* value_;
int pos_;
BinaryOperation* binary_operation_;
- int compound_bailout_id_;
+ int compound_load_id_;
+ int assignment_id_;
bool block_start_;
bool block_end_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 800c437..cae1a9a 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -38,7 +38,6 @@
#include "natives.h"
#include "objects-visiting.h"
#include "snapshot.h"
-#include "stub-cache.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
@@ -234,7 +233,7 @@
// Used for creating a context from scratch.
void InstallNativeFunctions();
bool InstallNatives();
- void InstallCustomCallGenerators();
+ void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
// Used both for deserialized and from-scratch contexts to add the extensions
@@ -1270,7 +1269,7 @@
global_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
- InstallCustomCallGenerators();
+ InstallBuiltinFunctionIds();
// Install Function.prototype.call and apply.
{ Handle<String> key = Factory::function_class_symbol();
@@ -1369,7 +1368,7 @@
}
-static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<Context> global_context,
const char* holder_expr) {
Handle<GlobalObject> global(global_context->global());
@@ -1387,9 +1386,9 @@
}
-static void InstallCustomCallGenerator(Handle<JSObject> holder,
- const char* function_name,
- int id) {
+static void InstallBuiltinFunctionId(Handle<JSObject> holder,
+ const char* function_name,
+ BuiltinFunctionId id) {
Handle<String> name = Factory::LookupAsciiSymbol(function_name);
Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
Handle<JSFunction> function(JSFunction::cast(function_object));
@@ -1397,17 +1396,17 @@
}
-void Genesis::InstallCustomCallGenerators() {
+void Genesis::InstallBuiltinFunctionIds() {
HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name) \
- { \
- Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
- global_context(), #holder_expr); \
- const int id = CallStubCompiler::k##name##CallGenerator; \
- InstallCustomCallGenerator(holder, #fun_name, id); \
+#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
+ { \
+ Handle<JSObject> holder = ResolveBuiltinIdHolder( \
+ global_context(), #holder_expr); \
+ BuiltinFunctionId id = k##name; \
+ InstallBuiltinFunctionId(holder, #fun_name, id); \
}
- CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
-#undef INSTALL_CALL_GENERATOR
+ FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
+#undef INSTALL_BUILTIN_ID
}
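
For reference, each FUNCTIONS_WITH_ID_LIST entry expands through
INSTALL_BUILTIN_ID into a holder lookup plus an id installation. For a
hypothetical entry (Math, sqrt, MathSqrt), the expansion is roughly:

    {
      Handle<JSObject> holder =
          ResolveBuiltinIdHolder(global_context(), "Math");
      BuiltinFunctionId id = kMathSqrt;
      InstallBuiltinFunctionId(holder, "sqrt", id);
    }
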
diff --git a/src/checks.h b/src/checks.h
index aa557f0..2bb94bb 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -30,6 +30,7 @@
#include <string.h>
+#include "../include/v8stdint.h"
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -231,6 +232,8 @@
#define CHECK_GT(a, b) CHECK((a) > (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
// This is inspired by the static assertion facility in boost. This
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 1010e95..8ba9971 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -47,6 +47,7 @@
V(Compare) \
V(CompareIC) \
V(MathPow) \
+ V(TranscendentalCacheSSE2) \
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
@@ -325,13 +326,24 @@
class InstanceofStub: public CodeStub {
public:
- InstanceofStub() { }
+ enum Flags {
+ kNoFlags = 0,
+ kArgsInRegisters = 1 << 0
+ };
+
+ explicit InstanceofStub(Flags flags) : flags_(flags) { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Instanceof; }
- int MinorKey() { return 0; }
+ int MinorKey() { return args_in_registers() ? 1 : 0; }
+
+ bool args_in_registers() {
+ return (flags_ & kArgsInRegisters) != 0;
+ }
+
+ Flags flags_;
};
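
Since the new flag feeds MinorKey, the two InstanceofStub variants are
compiled and cached as distinct code objects. A sketch (variable names
are illustrative; only kNoFlags is used by the ARM code in this patch):

    InstanceofStub on_stack(InstanceofStub::kNoFlags);              // MinorKey() == 0
    InstanceofStub in_registers(InstanceofStub::kArgsInRegisters);  // MinorKey() == 1
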
diff --git a/src/compiler.cc b/src/compiler.cc
index 59a684c..e4864e4 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -116,13 +116,26 @@
static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
- if (!FLAG_trace_opt) return;
-
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
- PrintF("[optimizing: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
- PrintF(" - took %0.3f ms]\n", ms);
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
+ PrintF(" - took %0.3f ms]\n", ms);
+ }
+ if (FLAG_trace_opt_stats) {
+ static double compilation_time = 0.0;
+ static int compiled_functions = 0;
+ static int code_size = 0;
+
+ compilation_time += ms;
+ compiled_functions++;
+ code_size += function->shared()->SourceSize();
+ PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+ compiled_functions,
+ code_size,
+ compilation_time);
+ }
}
@@ -461,7 +474,14 @@
ScriptDataImpl* pre_data = input_pre_data;
if (pre_data == NULL
&& source_length >= FLAG_min_preparse_length) {
- pre_data = ParserApi::PartialPreParse(source, NULL, extension);
+ if (source->IsExternalTwoByteString()) {
+ ExternalTwoByteStringUC16CharacterStream stream(
+ Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+ pre_data = ParserApi::PartialPreParse(&stream, extension);
+ } else {
+ GenericStringUC16CharacterStream stream(source, 0, source->length());
+ pre_data = ParserApi::PartialPreParse(&stream, extension);
+ }
}
// Create a script object describing the script to be compiled.
diff --git a/src/d8.gyp b/src/d8.gyp
new file mode 100644
index 0000000..3283e38
--- /dev/null
+++ b/src/d8.gyp
@@ -0,0 +1,85 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'd8',
+ 'type': 'executable',
+ 'dependencies': [
+ 'd8_js2c#host',
+ '../tools/gyp/v8.gyp:v8',
+ ],
+ 'include_dirs+': [
+ '../src',
+ ],
+ 'defines': [
+ 'ENABLE_DEBUGGER_SUPPORT',
+ ],
+ 'sources': [
+ 'd8.cc',
+ 'd8-debug.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
+ ],
+ 'conditions': [
+ [ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+ 'sources': [ 'd8-posix.cc', ]
+ }],
+ ],
+ },
+ {
+ 'target_name': 'd8_js2c',
+ 'type': 'none',
+ 'toolsets': ['host'],
+ 'variables': {
+ 'js_files': [
+ 'd8.js',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'd8_js2c',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(js_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
+ ],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<@(_outputs)',
+ 'D8',
+ '<@(js_files)'
+ ],
+ },
+ ],
+ }
+ ],
+}
diff --git a/src/date.js b/src/date.js
index 9601470..38bb8eb 100644
--- a/src/date.js
+++ b/src/date.js
@@ -81,12 +81,12 @@
function InLeapYear(time) {
- return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
+ return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
- return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
+ return DAY(time) - DayFromYear(YearFromTime(time));
}
@@ -114,9 +114,9 @@
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
- var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)),
- MONTH_FROM_TIME(t),
- DATE_FROM_TIME(t));
+ var day = MakeDay(EquivalentYear(YearFromTime(t)),
+ MonthFromTime(t),
+ DateFromTime(t));
return MakeDate(day, TimeWithinDay(t));
}
@@ -253,9 +253,6 @@
function LocalTimeNoCheck(time) {
var ltc = ltcache;
if (%_ObjectEquals(time, ltc.key)) return ltc.val;
- if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
- return $NaN;
- }
// Inline the DST offset cache checks for speed.
// The cache is hit, or DaylightSavingsOffset is called,
@@ -371,16 +368,21 @@
// ECMA 262 - 15.9.1.13
function MakeDate(day, time) {
- if (!$isFinite(day)) return $NaN;
- if (!$isFinite(time)) return $NaN;
- return day * msPerDay + time;
+  time = day * msPerDay + time;
+  // Some of our runtime functions for computing UTC(time) rely on
+ // times not being significantly larger than MAX_TIME_MS. If there
+ // is no way that the time can be within range even after UTC
+ // conversion we return NaN immediately instead of relying on
+ // TimeClip to do it.
+ if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
+ return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
if (!$isFinite(time)) return $NaN;
- if ($abs(time) > 8.64E15) return $NaN;
+ if ($abs(time) > MAX_TIME_MS) return $NaN;
return TO_INTEGER(time);
}
@@ -424,7 +426,7 @@
value = DateParse(year);
if (!NUMBER_IS_NAN(value)) {
cache.time = value;
- cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
+ cache.year = YearFromTime(LocalTimeNoCheck(value));
cache.string = year;
}
}
@@ -642,7 +644,7 @@
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
- return YEAR_FROM_TIME(LocalTimeNoCheck(t));
+ return YearFromTime(LocalTimeNoCheck(t));
}
@@ -650,7 +652,7 @@
function DateGetUTCFullYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
- return YEAR_FROM_TIME(t);
+ return YearFromTime(t);
}
@@ -658,7 +660,7 @@
function DateGetMonth() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
- return MONTH_FROM_TIME(LocalTimeNoCheck(t));
+ return MonthFromTime(LocalTimeNoCheck(t));
}
@@ -666,7 +668,7 @@
function DateGetUTCMonth() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
- return MONTH_FROM_TIME(t);
+ return MonthFromTime(t);
}
@@ -674,7 +676,7 @@
function DateGetDate() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
- return DATE_FROM_TIME(LocalTimeNoCheck(t));
+ return DateFromTime(LocalTimeNoCheck(t));
}
@@ -869,7 +871,7 @@
function DateSetDate(date) {
var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
- var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
+ var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -878,7 +880,7 @@
function DateSetUTCDate(date) {
var t = DATE_VALUE(this);
date = ToNumber(date);
- var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
+ var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -888,7 +890,7 @@
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
- var day = MakeDay(YEAR_FROM_TIME(t), month, date);
+ var day = MakeDay(YearFromTime(t), month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -898,7 +900,7 @@
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
- var day = MakeDay(YEAR_FROM_TIME(t), month, date);
+ var day = MakeDay(YearFromTime(t), month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -909,8 +911,8 @@
t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
- month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
- date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
+ month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+ date = argc < 3 ? DateFromTime(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -922,8 +924,8 @@
if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
- month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
- date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
+ month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+ date = argc < 3 ? DateFromTime(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -935,9 +937,9 @@
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
- + TwoDigitString(DATE_FROM_TIME(t)) + ' '
- + Months[MONTH_FROM_TIME(t)] + ' '
- + YEAR_FROM_TIME(t) + ' '
+ + TwoDigitString(DateFromTime(t)) + ' '
+ + Months[MonthFromTime(t)] + ' '
+ + YearFromTime(t) + ' '
+ TimeString(t) + ' GMT';
}
@@ -946,7 +948,7 @@
function DateGetYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return $NaN;
- return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
+ return YearFromTime(LocalTimeNoCheck(t)) - 1900;
}
@@ -958,7 +960,7 @@
if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
+ var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -984,16 +986,24 @@
function DateToISOString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
- '-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
- ':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
+ return this.getUTCFullYear() +
+ '-' + PadInt(this.getUTCMonth() + 1, 2) +
+ '-' + PadInt(this.getUTCDate(), 2) +
+ 'T' + PadInt(this.getUTCHours(), 2) +
+ ':' + PadInt(this.getUTCMinutes(), 2) +
+ ':' + PadInt(this.getUTCSeconds(), 2) +
'.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
}
function DateToJSON(key) {
- return CheckJSONPrimitive(this.toISOString());
+ var o = ToObject(this);
+ var tv = DefaultNumber(o);
+ if (IS_NUMBER(tv) && !$isFinite(tv)) {
+ return null;
+ }
+ return o.toISOString();
}
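
The range constants above trace back to ES5 15.9.1.1: a time value is
valid iff |t| <= 8.64e15 ms, i.e. 100,000,000 days on either side of
the epoch, which is the check TimeClip performs through MAX_TIME_MS. A
sketch of the bound (kMaxTimeMs carries the same value MAX_TIME_MS has
in date.js):

    #include <cmath>
    // 8.64e15 ms == 100,000,000 days * 86,400,000 ms/day.
    static inline bool TimeValueInRange(double t) {
      const double kMaxTimeMs = 8.64e15;
      return fabs(t) <= kMaxTimeMs;
    }
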
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index d091991..090c661 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -858,6 +858,7 @@
return debugger_flags;
};
+Debug.MakeMirror = MakeMirror;
function MakeExecutionState(break_id) {
return new ExecutionState(break_id);
@@ -876,9 +877,11 @@
return %PrepareStep(this.break_id, action, count);
}
-ExecutionState.prototype.evaluateGlobal = function(source, disable_break) {
- return MakeMirror(
- %DebugEvaluateGlobal(this.break_id, source, Boolean(disable_break)));
+ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
+ opt_additional_context) {
+ return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
+ Boolean(disable_break),
+ opt_additional_context));
};
ExecutionState.prototype.frameCount = function() {
@@ -1837,6 +1840,7 @@
var frame = request.arguments.frame;
var global = request.arguments.global;
var disable_break = request.arguments.disable_break;
+ var additional_context = request.arguments.additional_context;
// The expression argument could be an integer so we convert it to a
// string.
@@ -1850,12 +1854,30 @@
if (!IS_UNDEFINED(frame) && global) {
return response.failed('Arguments "frame" and "global" are exclusive');
}
+
+ var additional_context_object;
+ if (additional_context) {
+ additional_context_object = {};
+ for (var i = 0; i < additional_context.length; i++) {
+ var mapping = additional_context[i];
+ if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
+ return response.failed("Context element #" + i +
+ " must contain name:string and handle:number");
+ }
+ var context_value_mirror = LookupMirror(mapping.handle);
+ if (!context_value_mirror) {
+ return response.failed("Context object '" + mapping.name +
+ "' #" + mapping.handle + "# not found");
+ }
+ additional_context_object[mapping.name] = context_value_mirror.value();
+ }
+ }
// Global evaluate.
if (global) {
// Evaluate in the global context.
- response.body =
- this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
+ response.body = this.exec_state_.evaluateGlobal(
+ expression, Boolean(disable_break), additional_context_object);
return;
}
@@ -1877,12 +1899,12 @@
}
// Evaluate in the specified frame.
response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, Boolean(disable_break));
+ expression, Boolean(disable_break), additional_context_object);
return;
} else {
// Evaluate in the selected frame.
response.body = this.exec_state_.frame().evaluate(
- expression, Boolean(disable_break));
+ expression, Boolean(disable_break), additional_context_object);
return;
}
};
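
The additional_context argument handled above lets an evaluate request
inject named values by mirror handle before evaluation. A sketch of
such a request on the wire (the seq and handle values are placeholders;
real handles come from earlier lookup responses):

    { "seq": 117, "type": "request", "command": "evaluate",
      "arguments": { "expression": "a + b",
                     "global": true,
                     "disable_break": true,
                     "additional_context": [
                       { "name": "a", "handle": 17 },
                       { "name": "b", "handle": 18 } ] } }
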
diff --git a/src/execution.cc b/src/execution.cc
index e88d9cd..11dacfe 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -720,7 +720,6 @@
return Top::TerminateExecution();
}
if (StackGuard::IsInterrupted()) {
- // interrupt
StackGuard::Continue(INTERRUPT);
return Top::StackOverflow();
}
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
new file mode 100644
index 0000000..22a1c91
--- /dev/null
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -0,0 +1,263 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n-extension.h"
+
+#include <algorithm>
+#include <string>
+
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+I18NExtension* I18NExtension::extension_ = NULL;
+
+// TODO(cira): maybe move the JS code to a .js file and generate .cc files from it?
+const char* const I18NExtension::kSource =
+ "Locale = function(optLocale) {"
+ " native function NativeJSLocale();"
+ " var properties = NativeJSLocale(optLocale);"
+ " this.locale = properties.locale;"
+ " this.language = properties.language;"
+ " this.script = properties.script;"
+ " this.region = properties.region;"
+ "};"
+ "Locale.availableLocales = function() {"
+ " native function NativeJSAvailableLocales();"
+ " return NativeJSAvailableLocales();"
+ "};"
+ "Locale.prototype.maximizedLocale = function() {"
+ " native function NativeJSMaximizedLocale();"
+ " return new Locale(NativeJSMaximizedLocale(this.locale));"
+ "};"
+ "Locale.prototype.minimizedLocale = function() {"
+ " native function NativeJSMinimizedLocale();"
+ " return new Locale(NativeJSMinimizedLocale(this.locale));"
+ "};"
+ "Locale.prototype.displayLocale_ = function(displayLocale) {"
+ " var result = this.locale;"
+ " if (displayLocale !== undefined) {"
+ " result = displayLocale.locale;"
+ " }"
+ " return result;"
+ "};"
+ "Locale.prototype.displayLanguage = function(optDisplayLocale) {"
+ " var displayLocale = this.displayLocale_(optDisplayLocale);"
+ " native function NativeJSDisplayLanguage();"
+ " return NativeJSDisplayLanguage(this.locale, displayLocale);"
+ "};"
+ "Locale.prototype.displayScript = function(optDisplayLocale) {"
+ " var displayLocale = this.displayLocale_(optDisplayLocale);"
+ " native function NativeJSDisplayScript();"
+ " return NativeJSDisplayScript(this.locale, displayLocale);"
+ "};"
+ "Locale.prototype.displayRegion = function(optDisplayLocale) {"
+ " var displayLocale = this.displayLocale_(optDisplayLocale);"
+ " native function NativeJSDisplayRegion();"
+ " return NativeJSDisplayRegion(this.locale, displayLocale);"
+ "};"
+ "Locale.prototype.displayName = function(optDisplayLocale) {"
+ " var displayLocale = this.displayLocale_(optDisplayLocale);"
+ " native function NativeJSDisplayName();"
+ " return NativeJSDisplayName(this.locale, displayLocale);"
+ "};";
+
+v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
+ v8::Handle<v8::String> name) {
+ if (name->Equals(v8::String::New("NativeJSLocale"))) {
+ return v8::FunctionTemplate::New(JSLocale);
+ } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
+ return v8::FunctionTemplate::New(JSAvailableLocales);
+ } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
+ return v8::FunctionTemplate::New(JSMaximizedLocale);
+ } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
+ return v8::FunctionTemplate::New(JSMinimizedLocale);
+ } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
+ return v8::FunctionTemplate::New(JSDisplayLanguage);
+ } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
+ return v8::FunctionTemplate::New(JSDisplayScript);
+ } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
+ return v8::FunctionTemplate::New(JSDisplayRegion);
+ } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
+ return v8::FunctionTemplate::New(JSDisplayName);
+ }
+
+ return v8::Handle<v8::FunctionTemplate>();
+}
+
+v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
+ // TODO(cira): Fetch browser locale. Accept en-US as good default for now.
+ // We could possibly pass browser locale as a parameter in the constructor.
+ std::string locale_name("en-US");
+ if (args.Length() == 1 && args[0]->IsString()) {
+ locale_name = *v8::String::Utf8Value(args[0]->ToString());
+ }
+
+ v8::Local<v8::Object> locale = v8::Object::New();
+ locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
+
+ icu::Locale icu_locale(locale_name.c_str());
+
+ const char* language = icu_locale.getLanguage();
+ locale->Set(v8::String::New("language"), v8::String::New(language));
+
+ const char* script = icu_locale.getScript();
+ if (strlen(script)) {
+ locale->Set(v8::String::New("script"), v8::String::New(script));
+ }
+
+ const char* region = icu_locale.getCountry();
+ if (strlen(region)) {
+ locale->Set(v8::String::New("region"), v8::String::New(region));
+ }
+
+ return locale;
+}
+
+// TODO(cira): Filter out locales that Chrome doesn't support.
+v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
+ const v8::Arguments& args) {
+ v8::Local<v8::Array> all_locales = v8::Array::New();
+
+ int count = 0;
+ const Locale* icu_locales = icu::Locale::getAvailableLocales(count);
+ for (int i = 0; i < count; ++i) {
+ all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
+ }
+
+ return all_locales;
+}
+
+// Use - as tag separator, not _ that ICU uses.
+static std::string NormalizeLocale(const std::string& locale) {
+ std::string result(locale);
+ // TODO(cira): remove STL dependency.
+ std::replace(result.begin(), result.end(), '_', '-');
+ return result;
+}
+
+v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
+ const v8::Arguments& args) {
+ if (!args.Length() || !args[0]->IsString()) {
+ return v8::Undefined();
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
+ char max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_addLikelySubtags(locale_name.c_str(), max_locale,
+ sizeof(max_locale), &status);
+ if (U_FAILURE(status)) {
+ return v8::Undefined();
+ }
+
+ return v8::String::New(NormalizeLocale(max_locale).c_str());
+}
+
+v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
+ const v8::Arguments& args) {
+ if (!args.Length() || !args[0]->IsString()) {
+ return v8::Undefined();
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
+ char min_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_minimizeSubtags(locale_name.c_str(), min_locale,
+ sizeof(min_locale), &status);
+ if (U_FAILURE(status)) {
+ return v8::Undefined();
+ }
+
+ return v8::String::New(NormalizeLocale(min_locale).c_str());
+}
+
+// Common code for JSDisplayXXX methods.
+static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
+ const std::string& item) {
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+ return v8::Undefined();
+ }
+
+ std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
+ icu::Locale icu_locale(base_locale.c_str());
+ icu::Locale display_locale =
+ icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
+ UnicodeString result;
+ if (item == "language") {
+ icu_locale.getDisplayLanguage(display_locale, result);
+ } else if (item == "script") {
+ icu_locale.getDisplayScript(display_locale, result);
+ } else if (item == "region") {
+ icu_locale.getDisplayCountry(display_locale, result);
+ } else if (item == "name") {
+ icu_locale.getDisplayName(display_locale, result);
+ } else {
+ return v8::Undefined();
+ }
+
+ if (result.length()) {
+ return v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+ }
+
+ return v8::Undefined();
+}
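
Aside: the ICU call pattern in GetDisplayItem can be exercised standalone. A
minimal sketch, assuming an ICU build whose UnicodeString provides
toUTF8String (nothing below is taken from the change itself):

    // display_item_demo.cc -- hypothetical demo of the getDisplayLanguage
    // call used above: the name of locale "fr" as seen from locale "en".
    #include <cstdio>
    #include <string>
    #include <unicode/locid.h>
    #include <unicode/unistr.h>

    int main() {
      icu::Locale fr("fr");
      icu::UnicodeString result;
      fr.getDisplayLanguage(icu::Locale("en"), result);  // yields "French"
      std::string utf8;
      result.toUTF8String(utf8);
      printf("%s\n", utf8.c_str());
      return 0;
    }
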
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
+ const v8::Arguments& args) {
+ return GetDisplayItem(args, "language");
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
+ const v8::Arguments& args) {
+ return GetDisplayItem(args, "script");
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
+ const v8::Arguments& args) {
+ return GetDisplayItem(args, "region");
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
+ return GetDisplayItem(args, "name");
+}
+
+I18NExtension* I18NExtension::get() {
+ if (!extension_) {
+ extension_ = new I18NExtension();
+ }
+ return extension_;
+}
+
+void I18NExtension::Register() {
+ static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
+}
+
+} } // namespace v8::internal
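
Aside: the likely-subtags round trip wrapped by JSMaximizedLocale and
JSMinimizedLocale can be checked directly against ICU's C API. A standalone
sketch (file name and output comments are illustrative, not from this change):

    // likely_subtags_demo.cc
    #include <cstdio>
    #include <unicode/uloc.h>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      char max_locale[ULOC_FULLNAME_CAPACITY];
      // "en" maximizes to "en_Latn_US"; NormalizeLocale above would then
      // rewrite the '_' separators to '-' before returning to JavaScript.
      uloc_addLikelySubtags("en", max_locale, sizeof(max_locale), &status);
      if (U_FAILURE(status)) return 1;
      printf("maximized: %s\n", max_locale);

      char min_locale[ULOC_FULLNAME_CAPACITY];
      // Minimizing strips the subtags that are implied, giving back "en".
      uloc_minimizeSubtags(max_locale, min_locale, sizeof(min_locale), &status);
      if (U_FAILURE(status)) return 1;
      printf("minimized: %s\n", min_locale);
      return 0;
    }
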
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
new file mode 100644
index 0000000..629332b
--- /dev/null
+++ b/src/extensions/experimental/i18n-extension.h
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+
+#include <v8.h>
+
+namespace v8 {
+namespace internal {
+
+
+class I18NExtension : public v8::Extension {
+ public:
+ I18NExtension() : v8::Extension("v8/i18n", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+
+ // Implementations of window.Locale methods.
+ static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
+ static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
+
+  // V8 code prefers Register, while Chrome and WebKit use get()-style methods.
+ static void Register();
+ static I18NExtension* get();
+
+ private:
+ static const char* const kSource;
+ static I18NExtension* extension_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
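
Aside: an embedder enables the extension by registering it and then naming it
when creating a context. A rough sketch against the V8 embedding API of this
era (the surrounding setup is assumed, not part of this change):

    // Register once at startup, then list the extension for each context
    // that should see the window.Locale natives.
    v8::internal::I18NExtension::Register();
    const char* extension_names[] = { "v8/i18n" };
    v8::ExtensionConfiguration extensions(1, extension_names);
    v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
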
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 37653a4..facbec2 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -141,6 +141,7 @@
DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
+DEFINE_int(stress_runs, 0, "number of stress runs")
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
@@ -194,6 +195,7 @@
// codegen.cc
DEFINE_bool(lazy, true, "use lazy compilation")
DEFINE_bool(trace_opt, false, "trace lazy optimization")
+DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
DEFINE_bool(always_opt, false, "always try to optimize functions")
@@ -456,8 +458,6 @@
"log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
-DEFINE_bool(compress_log, false,
- "Compress log to save space (makes log less human-readable).")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true,
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 4eb10c7..96307a3 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -761,6 +761,7 @@
context()->EmitLogicalLeft(expr, &eval_right, &done);
+ PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
__ bind(&eval_right);
if (context()->IsTest()) ForwardBailoutToChild(expr);
context()->HandleExpression(expr->right());
@@ -925,16 +926,21 @@
if (stmt->HasElseStatement()) {
VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
+ PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
__ jmp(&done);
+ PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
__ bind(&else_part);
Visit(stmt->else_statement());
} else {
VisitForControl(stmt->condition(), &then_part, &done, &then_part);
+ PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
+
+ PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
}
__ bind(&done);
PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
@@ -946,6 +952,11 @@
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
+ // When continuing, we clobber the unpredictable value in the accumulator
+ // with one that's safe for GC. If we hit an exit from the try block of
+ // try...finally on our way out, we will unconditionally preserve the
+ // accumulator on the stack.
+ ClearAccumulator();
while (!current->IsContinueTarget(stmt->target())) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
@@ -962,6 +973,11 @@
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
+ // When breaking, we clobber the unpredictable value in the accumulator
+ // with one that's safe for GC. If we hit an exit from the try block of
+ // try...finally on our way out, we will unconditionally preserve the
+ // accumulator on the stack.
+ ClearAccumulator();
while (!current->IsBreakTarget(stmt->target())) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
@@ -1043,12 +1059,13 @@
&stack_check);
// Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
EmitStackCheck(stmt);
__ jmp(&body);
- __ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_target());
decrement_loop_depth();
}
@@ -1063,6 +1080,7 @@
// Emit the test at the bottom of the loop.
__ jmp(&test);
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
@@ -1080,8 +1098,8 @@
loop_statement.break_target(),
loop_statement.break_target());
- __ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_target());
decrement_loop_depth();
}
@@ -1099,12 +1117,12 @@
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
- __ bind(loop_statement.continue_target());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
-
+ __ bind(loop_statement.continue_target());
SetStatementPosition(stmt);
if (stmt->next() != NULL) {
Visit(stmt->next());
@@ -1127,8 +1145,8 @@
__ jmp(&body);
}
- __ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_target());
decrement_loop_depth();
}
@@ -1235,7 +1253,10 @@
Visit(stmt->try_block());
__ PopTryHandler();
}
- // Execute the finally block on the way out.
+ // Execute the finally block on the way out. Clobber the unpredictable
+ // value in the accumulator with one that's safe for GC. The finally
+ // block will unconditionally preserve the accumulator on the stack.
+ ClearAccumulator();
__ Call(&finally_entry);
}
@@ -1256,6 +1277,7 @@
Label true_case, false_case, done;
VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
+ PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
expr->then_expression_position());
@@ -1270,6 +1292,7 @@
__ jmp(&done);
}
+ PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 8d9fe2d..0482ee8 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -38,6 +38,9 @@
namespace v8 {
namespace internal {
+// Forward declarations.
+class JumpPatchSite;
+
// AST node visitor which can tell whether a given statement will be breakable
// when the code is compiled by the full compiler in the debugger. This means
// that there will be an IC (load/store/call) in the code generated for the
@@ -283,6 +286,10 @@
static const InlineFunctionGenerator kInlineFunctionGenerators[];
+ // A platform-specific utility to overwrite the accumulator register
+ // with a GC-safe value.
+ void ClearAccumulator();
+
// Compute the frame pointer relative offset for a given local or
// parameter slot.
int SlotOffset(Slot* slot);
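
Aside: the per-architecture definitions of ClearAccumulator live in the
platform full-codegen files. A plausible ARM version, shown only as a sketch
(not the actual code from this change), writes a GC-safe smi zero into the
accumulator register r0:

    void FullCodeGenerator::ClearAccumulator() {
      // Smi zero is a value the GC can always scan safely, unlike whatever
      // untagged intermediate the accumulator may hold at a break/continue.
      __ mov(r0, Operand(Smi::FromInt(0)));
    }
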
@@ -481,7 +488,7 @@
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator.
- void EmitAssignment(Expression* expr);
+ void EmitAssignment(Expression* expr, int bailout_ast_id);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
@@ -533,6 +540,10 @@
// Helper for calling an IC stub.
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+  // Helper for calling an IC stub with a patch site. Passing NULL for
+  // patch_site indicates that there is no inlined smi code, in which
+  // case a nop is emitted after the IC call.
+ void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
+
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
diff --git a/src/globals.h b/src/globals.h
index b56b835..35156ae 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -28,6 +28,8 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
+#include "../include/v8stdint.h"
+
namespace v8 {
namespace internal {
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 8f6fb98..ef83998 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 91ac986..0029500 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2009-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -348,27 +348,34 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
+ int type,
+ v8::ActivityControl* control) {
ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name, type);
+ return singleton_->TakeSnapshotImpl(name, type, control);
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
+ int type,
+ v8::ActivityControl* control) {
ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name, type);
+ return singleton_->TakeSnapshotImpl(name, type, control);
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
+ int type,
+ v8::ActivityControl* control) {
Heap::CollectAllGarbage(true);
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
+ bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- HeapSnapshotGenerator generator(result);
- generator.GenerateSnapshot();
+ HeapSnapshotGenerator generator(result, control);
+ generation_completed = generator.GenerateSnapshot();
break;
}
case HeapSnapshot::kAggregated: {
@@ -381,13 +388,19 @@
default:
UNREACHABLE();
}
- snapshots_->SnapshotGenerationFinished();
+ if (!generation_completed) {
+ delete result;
+ result = NULL;
+ }
+ snapshots_->SnapshotGenerationFinished(result);
return result;
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) {
- return TakeSnapshotImpl(snapshots_->GetName(name), type);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
+ int type,
+ v8::ActivityControl* control) {
+ return TakeSnapshotImpl(snapshots_->GetName(name), type, control);
}
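
Aside: the new control argument lets an embedder watch snapshot progress and
cancel generation. A minimal sketch of an implementation, assuming the
v8::ActivityControl interface this change threads through (the class below is
hypothetical):

    // Abort the snapshot once more than |limit| progress units are done.
    class AbortAfter : public v8::ActivityControl {
     public:
      explicit AbortAfter(int limit) : limit_(limit) {}
      virtual ControlOption ReportProgressValue(int done, int total) {
        // Returning kAbort makes TakeSnapshotImpl delete the partially
        // built snapshot and return NULL to the caller.
        return done > limit_ ? kAbort : kContinue;
      }
     private:
      int limit_;
    };
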
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 2ef081e..90c664e 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2009-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -56,8 +56,12 @@
static void TearDown();
#ifdef ENABLE_LOGGING_AND_PROFILING
- static HeapSnapshot* TakeSnapshot(const char* name, int type);
- static HeapSnapshot* TakeSnapshot(String* name, int type);
+ static HeapSnapshot* TakeSnapshot(const char* name,
+ int type,
+ v8::ActivityControl* control);
+ static HeapSnapshot* TakeSnapshot(String* name,
+ int type,
+ v8::ActivityControl* control);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
@@ -75,8 +79,12 @@
private:
HeapProfiler();
~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name, int type);
- HeapSnapshot* TakeSnapshotImpl(String* name, int type);
+ HeapSnapshot* TakeSnapshotImpl(const char* name,
+ int type,
+ v8::ActivityControl* control);
+ HeapSnapshot* TakeSnapshotImpl(String* name,
+ int type,
+ v8::ActivityControl* control);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
diff --git a/src/heap.cc b/src/heap.cc
index 0497ad5..ccf9b47 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3757,14 +3757,21 @@
static const int kIdlesBeforeScavenge = 4;
static const int kIdlesBeforeMarkSweep = 7;
static const int kIdlesBeforeMarkCompact = 8;
+ static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
+ static const int kGCsBetweenCleanup = 4;
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
bool uncommit = true;
bool finished = false;
- if (last_gc_count == gc_count_) {
- number_idle_notifications++;
+  // Reset the number of idle notifications received once enough GCs
+  // (kGCsBetweenCleanup) have taken place since the last cleanup. This
+  // allows another round of idle-notification cleanup after enough work
+  // has been done to provoke several garbage collections.
+ if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
+ number_idle_notifications =
+ Min(number_idle_notifications + 1, kMaxIdleCount);
} else {
number_idle_notifications = 0;
last_gc_count = gc_count_;
@@ -3779,7 +3786,6 @@
}
new_space_.Shrink();
last_gc_count = gc_count_;
-
} else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
// Before doing the mark-sweep collections we clear the
// compilation cache to avoid hanging on to source code and
@@ -3794,7 +3800,6 @@
CollectAllGarbage(true);
new_space_.Shrink();
last_gc_count = gc_count_;
- number_idle_notifications = 0;
finished = true;
} else if (contexts_disposed_ > 0) {
@@ -3813,6 +3818,11 @@
number_idle_notifications = 0;
uncommit = false;
}
+ } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
+ // If we have received more than kIdlesBeforeMarkCompact idle
+ // notifications we do not perform any cleanup because we don't
+ // expect to gain much by doing so.
+ finished = true;
}
// Make sure that we have no pending context disposals and
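
Aside: distilled, the new rule saturates the idle counter during a quiet
period and resets it only after kGCsBetweenCleanup collections have happened
since the last cleanup. A self-contained restatement (helper name is
hypothetical):

    static const int kMaxIdleCount = 9;  // kIdlesBeforeMarkCompact + 1
    static const int kGCsBetweenCleanup = 4;

    int UpdateIdleCount(int idle_count, int gc_count, int* last_gc_count) {
      if (gc_count < *last_gc_count + kGCsBetweenCleanup) {
        // Same quiet period: count the notification but saturate, so that
        // further notifications hit the new "don't expect to gain much"
        // branch instead of restarting the scavenge/mark-sweep ladder.
        return idle_count + 1 < kMaxIdleCount ? idle_count + 1 : kMaxIdleCount;
      }
      // Enough GCs since the last cleanup round: start counting afresh.
      *last_gc_count = gc_count;
      return 0;
    }
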
diff --git a/src/heap.h b/src/heap.h
index e4dcb4a..fbcc70d 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1119,9 +1119,9 @@
static int contexts_disposed_;
#if defined(V8_TARGET_ARCH_X64)
- static const int kMaxObjectSizeInNewSpace = 512*KB;
+ static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
- static const int kMaxObjectSizeInNewSpace = 256*KB;
+ static const int kMaxObjectSizeInNewSpace = 512*KB;
#endif
static NewSpace new_space_;
@@ -2054,8 +2054,9 @@
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
- // Inline implementation of the caching.
+ // Inline implementation of the cache.
friend class TranscendentalCacheStub;
+ friend class TranscendentalCacheSSE2Stub;
static TranscendentalCache* caches_[kNumberOfCaches];
Element elements_[kCacheSize];
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 670dad8..a96ee40 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -64,69 +64,34 @@
}
-static int32_t AddAssertNoOverflow(int32_t a, int32_t b) {
- ASSERT(static_cast<int64_t>(a + b) == (static_cast<int64_t>(a) +
- static_cast<int64_t>(b)));
- return a + b;
-}
-
-
-static int32_t SubAssertNoOverflow(int32_t a, int32_t b) {
- ASSERT(static_cast<int64_t>(a - b) == (static_cast<int64_t>(a) -
- static_cast<int64_t>(b)));
- return a - b;
-}
-
-
-static int32_t MulAssertNoOverflow(int32_t a, int32_t b) {
- ASSERT(static_cast<int64_t>(a * b) == (static_cast<int64_t>(a) *
- static_cast<int64_t>(b)));
- return a * b;
-}
-
-
-static int32_t AddWithoutOverflow(int32_t a, int32_t b) {
- if (b > 0) {
- if (a <= kMaxInt - b) return AddAssertNoOverflow(a, b);
+static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
+ if (result > kMaxInt) {
+ *overflow = true;
return kMaxInt;
- } else {
- if (a >= kMinInt - b) return AddAssertNoOverflow(a, b);
+ }
+ if (result < kMinInt) {
+ *overflow = true;
return kMinInt;
}
+ return static_cast<int32_t>(result);
}
-static int32_t SubWithoutOverflow(int32_t a, int32_t b) {
- if (b < 0) {
- if (a <= kMaxInt + b) return SubAssertNoOverflow(a, b);
- return kMaxInt;
- } else {
- if (a >= kMinInt + b) return SubAssertNoOverflow(a, b);
- return kMinInt;
- }
+static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+ int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
+ return ConvertAndSetOverflow(result, overflow);
+}
+
+
+static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+ int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
+ return ConvertAndSetOverflow(result, overflow);
}
static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
- if (b == 0 || a == 0) return 0;
- if (a == 1) return b;
- if (b == 1) return a;
-
- int sign = 1;
- if ((a < 0 && b > 0) || (a > 0 && b < 0)) sign = -1;
- if (a < 0) a = -a;
- if (b < 0) b = -b;
-
- if (kMaxInt / b > a && a != kMinInt && b != kMinInt) {
- return MulAssertNoOverflow(a, b) * sign;
- }
-
- *overflow = true;
- if (sign == 1) {
- return kMaxInt;
- } else {
- return kMinInt;
- }
+ int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
+ return ConvertAndSetOverflow(result, overflow);
}
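
Aside: the rewrite replaces per-operation case analysis with a single rule --
do the arithmetic in 64 bits, then clamp to the int32 range while recording
whether clamping happened. A self-contained worked example:

    #include <stdint.h>
    #include <stdio.h>

    static const int32_t kMaxInt = 0x7FFFFFFF;
    static const int32_t kMinInt = -kMaxInt - 1;

    static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
      if (result > kMaxInt) { *overflow = true; return kMaxInt; }
      if (result < kMinInt) { *overflow = true; return kMinInt; }
      return static_cast<int32_t>(result);
    }

    int main() {
      bool overflow = false;
      // (2^30) * 4 = 2^32 does not fit in 32 bits, so the result
      // saturates to kMaxInt and the flag is set.
      int64_t product = static_cast<int64_t>(1 << 30) * 4;
      int32_t clamped = ConvertAndSetOverflow(product, &overflow);
      printf("%d %d\n", clamped, overflow);  // prints: 2147483647 1
      return 0;
    }
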
@@ -143,39 +108,32 @@
}
-void Range::Add(int32_t value) {
+void Range::AddConstant(int32_t value) {
if (value == 0) return;
- lower_ = AddWithoutOverflow(lower_, value);
- upper_ = AddWithoutOverflow(upper_, value);
+ bool may_overflow = false; // Overflow is ignored here.
+ lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
+ upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
Verify();
}
-// Returns whether the add may overflow.
bool Range::AddAndCheckOverflow(Range* other) {
- int old_lower = lower_;
- int old_upper = upper_;
- lower_ = AddWithoutOverflow(lower_, other->lower());
- upper_ = AddWithoutOverflow(upper_, other->upper());
- bool r = (old_lower + other->lower() != lower_ ||
- old_upper + other->upper() != upper_);
+ bool may_overflow = false;
+ lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
+ upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
KeepOrder();
Verify();
- return r;
+ return may_overflow;
}
-// Returns whether the sub may overflow.
bool Range::SubAndCheckOverflow(Range* other) {
- int old_lower = lower_;
- int old_upper = upper_;
- lower_ = SubWithoutOverflow(lower_, other->lower());
- upper_ = SubWithoutOverflow(upper_, other->upper());
- bool r = (old_lower - other->lower() != lower_ ||
- old_upper - other->upper() != upper_);
+ bool may_overflow = false;
+ lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
+ upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
KeepOrder();
Verify();
- return r;
+ return may_overflow;
}
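
Note the bound pairing above: for intervals, [a, b] - [c, d] = [a - d, b - c],
so each bound is paired with the opposite bound of the other range. A quick
standalone check:

    #include <assert.h>

    int main() {
      // [1, 3] - [10, 20]: the smallest difference is 1 - 20 and the
      // largest is 3 - 10, so the result range is [-19, -7].
      int lower = 1 - 20;  // lower_ - other->upper()
      int upper = 3 - 10;  // upper_ - other->lower()
      assert(lower == -19 && upper == -7);
      return 0;
    }
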
@@ -193,7 +151,6 @@
}
-// Returns whether the mul may overflow.
bool Range::MulAndCheckOverflow(Range* other) {
bool may_overflow = false;
int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index ff1ab1a..aafa7a8 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -77,6 +77,7 @@
// HLoadKeyedFastElement
// HLoadKeyedGeneric
// HLoadNamedGeneric
+// HPower
// HStoreNamed
// HStoreNamedField
// HStoreNamedGeneric
@@ -93,13 +94,13 @@
// HCallStub
// HConstant
// HControlInstruction
+// HDeoptimize
// HGoto
// HUnaryControlInstruction
// HBranch
// HCompareMapAndBranch
// HReturn
// HThrow
-// HDeoptimize
// HEnterInlined
// HFunctionLiteral
// HGlobalObject
@@ -139,6 +140,7 @@
// HHasCachedArrayIndex
// HHasInstanceType
// HIsNull
+// HIsObject
// HIsSmi
// HValueOf
// HUnknownOSRValue
@@ -207,6 +209,7 @@
V(Goto) \
V(InstanceOf) \
V(IsNull) \
+ V(IsObject) \
V(IsSmi) \
V(HasInstanceType) \
V(HasCachedArrayIndex) \
@@ -223,6 +226,7 @@
V(ObjectLiteral) \
V(OsrEntry) \
V(Parameter) \
+ V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -330,6 +334,9 @@
set_can_be_minus_zero(false);
}
+  // Adds a constant to the lower and upper bounds of the range.
+ void AddConstant(int32_t value);
+
void StackUpon(Range* other) {
Intersect(other);
next_ = other;
@@ -349,7 +356,8 @@
set_can_be_minus_zero(b);
}
- void Add(int32_t value);
+  // Compute a new result range and return true if the operation
+  // can overflow.
bool AddAndCheckOverflow(Range* other);
bool SubAndCheckOverflow(Range* other);
bool MulAndCheckOverflow(Range* other);
@@ -1364,7 +1372,7 @@
class HUnaryMathOperation: public HUnaryOperation {
public:
- HUnaryMathOperation(HValue* value, MathFunctionId op)
+ HUnaryMathOperation(HValue* value, BuiltinFunctionId op)
: HUnaryOperation(value), op_(op) {
switch (op) {
case kMathFloor:
@@ -1377,8 +1385,12 @@
SetFlag(kFlexibleRepresentation);
break;
case kMathSqrt:
- default:
+ case kMathPowHalf:
+ case kMathLog:
set_representation(Representation::Double());
+ break;
+ default:
+ UNREACHABLE();
}
SetFlag(kUseGVN);
}
@@ -1395,6 +1407,8 @@
case kMathRound:
case kMathCeil:
case kMathSqrt:
+ case kMathPowHalf:
+ case kMathLog:
return Representation::Double();
break;
case kMathAbs:
@@ -1415,13 +1429,19 @@
return this;
}
- MathFunctionId op() const { return op_; }
+ BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
+ protected:
+ virtual bool DataEquals(HValue* other) const {
+ HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
+ return op_ == b->op();
+ }
+
private:
- MathFunctionId op_;
+ BuiltinFunctionId op_;
};
@@ -2087,11 +2107,25 @@
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
+ protected:
+ virtual bool DataEquals(HValue* other) const {
+ HIsNull* b = HIsNull::cast(other);
+ return is_strict_ == b->is_strict();
+ }
+
private:
bool is_strict_;
};
+class HIsObject: public HUnaryPredicate {
+ public:
+ explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+};
+
+
class HIsSmi: public HUnaryPredicate {
public:
explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
@@ -2116,6 +2150,12 @@
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
+ protected:
+ virtual bool DataEquals(HValue* other) const {
+ HHasInstanceType* b = HHasInstanceType::cast(other);
+ return (from_ == b->from()) && (to_ == b->to());
+ }
+
private:
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
@@ -2141,6 +2181,12 @@
Handle<String> class_name() const { return class_name_; }
+ protected:
+ virtual bool DataEquals(HValue* other) const {
+ HClassOfTest* b = HClassOfTest::cast(other);
+ return class_name_.is_identical_to(b->class_name_);
+ }
+
private:
Handle<String> class_name_;
};
@@ -2184,6 +2230,22 @@
};
+class HPower: public HBinaryOperation {
+ public:
+ HPower(HValue* left, HValue* right)
+ : HBinaryOperation(left, right) {
+ set_representation(Representation::Double());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return (index == 1) ? Representation::None() : Representation::Double();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+};
+
+
class HAdd: public HArithmeticBinaryOperation {
public:
HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 0e8c476..32108dc 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -64,9 +64,7 @@
first_instruction_index_(-1),
last_instruction_index_(-1),
deleted_phis_(4),
- is_inline_return_target_(false),
- inverted_(false),
- deopt_predecessor_(NULL) {
+ is_inline_return_target_(false) {
}
@@ -1031,12 +1029,12 @@
} else if (op == Token::LT || op == Token::LTE) {
new_range = range->CopyClearLower();
if (op == Token::LT) {
- new_range->Add(-1);
+ new_range->AddConstant(-1);
}
} else if (op == Token::GT || op == Token::GTE) {
new_range = range->CopyClearUpper();
if (op == Token::GT) {
- new_range->Add(1);
+ new_range->AddConstant(1);
}
}
@@ -1292,15 +1290,15 @@
for (int i = 0; i < graph_->blocks()->length(); i++) {
HBasicBlock* block = graph_->blocks()->at(i);
if (block->IsLoopHeader()) {
- HBasicBlock* backedge = block->loop_information()->GetLastBackEdge();
- HBasicBlock* dominator = backedge;
- bool backedge_dominated_by_call = false;
- while (dominator != block && !backedge_dominated_by_call) {
+ HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+ HBasicBlock* dominator = back_edge;
+ bool back_edge_dominated_by_call = false;
+ while (dominator != block && !back_edge_dominated_by_call) {
HInstruction* instr = dominator->first();
- while (instr != NULL && !backedge_dominated_by_call) {
+ while (instr != NULL && !back_edge_dominated_by_call) {
if (instr->IsCall()) {
- RemoveStackCheck(backedge);
- backedge_dominated_by_call = true;
+ RemoveStackCheck(back_edge);
+ back_edge_dominated_by_call = true;
}
instr = instr->next();
}
@@ -1983,6 +1981,9 @@
AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
: owner_(owner), kind_(kind), outer_(owner->ast_context()) {
owner->set_ast_context(this); // Push.
+#ifdef DEBUG
+ original_count_ = owner->environment()->total_count();
+#endif
}
@@ -1991,6 +1992,92 @@
}
+EffectContext::~EffectContext() {
+ ASSERT(owner()->HasStackOverflow() ||
+ !owner()->subgraph()->HasExit() ||
+ owner()->environment()->total_count() == original_count_);
+}
+
+
+ValueContext::~ValueContext() {
+ ASSERT(owner()->HasStackOverflow() ||
+ !owner()->subgraph()->HasExit() ||
+ owner()->environment()->total_count() == original_count_ + 1);
+}
+
+
+void EffectContext::ReturnValue(HValue* value) {
+ // The value is simply ignored.
+}
+
+
+void ValueContext::ReturnValue(HValue* value) {
+ // The value is tracked in the bailout environment, and communicated
+ // through the environment as the result of the expression.
+ owner()->Push(value);
+}
+
+
+void TestContext::ReturnValue(HValue* value) {
+ BuildBranch(value);
+}
+
+
+void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ owner()->AddInstruction(instr);
+ if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+}
+
+
+void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ owner()->AddInstruction(instr);
+ owner()->Push(instr);
+ if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+}
+
+
+void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ HGraphBuilder* builder = owner();
+ builder->AddInstruction(instr);
+ // We expect a simulate after every expression with side effects, though
+ // this one isn't actually needed (and wouldn't work if it were targeted).
+ if (instr->HasSideEffects()) {
+ builder->Push(instr);
+ builder->AddSimulate(ast_id);
+ builder->Pop();
+ }
+ BuildBranch(instr);
+}
+
+
+void TestContext::BuildBranch(HValue* value) {
+ // We expect the graph to be in edge-split form: there is no edge that
+ // connects a branch node to a join node. We conservatively ensure that
+ // property by always adding an empty block on the outgoing edges of this
+ // branch.
+ HGraphBuilder* builder = owner();
+ HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
+ HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
+ HBranch* branch = new HBranch(empty_true, empty_false, value);
+ builder->CurrentBlock()->Finish(branch);
+
+ HValue* const no_return_value = NULL;
+ HBasicBlock* true_target = if_true();
+ if (true_target->IsInlineReturnTarget()) {
+ empty_true->AddLeaveInlined(no_return_value, true_target);
+ } else {
+ empty_true->Goto(true_target);
+ }
+
+ HBasicBlock* false_target = if_false();
+ if (false_target->IsInlineReturnTarget()) {
+ empty_false->AddLeaveInlined(no_return_value, false_target);
+ } else {
+ empty_false->Goto(false_target);
+ }
+ builder->subgraph()->set_exit_block(NULL);
+}
+
// HGraphBuilder infrastructure for bailing out and checking bailouts.
#define BAILOUT(reason) \
@@ -2020,6 +2107,13 @@
} while (false)
+#define VISIT_FOR_CONTROL(expr, true_block, false_block) \
+ do { \
+ VisitForControl(expr, true_block, false_block); \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+
// 'thing' could be an expression, statement, or list of statements.
#define ADD_TO_SUBGRAPH(graph, thing) \
do { \
@@ -2061,55 +2155,22 @@
void HGraphBuilder::VisitForEffect(Expression* expr) {
-#ifdef DEBUG
- int original_count = environment()->total_count();
-#endif
- BinaryOperation* binary_op = expr->AsBinaryOperation();
-
- // We use special casing for expression types not handled properly by our
- // usual trick of pretending they're in a value context and cleaning up
- // later.
- if (binary_op != NULL && binary_op->op() == Token::COMMA) {
- VISIT_FOR_EFFECT(binary_op->left());
- VISIT_FOR_EFFECT(binary_op->right());
- } else {
- { EffectContext for_effect(this);
- Visit(expr);
- }
- if (HasStackOverflow() || !subgraph()->HasExit()) return;
- // Discard return value.
- Pop();
- // TODO(kasperl): Try to improve the way we compute the last added
- // instruction. The NULL check makes me uncomfortable.
- HValue* last = subgraph()->exit_block()->GetLastInstruction();
- // We need to ensure we emit a simulate after inlined functions in an
- // effect context, to avoid having a bailout target the fictional
- // environment with the return value on top.
- if ((last != NULL && last->HasSideEffects()) ||
- subgraph()->exit_block()->IsInlineReturnTarget()) {
- AddSimulate(expr->id());
- }
- }
-
- ASSERT(environment()->total_count() == original_count);
+ EffectContext for_effect(this);
+ Visit(expr);
}
void HGraphBuilder::VisitForValue(Expression* expr) {
-#ifdef DEBUG
- int original_height = environment()->values()->length();
-#endif
- { ValueContext for_value(this);
- Visit(expr);
- }
- if (HasStackOverflow() || !subgraph()->HasExit()) return;
- // TODO(kasperl): Try to improve the way we compute the last added
- // instruction. The NULL check makes me uncomfortable.
- HValue* last = subgraph()->exit_block()->GetLastInstruction();
- if (last != NULL && last->HasSideEffects()) {
- AddSimulate(expr->id());
- }
- ASSERT(environment()->values()->length() == original_height + 1);
+ ValueContext for_value(this);
+ Visit(expr);
+}
+
+
+void HGraphBuilder::VisitForControl(Expression* expr,
+ HBasicBlock* true_block,
+ HBasicBlock* false_block) {
+ TestContext for_test(this, true_block, false_block);
+ Visit(expr);
}
@@ -2202,144 +2263,6 @@
}
-void HGraphBuilder::VisitCondition(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block,
- bool invert_true,
- bool invert_false) {
- VisitForControl(expr, true_block, false_block, invert_true, invert_false);
- CHECK_BAILOUT;
-#ifdef DEBUG
- HValue* value = true_block->predecessors()->at(0)->last_environment()->Top();
- true_block->set_cond(HConstant::cast(value)->handle());
-
- value = false_block->predecessors()->at(0)->last_environment()->Top();
- false_block->set_cond(HConstant::cast(value)->handle());
-#endif
-
- true_block->SetJoinId(expr->id());
- false_block->SetJoinId(expr->id());
- true_block->last_environment()->Pop();
- false_block->last_environment()->Pop();
-}
-
-
-void HGraphBuilder::AddConditionToSubgraph(HSubgraph* subgraph,
- Expression* expr,
- HSubgraph* true_graph,
- HSubgraph* false_graph) {
- SubgraphScope scope(this, subgraph);
- VisitCondition(expr,
- true_graph->entry_block(),
- false_graph->entry_block(),
- false,
- false);
-}
-
-
-void HGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block,
- bool invert_true,
- bool invert_false) {
- TestContext for_test(this, true_block, false_block,
- invert_true, invert_false);
- BinaryOperation* binary_op = expr->AsBinaryOperation();
- UnaryOperation* unary_op = expr->AsUnaryOperation();
-
- if (unary_op != NULL && unary_op->op() == Token::NOT) {
- VisitForControl(unary_op->expression(),
- false_block,
- true_block,
- !invert_false,
- !invert_true);
- } else if (binary_op != NULL && binary_op->op() == Token::AND) {
- // Translate left subexpression.
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- VisitForControl(binary_op->left(),
- eval_right,
- false_block,
- false,
- invert_false);
- if (HasStackOverflow()) return;
- eval_right->SetJoinId(binary_op->left()->id());
-
- // Translate right subexpression.
- eval_right->last_environment()->Pop();
- subgraph()->set_exit_block(eval_right);
- VisitForControl(binary_op->right(),
- true_block,
- false_block,
- invert_true,
- invert_false);
- } else if (binary_op != NULL && binary_op->op() == Token::OR) {
- // Translate left subexpression.
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- VisitForControl(binary_op->left(),
- true_block,
- eval_right,
- invert_true,
- false);
- if (HasStackOverflow()) return;
- eval_right->SetJoinId(binary_op->left()->id());
-
- // Translate right subexpression
- eval_right->last_environment()->Pop();
- subgraph()->set_exit_block(eval_right);
- VisitForControl(binary_op->right(),
- true_block,
- false_block,
- invert_true,
- invert_false);
- } else {
-#ifdef DEBUG
- int original_length = environment()->values()->length();
-#endif
- // TODO(kmillikin): Refactor to avoid. This code is duplicated from
- // VisitForValue, except without pushing a value context on the
- // expression context stack.
- Visit(expr);
- if (HasStackOverflow() || !subgraph()->HasExit()) return;
- HValue* last = subgraph()->exit_block()->GetLastInstruction();
- if (last != NULL && last->HasSideEffects()) {
- AddSimulate(expr->id());
- }
- ASSERT(environment()->values()->length() == original_length + 1);
- HValue* value = Pop();
- HBasicBlock* materialize_true = graph()->CreateBasicBlock();
- HBasicBlock* materialize_false = graph()->CreateBasicBlock();
- CurrentBlock()->Finish(new HBranch(materialize_true,
- materialize_false,
- value));
- HValue* true_value = invert_true
- ? graph()->GetConstantFalse()
- : graph()->GetConstantTrue();
- materialize_true->set_inverted(invert_true);
- true_block->set_deopt_predecessor(materialize_true);
-
- if (true_block->IsInlineReturnTarget()) {
- materialize_true->AddLeaveInlined(true_value, true_block);
- } else {
- materialize_true->last_environment()->Push(true_value);
- materialize_true->Goto(true_block);
- }
- HValue* false_value = invert_false
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- materialize_false->set_inverted(invert_false);
- false_block->set_deopt_predecessor(materialize_false);
-
- if (false_block->IsInlineReturnTarget()) {
- materialize_false->AddLeaveInlined(false_value, false_block);
- } else {
- materialize_false->last_environment()->Push(false_value);
- materialize_false->Goto(false_block);
- }
- subgraph()->set_exit_block(NULL);
- }
-}
-
-
void HGraphBuilder::AddToSubgraph(HSubgraph* graph,
ZoneList<Statement*>* stmts) {
SubgraphScope scope(this, graph);
@@ -2372,12 +2295,6 @@
}
-void HGraphBuilder::PushAndAdd(HInstruction* instr, int position) {
- instr->set_position(position);
- PushAndAdd(instr);
-}
-
-
void HGraphBuilder::PushArgumentsForStubCall(int argument_count) {
const int kMaxStubArguments = 4;
ASSERT_GE(kMaxStubArguments, argument_count);
@@ -2392,7 +2309,7 @@
}
-void HGraphBuilder::ProcessCall(HCall* call, int source_position) {
+void HGraphBuilder::ProcessCall(HCall* call) {
for (int i = call->argument_count() - 1; i >= 0; --i) {
HValue* value = Pop();
HPushArgument* push = new HPushArgument(value);
@@ -2402,8 +2319,6 @@
for (int i = 0; i < call->argument_count(); ++i) {
AddInstruction(call->PushArgumentAt(i));
}
-
- PushAndAdd(call, source_position);
}
@@ -2527,19 +2442,24 @@
void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
if (stmt->condition()->ToBooleanIsTrue()) {
+ AddSimulate(stmt->ThenId());
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
+ AddSimulate(stmt->ElseId());
Visit(stmt->else_statement());
} else {
HSubgraph* then_graph = CreateEmptySubgraph();
HSubgraph* else_graph = CreateEmptySubgraph();
- VisitCondition(stmt->condition(),
- then_graph->entry_block(),
- else_graph->entry_block(),
- false, false);
- if (HasStackOverflow()) return;
+ VISIT_FOR_CONTROL(stmt->condition(),
+ then_graph->entry_block(),
+ else_graph->entry_block());
+
+ then_graph->entry_block()->SetJoinId(stmt->ThenId());
ADD_TO_SUBGRAPH(then_graph, stmt->then_statement());
+
+ else_graph->entry_block()->SetJoinId(stmt->ElseId());
ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
+
current_subgraph_->AppendJoin(then_graph, else_graph, stmt);
}
}
@@ -2569,9 +2489,7 @@
TestContext* test = TestContext::cast(context);
VisitForControl(stmt->expression(),
test->if_true(),
- test->if_false(),
- false,
- false);
+ test->if_false());
} else {
HValue* return_value = NULL;
if (context->IsEffect()) {
@@ -2617,6 +2535,8 @@
void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
VISIT_FOR_VALUE(stmt->tag());
+  // TODO(3168478): The simulate added for the tag should be enough.
+ AddSimulate(stmt->EntryId());
HValue* switch_value = Pop();
ZoneList<CaseClause*>* clauses = stmt->cases();
@@ -2624,11 +2544,17 @@
if (num_clauses == 0) return;
if (num_clauses > 128) BAILOUT("SwitchStatement: too many clauses");
+ int num_smi_clauses = num_clauses;
for (int i = 0; i < num_clauses; i++) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) continue;
clause->RecordTypeFeedback(oracle());
- if (!clause->IsSmiCompare()) BAILOUT("SwitchStatement: non-smi compare");
+ if (!clause->IsSmiCompare()) {
+ if (i == 0) BAILOUT("SwitchStatement: no smi compares");
+ // We will deoptimize if the first non-smi compare is reached.
+ num_smi_clauses = i;
+ break;
+ }
if (!clause->label()->IsSmiLiteral()) {
BAILOUT("SwitchStatement: non-literal switch label");
}
@@ -2639,17 +2565,18 @@
// Build a series of empty subgraphs for the comparisons.
// The default clause does not have a comparison subgraph.
- ZoneList<HSubgraph*> compare_graphs(num_clauses);
- for (int i = 0; i < num_clauses; i++) {
- HSubgraph* subgraph = !clauses->at(i)->is_default()
- ? CreateEmptySubgraph()
- : NULL;
- compare_graphs.Add(subgraph);
+ ZoneList<HSubgraph*> compare_graphs(num_smi_clauses);
+ for (int i = 0; i < num_smi_clauses; i++) {
+ if (clauses->at(i)->is_default()) {
+ compare_graphs.Add(NULL);
+ } else {
+ compare_graphs.Add(CreateEmptySubgraph());
+ }
}
HSubgraph* prev_graph = current_subgraph_;
HCompare* prev_compare_inst = NULL;
- for (int i = 0; i < num_clauses; i++) {
+ for (int i = 0; i < num_smi_clauses; i++) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) continue;
@@ -2666,6 +2593,7 @@
}
// Build instructions for current subgraph.
+ ASSERT(clause->IsSmiCompare());
prev_compare_inst = BuildSwitchCompare(subgraph, switch_value, clause);
if (HasStackOverflow()) return;
@@ -2685,33 +2613,52 @@
prev_compare_inst));
}
+ // If we have a non-smi compare clause, we deoptimize after trying
+ // all the previous compares.
+ if (num_smi_clauses < num_clauses) {
+ last_false_block->Finish(new HDeoptimize);
+ }
+
// Build statement blocks, connect them to their comparison block and
// to the previous statement block, if there is a fall-through.
HSubgraph* previous_subgraph = NULL;
for (int i = 0; i < num_clauses; i++) {
CaseClause* clause = clauses->at(i);
- HSubgraph* subgraph = CreateEmptySubgraph();
+    // The subgraph for the clause's statements is only created when it is
+    // reachable, either from the corresponding compare or as a
+    // fall-through from the previous statements.
+ HSubgraph* subgraph = NULL;
- if (clause->is_default()) {
- // Default clause: Connect it to the last false block.
- last_false_block->Finish(new HGoto(subgraph->entry_block()));
- } else {
- // Connect with the corresponding comparison.
- HBasicBlock* empty =
- compare_graphs.at(i)->exit_block()->end()->FirstSuccessor();
- empty->Finish(new HGoto(subgraph->entry_block()));
+ if (i < num_smi_clauses) {
+ if (clause->is_default()) {
+ if (!last_false_block->IsFinished()) {
+ // Default clause: Connect it to the last false block.
+ subgraph = CreateEmptySubgraph();
+ last_false_block->Finish(new HGoto(subgraph->entry_block()));
+ }
+ } else {
+ ASSERT(clause->IsSmiCompare());
+ // Connect with the corresponding comparison.
+ subgraph = CreateEmptySubgraph();
+ HBasicBlock* empty =
+ compare_graphs.at(i)->exit_block()->end()->FirstSuccessor();
+ empty->Finish(new HGoto(subgraph->entry_block()));
+ }
}
// Check for fall-through from previous statement block.
if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
+ if (subgraph == NULL) subgraph = CreateEmptySubgraph();
previous_subgraph->exit_block()->
Finish(new HGoto(subgraph->entry_block()));
}
- ADD_TO_SUBGRAPH(subgraph, clause->statements());
- HBasicBlock* break_block = subgraph->BundleBreak(stmt);
- if (break_block != NULL) {
- break_block->Finish(new HGoto(single_exit_block));
+ if (subgraph != NULL) {
+ ADD_TO_SUBGRAPH(subgraph, clause->statements());
+ HBasicBlock* break_block = subgraph->BundleBreak(stmt);
+ if (break_block != NULL) {
+ break_block->Finish(new HGoto(single_exit_block));
+ }
}
previous_subgraph = subgraph;
@@ -2719,7 +2666,7 @@
// If the last statement block has a fall-through, connect it to the
// single exit block.
- if (previous_subgraph->HasExit()) {
+ if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block));
}
@@ -2785,8 +2732,14 @@
} else {
HSubgraph* go_back = CreateEmptySubgraph();
HSubgraph* exit = CreateEmptySubgraph();
- AddConditionToSubgraph(body_graph, stmt->cond(), go_back, exit);
- if (HasStackOverflow()) return;
+ {
+ SubgraphScope scope(this, body_graph);
+ VISIT_FOR_CONTROL(stmt->cond(),
+ go_back->entry_block(),
+ exit->entry_block());
+ go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
+ exit->entry_block()->SetJoinId(stmt->ExitId());
+ }
current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit);
}
}
@@ -2813,8 +2766,14 @@
cond_graph = CreateLoopHeaderSubgraph(environment());
body_graph = CreateEmptySubgraph();
exit_graph = CreateEmptySubgraph();
- AddConditionToSubgraph(cond_graph, stmt->cond(), body_graph, exit_graph);
- if (HasStackOverflow()) return;
+ {
+ SubgraphScope scope(this, cond_graph);
+ VISIT_FOR_CONTROL(stmt->cond(),
+ body_graph->entry_block(),
+ exit_graph->entry_block());
+ body_graph->entry_block()->SetJoinId(stmt->BodyId());
+ exit_graph->entry_block()->SetJoinId(stmt->ExitId());
+ }
ADD_TO_SUBGRAPH(body_graph, stmt->body());
}
@@ -2864,13 +2823,18 @@
cond_graph = CreateLoopHeaderSubgraph(environment());
body_graph = CreateEmptySubgraph();
exit_graph = CreateEmptySubgraph();
- AddConditionToSubgraph(cond_graph, stmt->cond(), body_graph, exit_graph);
- if (HasStackOverflow()) return;
- ADD_TO_SUBGRAPH(body_graph, stmt->body());
+ {
+ SubgraphScope scope(this, cond_graph);
+ VISIT_FOR_CONTROL(stmt->cond(),
+ body_graph->entry_block(),
+ exit_graph->entry_block());
+ body_graph->entry_block()->SetJoinId(stmt->BodyId());
+ exit_graph->entry_block()->SetJoinId(stmt->ExitId());
+ }
} else {
body_graph = CreateLoopHeaderSubgraph(environment());
- ADD_TO_SUBGRAPH(body_graph, stmt->body());
}
+ ADD_TO_SUBGRAPH(body_graph, stmt->body());
HSubgraph* next_graph = NULL;
body_graph->ResolveContinue(stmt);
@@ -2914,7 +2878,9 @@
Handle<SharedFunctionInfo> shared_info =
Compiler::BuildFunctionInfo(expr, graph_->info()->script());
CHECK_BAILOUT;
- PushAndAdd(new HFunctionLiteral(shared_info, expr->pretenure()));
+ HFunctionLiteral* instr =
+ new HFunctionLiteral(shared_info, expr->pretenure());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -2927,28 +2893,32 @@
void HGraphBuilder::VisitConditional(Conditional* expr) {
HSubgraph* then_graph = CreateEmptySubgraph();
HSubgraph* else_graph = CreateEmptySubgraph();
- VisitCondition(expr->condition(),
- then_graph->entry_block(),
- else_graph->entry_block(),
- false, false);
- if (HasStackOverflow()) return;
+ VISIT_FOR_CONTROL(expr->condition(),
+ then_graph->entry_block(),
+ else_graph->entry_block());
+
+ then_graph->entry_block()->SetJoinId(expr->ThenId());
ADD_TO_SUBGRAPH(then_graph, expr->then_expression());
+
+ else_graph->entry_block()->SetJoinId(expr->ElseId());
ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
+
current_subgraph_->AppendJoin(then_graph, else_graph, expr);
+ ast_context()->ReturnValue(Pop());
}
-void HGraphBuilder::LookupGlobalPropertyCell(VariableProxy* expr,
+void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
LookupResult* lookup,
bool is_store) {
- if (expr->is_this()) {
+ if (var->is_this()) {
BAILOUT("global this reference");
}
if (!graph()->info()->has_global_object()) {
BAILOUT("no global object to optimize VariableProxy");
}
Handle<GlobalObject> global(graph()->info()->global_object());
- global->Lookup(*expr->name(), lookup);
+ global->Lookup(*var->name(), lookup);
if (!lookup->IsProperty()) {
BAILOUT("global variable cell not yet introduced");
}
@@ -2961,23 +2931,6 @@
}
-void HGraphBuilder::HandleGlobalVariableLoad(VariableProxy* expr) {
- LookupResult lookup;
- LookupGlobalPropertyCell(expr, &lookup, false);
- CHECK_BAILOUT;
-
- Handle<GlobalObject> global(graph()->info()->global_object());
- // TODO(3039103): Handle global property load through an IC call when access
- // checks are enabled.
- if (global->IsAccessCheckNeeded()) {
- BAILOUT("global object requires access check");
- }
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
- PushAndAdd(new HLoadGlobal(cell, check_hole));
-}
-
-
void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Variable* variable = expr->AsVariable();
if (variable == NULL) {
@@ -2986,9 +2939,22 @@
if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
BAILOUT("unsupported context for arguments object");
}
- Push(environment()->Lookup(variable));
+ ast_context()->ReturnValue(environment()->Lookup(variable));
} else if (variable->is_global()) {
- HandleGlobalVariableLoad(expr);
+ LookupResult lookup;
+ LookupGlobalPropertyCell(variable, &lookup, false);
+ CHECK_BAILOUT;
+
+ Handle<GlobalObject> global(graph()->info()->global_object());
+ // TODO(3039103): Handle global property load through an IC call when access
+ // checks are enabled.
+ if (global->IsAccessCheckNeeded()) {
+ BAILOUT("global object requires access check");
+ }
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+ bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+ HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
+ ast_context()->ReturnInstruction(instr, expr->id());
} else {
BAILOUT("reference to non-stack-allocated/non-global variable");
}
@@ -2996,14 +2962,16 @@
void HGraphBuilder::VisitLiteral(Literal* expr) {
- PushAndAdd(new HConstant(expr->handle(), Representation::Tagged()));
+ HConstant* instr = new HConstant(expr->handle(), Representation::Tagged());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- PushAndAdd(new HRegExpLiteral(expr->pattern(),
- expr->flags(),
- expr->literal_index()));
+ HRegExpLiteral* instr = new HRegExpLiteral(expr->pattern(),
+ expr->flags(),
+ expr->literal_index());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3012,6 +2980,8 @@
expr->fast_elements(),
expr->literal_index(),
expr->depth()));
+ // The object is expected in the bailout environment during computation
+ // of the property values and is the value of the entire expression.
PushAndAdd(literal);
expr->CalculateEmitStore();
@@ -3048,6 +3018,7 @@
default: UNREACHABLE();
}
}
+ ast_context()->ReturnValue(Pop());
}
@@ -3059,8 +3030,11 @@
length,
expr->literal_index(),
expr->depth());
+ // The array is expected in the bailout environment during computation
+ // of the property values and is the value of the entire expression.
PushAndAdd(literal);
- HValue* elements = AddInstruction(new HLoadElements(literal));
+
+ HLoadElements* elements = NULL;
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
@@ -3071,11 +3045,19 @@
VISIT_FOR_VALUE(subexpr);
HValue* value = Pop();
if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+
+ // Load the elements array before the first store.
+ if (elements == NULL) {
+ elements = new HLoadElements(literal);
+ AddInstruction(elements);
+ }
+
HValue* key = AddInstruction(new HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
AddInstruction(new HStoreKeyedFastElement(elements, key, value));
AddSimulate(expr->GetIdForElement(i));
}
+ ast_context()->ReturnValue(Pop());
}
@@ -3257,27 +3239,29 @@
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- return;
- }
-
- // Build subgraph for generic store through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
- if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
- } else {
- HInstruction* instr = new HStoreNamedGeneric(object, name, value);
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->id());
+ } else {
+ // Build subgraph for generic store through IC.
+ {
+ HSubgraph* subgraph = CreateBranchSubgraph(environment());
+ SubgraphScope scope(this, subgraph);
+ if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+ subgraph->FinishExit(new HDeoptimize());
+ } else {
+ HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+ Push(value);
+ instr->set_position(expr->position());
+ AddInstruction(instr);
+ }
+ subgraphs.Add(subgraph);
}
- subgraphs.Add(subgraph);
+
+ HBasicBlock* new_exit_block =
+ BuildTypeSwitch(&maps, &subgraphs, object, expr->AssignmentId());
+ subgraph()->set_exit_block(new_exit_block);
}
- HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
- current_subgraph_->set_exit_block(new_exit_block);
+ if (subgraph()->HasExit()) ast_context()->ReturnValue(Pop());
}
@@ -3333,14 +3317,20 @@
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ast_context()->ReturnValue(Pop());
}
-void HGraphBuilder::HandleGlobalVariableAssignment(VariableProxy* proxy,
+// Because not every expression has a position and there is no common
+// superclass of Assignment and CountOperation, we cannot simply pass the
+// owning expression; position and ast_id must be passed separately.
+void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
HValue* value,
- int position) {
+ int position,
+ int ast_id) {
LookupResult lookup;
- LookupGlobalPropertyCell(proxy, &lookup, true);
+ LookupGlobalPropertyCell(var, &lookup, true);
CHECK_BAILOUT;
Handle<GlobalObject> global(graph()->info()->global_object());
@@ -3348,6 +3338,7 @@
HInstruction* instr = new HStoreGlobal(value, cell);
instr->set_position(position);
AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(ast_id);
}
@@ -3371,10 +3362,15 @@
VISIT_FOR_VALUE(operation);
if (var->is_global()) {
- HandleGlobalVariableAssignment(proxy, Top(), expr->position());
+ HandleGlobalVariableAssignment(var,
+ Top(),
+ expr->position(),
+ expr->AssignmentId());
} else {
Bind(var, Top());
}
+ ast_context()->ReturnValue(Pop());
+
} else if (prop != NULL) {
prop->RecordTypeFeedback(oracle());
@@ -3392,9 +3388,7 @@
load = BuildLoadNamedGeneric(obj, prop);
}
PushAndAdd(load);
- if (load->HasSideEffects()) {
- AddSimulate(expr->compound_bailout_id());
- }
+ if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
VISIT_FOR_VALUE(expr->value());
HValue* right = Pop();
@@ -3406,10 +3400,11 @@
HInstruction* store = BuildStoreNamed(obj, instr, prop);
AddInstruction(store);
-
- // Drop the simulated receiver and value and put back the value.
+ // Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
+ if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ast_context()->ReturnValue(Pop());
} else {
// Keyed property.
@@ -3425,9 +3420,7 @@
? BuildLoadKeyedFastElement(obj, key, prop)
: BuildLoadKeyedGeneric(obj, key);
PushAndAdd(load);
- if (load->HasSideEffects()) {
- AddSimulate(expr->compound_bailout_id());
- }
+ if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
VISIT_FOR_VALUE(expr->value());
HValue* right = Pop();
@@ -3441,11 +3434,13 @@
? BuildStoreKeyedFastElement(obj, key, instr, prop)
: BuildStoreKeyedGeneric(obj, key, instr);
AddInstruction(store);
-
- // Drop the simulated receiver, key and value and put back the value.
+ // Drop the simulated receiver, key, and value. Return the value.
Drop(3);
Push(instr);
+ if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ast_context()->ReturnValue(Pop());
}
+
} else {
BAILOUT("invalid lhs in compound assignment");
}
@@ -3465,9 +3460,14 @@
if (var != NULL) {
if (proxy->IsArguments()) BAILOUT("assignment to arguments");
+
+ // Handle the assignment.
if (var->is_global()) {
VISIT_FOR_VALUE(expr->value());
- HandleGlobalVariableAssignment(proxy, Top(), expr->position());
+ HandleGlobalVariableAssignment(var,
+ Top(),
+ expr->position(),
+ expr->AssignmentId());
} else {
// We allow reference to the arguments object only in assignments
// to local variables to make sure that the arguments object does
@@ -3480,9 +3480,11 @@
} else {
VISIT_FOR_VALUE(expr->value());
}
-
Bind(proxy->var(), Top());
}
+ // Return the value.
+ ast_context()->ReturnValue(Pop());
+
} else if (prop != NULL) {
HandlePropertyAssignment(expr);
} else {
@@ -3492,6 +3494,10 @@
void HGraphBuilder::VisitThrow(Throw* expr) {
+ // We don't optimize functions with invalid left-hand sides in
+ // assignments, count operations, or for-in. Consequently, throw can
+ // currently only occur in an effect context.
+ ASSERT(ast_context()->IsEffect());
VISIT_FOR_VALUE(expr->exception());
HValue* value = environment()->Pop();
@@ -3525,7 +3531,8 @@
SubgraphScope scope(this, subgraph);
HInstruction* instr =
BuildLoadNamedField(object, expr, map, &lookup, false);
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ PushAndAdd(instr);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@@ -3536,26 +3543,30 @@
// generic load.
if (maps.length() == 0) {
HInstruction* instr = BuildLoadNamedGeneric(object, expr);
- PushAndAdd(instr, expr->position());
- return;
- }
-
- // Build subgraph for generic load through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
- if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
- } else {
- HInstruction* instr = BuildLoadNamedGeneric(object, expr);
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ PushAndAdd(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->id());
+ } else {
+ // Build subgraph for generic load through IC.
+ {
+ HSubgraph* subgraph = CreateBranchSubgraph(environment());
+ SubgraphScope scope(this, subgraph);
+ if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+ subgraph->FinishExit(new HDeoptimize());
+ } else {
+ HInstruction* instr = BuildLoadNamedGeneric(object, expr);
+ instr->set_position(expr->position());
+ PushAndAdd(instr);
+ }
+ subgraphs.Add(subgraph);
}
- subgraphs.Add(subgraph);
+
+ HBasicBlock* new_exit_block =
+ BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
+ subgraph()->set_exit_block(new_exit_block);
}
- HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
- current_subgraph_->set_exit_block(new_exit_block);
+ if (subgraph()->HasExit()) ast_context()->ReturnValue(Pop());
}
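
The control shape built here, sketched as pseudocode (editorial; the
dispatch itself is supplied by BuildTypeSwitch):

    // switch (object->map()) {          // BuildTypeSwitch on the receiver map
    //   case map_0: monomorphic load;   // one branch subgraph per known map
    //   ...
    //   default:                        // the subgraph built above:
    //     if (!needs_generic && FLAG_deoptimize_uncommon_cases) deoptimize;
    //     else generic IC load;
    // }
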
@@ -3603,6 +3614,11 @@
map,
&lookup,
true);
+ } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+ AddInstruction(new HCheckNonSmi(obj));
+ AddInstruction(new HCheckMap(obj, map));
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
+ return new HConstant(function, Representation::Tagged());
} else {
return BuildLoadNamedGeneric(obj, expr);
}
@@ -3668,11 +3684,12 @@
return false;
}
+ HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
if (!name->IsEqualTo(CStrVector("length"))) return false;
HInstruction* elements = AddInstruction(new HArgumentsElements);
- PushAndAdd(new HArgumentsLength(elements));
+ result = new HArgumentsLength(elements);
} else {
VisitForValue(expr->key());
if (HasStackOverflow()) return false;
@@ -3680,8 +3697,9 @@
HInstruction* elements = AddInstruction(new HArgumentsElements);
HInstruction* length = AddInstruction(new HArgumentsLength(elements));
AddInstruction(new HBoundsCheck(key, length));
- PushAndAdd(new HAccessArgumentsAt(elements, length, key));
+ result = new HAccessArgumentsAt(elements, length, key);
}
+ ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -3728,7 +3746,8 @@
? BuildLoadKeyedFastElement(obj, key, expr)
: BuildLoadKeyedGeneric(obj, key);
}
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3763,9 +3782,9 @@
// Build subgraphs for each of the specific maps.
//
- // TODO(ager): We should recognize when the prototype chains for
- // different maps are identical. In that case we can avoid
- // repeatedly generating the same prototype map checks.
+ // TODO(ager): We should recognize when the prototype chains for different
+ // maps are identical. In that case we can avoid repeatedly generating the
+ // same prototype map checks.
for (int i = 0; i < number_of_types; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
@@ -3782,7 +3801,9 @@
// during hydrogen processing.
CHECK_BAILOUT;
HCall* call = new HCallConstantFunction(expr->target(), argument_count);
- ProcessCall(call, expr->position());
+ call->set_position(expr->position());
+ ProcessCall(call);
+ PushAndAdd(call);
}
subgraphs.Add(subgraph);
} else {
@@ -3790,30 +3811,34 @@
}
}
- // If we couldn't compute the target for any of the maps just
- // perform an IC call.
+ // If we couldn't compute the target for any of the maps just perform an
+ // IC call.
if (maps.length() == 0) {
HCall* call = new HCallNamed(name, argument_count);
- ProcessCall(call, expr->position());
- return;
- }
-
- // Build subgraph for generic call through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
- if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
- } else {
- HCall* call = new HCallNamed(name, argument_count);
- ProcessCall(call, expr->position());
+ call->set_position(expr->position());
+ ProcessCall(call);
+ ast_context()->ReturnInstruction(call, expr->id());
+ } else {
+ // Build subgraph for generic call through IC.
+ {
+ HSubgraph* subgraph = CreateBranchSubgraph(environment());
+ SubgraphScope scope(this, subgraph);
+ if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+ subgraph->FinishExit(new HDeoptimize());
+ } else {
+ HCall* call = new HCallNamed(name, argument_count);
+ call->set_position(expr->position());
+ ProcessCall(call);
+ PushAndAdd(call);
+ }
+ subgraphs.Add(subgraph);
}
- subgraphs.Add(subgraph);
- }
- HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
- current_subgraph_->set_exit_block(new_exit_block);
+ HBasicBlock* new_exit_block =
+ BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
+ subgraph()->set_exit_block(new_exit_block);
+ if (new_exit_block != NULL) ast_context()->ReturnValue(Pop());
+ }
}
@@ -3940,10 +3965,7 @@
if_true->MarkAsInlineReturnTarget();
if_false->MarkAsInlineReturnTarget();
// AstContext constructor pushes on the context stack.
- bool invert_true = TestContext::cast(ast_context())->invert_true();
- bool invert_false = TestContext::cast(ast_context())->invert_false();
- test_context = new TestContext(this, if_true, if_false,
- invert_true, invert_false);
+ test_context = new TestContext(this, if_true, if_false);
function_return_ = NULL;
} else {
// Inlined body is treated as if it occurs in the original call context.
@@ -3987,16 +4009,15 @@
// simply jumping to the false target.
//
// TODO(3168478): refactor to avoid this.
- HBasicBlock* materialize_true = graph()->CreateBasicBlock();
- HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+ HBasicBlock* empty_true = graph()->CreateBasicBlock();
+ HBasicBlock* empty_false = graph()->CreateBasicBlock();
HBranch* branch =
- new HBranch(materialize_true, materialize_false, return_value);
+ new HBranch(empty_true, empty_false, return_value);
body->exit_block()->Finish(branch);
- materialize_true->AddLeaveInlined(graph()->GetConstantTrue(),
- test_context->if_true());
- materialize_false->AddLeaveInlined(graph()->GetConstantFalse(),
- test_context->if_false());
+ HValue* const no_return_value = NULL;
+ empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
+ empty_false->AddLeaveInlined(no_return_value, test_context->if_false());
}
body->set_exit_block(NULL);
}
@@ -4015,35 +4036,20 @@
if_false->SetJoinId(expr->id());
ASSERT(ast_context() == test_context);
delete test_context; // Destructor pops from expression context stack.
- // Forward to the real test context.
- // Discard the lingering branch value (which may be true or false,
- // depending on whether the final condition was negated) and jump to the
- // true target with a true branch value.
+ // Forward to the real test context.
+ HValue* const no_return_value = NULL;
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- bool invert_true = TestContext::cast(ast_context())->invert_true();
- HValue* true_value = invert_true
- ? graph()->GetConstantFalse()
- : graph()->GetConstantTrue();
- if_true->last_environment()->Pop();
if (true_target->IsInlineReturnTarget()) {
- if_true->AddLeaveInlined(true_value, true_target);
+ if_true->AddLeaveInlined(no_return_value, true_target);
} else {
- if_true->last_environment()->Push(true_value);
if_true->Goto(true_target);
}
- // Do the same for the false target.
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- bool invert_false = TestContext::cast(ast_context())->invert_false();
- HValue* false_value = invert_false
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- if_false->last_environment()->Pop();
if (false_target->IsInlineReturnTarget()) {
- if_false->AddLeaveInlined(false_value, false_target);
+ if_false->AddLeaveInlined(no_return_value, false_target);
} else {
- if_false->last_environment()->Push(false_value);
if_false->Goto(false_target);
}
@@ -4061,6 +4067,7 @@
function_return_ = saved_function_return;
oracle_ = saved_oracle;
graph()->info()->SetOsrAstId(saved_osr_ast_id);
+
return true;
}
@@ -4069,7 +4076,7 @@
ASSERT(target->IsInlineReturnTarget());
AddInstruction(new HLeaveInlined);
HEnvironment* outer = last_environment()->outer();
- outer->Push(return_value);
+ if (return_value != NULL) outer->Push(return_value);
UpdateEnvironment(outer);
Goto(target);
}
@@ -4077,24 +4084,71 @@
bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
// Try to inline calls like Math.* as operations in the calling function.
- MathFunctionId id = expr->target()->shared()->math_function_id();
+ if (!expr->target()->shared()->IsBuiltinMathFunction()) return false;
+ BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
switch (id) {
case kMathRound:
case kMathFloor:
case kMathAbs:
case kMathSqrt:
+ case kMathLog:
if (argument_count == 2) {
HValue* argument = Pop();
- // Pop receiver.
- Pop();
+ Drop(1); // Receiver.
HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
- PushAndAdd(op, expr->position());
+ op->set_position(expr->position());
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+ case kMathPow:
+ if (argument_count == 3) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Pop(); // Pop receiver.
+ HInstruction* result = NULL;
+ // Use sqrt() if exponent is 0.5 or -0.5.
+ if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
+ double exponent = HConstant::cast(right)->DoubleValue();
+ if (exponent == 0.5) {
+ result = new HUnaryMathOperation(left, kMathPowHalf);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ } else if (exponent == -0.5) {
+ HConstant* double_one =
+ new HConstant(Handle<Object>(Smi::FromInt(1)),
+ Representation::Double());
+ AddInstruction(double_one);
+ HUnaryMathOperation* square_root =
+ new HUnaryMathOperation(left, kMathPowHalf);
+ AddInstruction(square_root);
+ // MathPowHalf doesn't have side effects so there's no need for
+ // an environment simulation here.
+ ASSERT(!square_root->HasSideEffects());
+ result = new HDiv(double_one, square_root);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ } else if (exponent == 2.0) {
+ result = new HMul(left, left);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+ } else if (right->IsConstant() &&
+ HConstant::cast(right)->HasInteger32Value() &&
+ HConstant::cast(right)->Integer32Value() == 2) {
+ result = new HMul(left, left);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+
+ result = new HPower(left, right);
+ ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
default:
- // Either not a special math function or not yet supported for inlining.
+ // Not yet supported for inlining.
break;
}
return false;
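
The pow strength reduction above, restated as plain C++ (an editorial
sketch: the real kMathPowHalf instruction must also get IEEE edge cases
right, e.g. pow(-0.0, 0.5) and pow(-inf, 0.5), which std::sqrt alone does
not reproduce):

    #include <cmath>

    // Sketch of TryMathFunctionInline's handling of Math.pow(x, y).
    double FastPow(double x, double y) {
      if (y == 0.5) return std::sqrt(x);         // kMathPowHalf
      if (y == -0.5) return 1.0 / std::sqrt(x);  // HDiv(1.0, kMathPowHalf)
      if (y == 2.0) return x * x;                // HMul(left, left)
      return std::pow(x, y);                     // generic HPower
    }
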
@@ -4134,8 +4188,10 @@
function,
expr->GetReceiverTypes()->first(),
true);
- PushAndAdd(new HApplyArguments(function, receiver, length, elements),
- expr->position());
+ HInstruction* result =
+ new HApplyArguments(function, receiver, length, elements);
+ result->set_position(expr->position());
+ ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -4163,12 +4219,10 @@
CHECK_BAILOUT;
call = new HCallKeyed(key, argument_count);
- ProcessCall(call, expr->position());
- HValue* result = Pop();
- // Drop the receiver from the environment and put back the result of
- // the call.
- Drop(1);
- Push(result);
+ call->set_position(expr->position());
+ ProcessCall(call);
+ Drop(1); // Key.
+ ast_context()->ReturnInstruction(call, expr->id());
return;
}
@@ -4191,7 +4245,19 @@
if (expr->IsMonomorphic()) {
AddCheckConstantFunction(expr, receiver, types->first(), true);
- if (TryMathFunctionInline(expr) || TryInline(expr)) {
+ if (TryMathFunctionInline(expr)) {
+ return;
+ } else if (TryInline(expr)) {
+ if (subgraph()->HasExit()) {
+ HValue* return_value = Pop();
+ // If we inlined a function in a test context then we need to emit
+ // a simulate here to shadow the ones at the end of the
+ // predecessor blocks. Those environments contain the return
+ // value on top and do not correspond to any actual state of the
+ // unoptimized code.
+ if (ast_context()->IsEffect()) AddSimulate(expr->id());
+ ast_context()->ReturnValue(return_value);
+ }
return;
} else {
// Check for bailout, as the TryInline call in the if condition above
@@ -4199,6 +4265,7 @@
CHECK_BAILOUT;
call = new HCallConstantFunction(expr->target(), argument_count);
}
+
} else if (types != NULL && types->length() > 1) {
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
@@ -4246,7 +4313,19 @@
IsGlobalObject());
environment()->SetExpressionStackAt(receiver_index, global_receiver);
- if (TryInline(expr)) return;
+ if (TryInline(expr)) {
+ if (subgraph()->HasExit()) {
+ HValue* return_value = Pop();
+ // If we inlined a function in a test context then we need to
+ // emit a simulate here to shadow the ones at the end of the
+ // predecessor blocks. Those environments contain the return
+ // value on top and do not correspond to any actual state of the
+ // unoptimized code.
+ if (ast_context()->IsEffect()) AddSimulate(expr->id());
+ ast_context()->ReturnValue(return_value);
+ }
+ return;
+ }
// Check for bailout, as trying to inline might fail due to bailout
// during hydrogen processing.
CHECK_BAILOUT;
@@ -4269,7 +4348,9 @@
}
}
- ProcessCall(call, expr->position());
+ call->set_position(expr->position());
+ ProcessCall(call);
+ ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4283,8 +4364,9 @@
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HCall* call = new HCallNew(argument_count);
-
- ProcessCall(call, expr->position());
+ call->set_position(expr->position());
+ ProcessCall(call);
+ ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4292,7 +4374,7 @@
// Lookup table for generators for runtime calls that are generated inline.
// Elements of the table are member pointers to functions of HGraphBuilder.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
&HGraphBuilder::Generate##Name,
const HGraphBuilder::InlineFunctionGenerator
@@ -4306,7 +4388,7 @@
void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->IsEqualTo(CStrVector("_Log"))) {
- Push(graph()->GetConstantUndefined());
+ ast_context()->ReturnValue(graph()->GetConstantUndefined());
return;
}
@@ -4332,11 +4414,13 @@
InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
// Call the inline code generator using the pointer-to-member.
- (this->*generator)(argument_count);
+ (this->*generator)(argument_count, expr->id());
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
HCall* call = new HCallRuntime(name, expr->function(), argument_count);
- ProcessCall(call, RelocInfo::kNoPosition);
+ call->set_position(RelocInfo::kNoPosition);
+ ProcessCall(call);
+ ast_context()->ReturnInstruction(call, expr->id());
}
}
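
The dispatch through kInlineFunctionGenerators uses a C++ table of
pointers to member functions; a self-contained sketch with hypothetical
names:

    #include <cstdio>

    class Builder {
     public:
      typedef void (Builder::*Generator)(int argument_count, int ast_id);

      void GenerateIsSmi(int argc, int id) {
        std::printf("IsSmi: argc=%d ast_id=%d\n", argc, id);
      }

      void Dispatch(int index, int argc, int id) {
        // Table of member-function pointers, as the
        // INLINE_FUNCTION_GENERATOR_ADDRESS macro builds it; invocation
        // uses the ->* operator, as in VisitCallRuntime above.
        static const Generator kGenerators[] = { &Builder::GenerateIsSmi };
        (this->*kGenerators[index])(argc, id);
      }
    };
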
@@ -4345,7 +4429,7 @@
Token::Value op = expr->op();
if (op == Token::VOID) {
VISIT_FOR_EFFECT(expr->expression());
- Push(graph()->GetConstantUndefined());
+ ast_context()->ReturnValue(graph()->GetConstantUndefined());
} else if (op == Token::DELETE) {
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
@@ -4353,36 +4437,47 @@
// Result of deleting non-property, non-variable reference is true.
// Evaluate the subexpression for side effects.
VISIT_FOR_EFFECT(expr->expression());
- Push(graph_->GetConstantTrue());
+ ast_context()->ReturnValue(graph()->GetConstantTrue());
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- Push(graph_->GetConstantFalse());
+ ast_context()->ReturnValue(graph()->GetConstantFalse());
} else if (prop != NULL) {
VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
HValue* key = Pop();
HValue* obj = Pop();
- PushAndAdd(new HDeleteProperty(obj, key));
+ ast_context()->ReturnInstruction(new HDeleteProperty(obj, key),
+ expr->id());
} else if (var->is_global()) {
BAILOUT("delete with global variable");
} else {
BAILOUT("delete with non-global variable");
}
} else if (op == Token::NOT) {
- HSubgraph* true_graph = CreateEmptySubgraph();
- HSubgraph* false_graph = CreateEmptySubgraph();
- VisitCondition(expr->expression(),
- false_graph->entry_block(),
- true_graph->entry_block(),
- true, true);
- if (HasStackOverflow()) return;
- true_graph->environment()->Push(graph_->GetConstantTrue());
- false_graph->environment()->Push(graph_->GetConstantFalse());
- current_subgraph_->AppendJoin(true_graph, false_graph, expr);
+ if (ast_context()->IsTest()) {
+ TestContext* context = TestContext::cast(ast_context());
+ VisitForControl(expr->expression(),
+ context->if_false(),
+ context->if_true());
+ } else {
+ HSubgraph* true_graph = CreateEmptySubgraph();
+ HSubgraph* false_graph = CreateEmptySubgraph();
+ VISIT_FOR_CONTROL(expr->expression(),
+ false_graph->entry_block(),
+ true_graph->entry_block());
+ true_graph->entry_block()->SetJoinId(expr->expression()->id());
+ true_graph->environment()->Push(graph_->GetConstantTrue());
+
+ false_graph->entry_block()->SetJoinId(expr->expression()->id());
+ false_graph->environment()->Push(graph_->GetConstantFalse());
+
+ current_subgraph_->AppendJoin(true_graph, false_graph, expr);
+ ast_context()->ReturnValue(Pop());
+ }
} else if (op == Token::BIT_NOT || op == Token::SUB) {
VISIT_FOR_VALUE(expr->expression());
HValue* value = Pop();
@@ -4398,11 +4493,11 @@
UNREACHABLE();
break;
}
- PushAndAdd(instr);
+ ast_context()->ReturnInstruction(instr, expr->id());
} else if (op == Token::TYPEOF) {
VISIT_FOR_VALUE(expr->expression());
HValue* value = Pop();
- PushAndAdd(new HTypeof(value));
+ ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
} else {
BAILOUT("Value: unsupported unary operation");
}
@@ -4442,22 +4537,25 @@
VISIT_FOR_VALUE(target);
- HValue* value = Pop();
- HInstruction* instr = BuildIncrement(value, inc);
- AddInstruction(instr);
-
- if (expr->is_prefix()) {
- Push(instr);
- } else {
- Push(value);
- }
+ // Match the full code generator stack by simulating an extra stack
+ // element for postfix operations in a non-effect context.
+ bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
+ HValue* before = has_extra ? Top() : Pop();
+ HInstruction* after = BuildIncrement(before, inc);
+ AddInstruction(after);
+ Push(after);
if (var->is_global()) {
- HandleGlobalVariableAssignment(proxy, instr, expr->position());
+ HandleGlobalVariableAssignment(var,
+ after,
+ expr->position(),
+ expr->AssignmentId());
} else {
ASSERT(var->IsStackAllocated());
- Bind(var, instr);
+ Bind(var, after);
}
+ Drop(has_extra ? 2 : 1);
+ ast_context()->ReturnValue(expr->is_postfix() ? before : after);
} else if (prop != NULL) {
prop->RecordTypeFeedback(oracle());
@@ -4465,11 +4563,10 @@
if (prop->key()->IsPropertyName()) {
// Named property.
- // Match the full code generator stack by simulate an extra stack element
- // for postfix operations in a value context.
- if (expr->is_postfix() && !ast_context()->IsEffect()) {
- Push(graph_->GetConstantUndefined());
- }
+ // Match the full code generator stack by simulating an extra stack
+ // element for postfix operations in a non-effect context.
+ bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
+ if (has_extra) Push(graph_->GetConstantUndefined());
VISIT_FOR_VALUE(prop->obj());
HValue* obj = Top();
@@ -4485,37 +4582,35 @@
PushAndAdd(load);
if (load->HasSideEffects()) AddSimulate(increment->id());
- HValue* value = Pop();
+ HValue* before = Pop();
+ // There is no deoptimization point after the increment, so we don't
+ // need to simulate the expression stack after this instruction.
+ HInstruction* after = BuildIncrement(before, inc);
+ AddInstruction(after);
- HInstruction* instr = BuildIncrement(value, inc);
- AddInstruction(instr);
-
- HInstruction* store = BuildStoreNamed(obj, instr, prop);
+ HInstruction* store = BuildStoreNamed(obj, after, prop);
AddInstruction(store);
- // Drop simulated receiver and push the result.
- // There is no deoptimization to after the increment, so we can simulate
- // the expression stack here.
- Drop(1);
- if (expr->is_prefix()) {
- Push(instr);
- } else {
- if (!ast_context()->IsEffect()) Drop(1); // Drop simulated zero.
- Push(value);
- }
+ // Overwrite the receiver in the bailout environment with the result
+ // of the operation, and the placeholder with the original value if
+ // necessary.
+ environment()->SetExpressionStackAt(0, after);
+ if (has_extra) environment()->SetExpressionStackAt(1, before);
+ if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ Drop(has_extra ? 2 : 1);
+
+ ast_context()->ReturnValue(expr->is_postfix() ? before : after);
} else {
// Keyed property.
// Match the full code generator stack by simulating an extra stack element
- // for postfix operations in a value context.
- if (expr->is_postfix() && !ast_context()->IsEffect()) {
- Push(graph_->GetConstantUndefined());
- }
+ // for postfix operations in a non-effect context.
+ bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
+ if (has_extra) Push(graph_->GetConstantUndefined());
VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
-
HValue* obj = environment()->ExpressionStackAt(1);
HValue* key = environment()->ExpressionStackAt(0);
@@ -4528,27 +4623,29 @@
PushAndAdd(load);
if (load->HasSideEffects()) AddSimulate(increment->id());
- HValue* value = Pop();
-
- HInstruction* instr = BuildIncrement(value, inc);
- AddInstruction(instr);
+ HValue* before = Pop();
+ // There is no deoptimization point after the increment, so we don't
+ // need to simulate the expression stack after this instruction.
+ HInstruction* after = BuildIncrement(before, inc);
+ AddInstruction(after);
HInstruction* store = is_fast_elements
- ? BuildStoreKeyedFastElement(obj, key, instr, prop)
- : new HStoreKeyedGeneric(obj, key, instr);
+ ? BuildStoreKeyedFastElement(obj, key, after, prop)
+ : new HStoreKeyedGeneric(obj, key, after);
AddInstruction(store);
- // Drop simulated receiver and key and push the result.
- // There is no deoptimization to after the increment, so we can simulate
- // the expression stack here.
- Drop(2);
- if (expr->is_prefix()) {
- Push(instr);
- } else {
- if (!ast_context()->IsEffect()) Drop(1); // Drop simulated zero.
- Push(value);
- }
+ // Drop the key from the bailout environment. Overwrite the receiver
+ // with the result of the operation, and the placeholder with the
+ // original value if necessary.
+ Drop(1);
+ environment()->SetExpressionStackAt(0, after);
+ if (has_extra) environment()->SetExpressionStackAt(1, before);
+ if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ Drop(has_extra ? 2 : 1);
+
+ ast_context()->ReturnValue(expr->is_postfix() ? before : after);
}
+
} else {
BAILOUT("invalid lhs in count operation");
}
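
The before/after bookkeeping above implements the usual count-operation
semantics; in plain C++ terms (editorial sketch):

    // Postfix returns the value loaded before the increment ('before');
    // prefix returns the incremented value ('after').  The extra stack
    // element exists only so the bailout environment matches full codegen.
    int CountOperation(int* slot, bool is_postfix) {
      int before = *slot;       // the load
      int after = before + 1;   // BuildIncrement
      *slot = after;            // the store
      return is_postfix ? before : after;
    }
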
@@ -4630,21 +4727,43 @@
void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
if (expr->op() == Token::COMMA) {
VISIT_FOR_EFFECT(expr->left());
- VISIT_FOR_VALUE(expr->right());
+ // Visit the right subexpression in the same AST context as the entire
+ // expression.
+ Visit(expr->right());
+
} else if (expr->op() == Token::AND || expr->op() == Token::OR) {
- VISIT_FOR_VALUE(expr->left());
- ASSERT(current_subgraph_->HasExit());
-
- HValue* left = Top();
bool is_logical_and = (expr->op() == Token::AND);
+ if (ast_context()->IsTest()) {
+ TestContext* context = TestContext::cast(ast_context());
+ // Translate left subexpression.
+ HBasicBlock* eval_right = graph()->CreateBasicBlock();
+ if (is_logical_and) {
+ VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
+ } else {
+ VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
+ }
+ eval_right->SetJoinId(expr->RightId());
- HEnvironment* environment_copy = environment()->Copy();
- environment_copy->Pop();
- HSubgraph* right_subgraph;
- right_subgraph = CreateBranchSubgraph(environment_copy);
- ADD_TO_SUBGRAPH(right_subgraph, expr->right());
- current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left);
- current_subgraph_->exit_block()->SetJoinId(expr->id());
+ // Translate right subexpression by visiting it in the same AST
+ // context as the entire expression.
+ subgraph()->set_exit_block(eval_right);
+ Visit(expr->right());
+
+ } else {
+ VISIT_FOR_VALUE(expr->left());
+ ASSERT(current_subgraph_->HasExit());
+
+ HValue* left = Top();
+ HEnvironment* environment_copy = environment()->Copy();
+ environment_copy->Pop();
+ HSubgraph* right_subgraph;
+ right_subgraph = CreateBranchSubgraph(environment_copy);
+ ADD_TO_SUBGRAPH(right_subgraph, expr->right());
+ current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left);
+ current_subgraph_->exit_block()->SetJoinId(expr->id());
+ ast_context()->ReturnValue(Pop());
+ }
+
} else {
VISIT_FOR_VALUE(expr->left());
VISIT_FOR_VALUE(expr->right());
@@ -4652,7 +4771,8 @@
HValue* right = Pop();
HValue* left = Pop();
HInstruction* instr = BuildBinaryOperation(expr, left, right);
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
}
@@ -4691,7 +4811,8 @@
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->handle());
HInstruction* instr = new HClassOfTest(value, rhs);
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ ast_context()->ReturnInstruction(instr, expr->id());
return;
}
@@ -4705,7 +4826,8 @@
HValue* left = Pop();
HInstruction* instr = new HTypeofIs(left,
Handle<String>::cast(right_literal->handle()));
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ ast_context()->ReturnInstruction(instr, expr->id());
return;
}
@@ -4741,7 +4863,8 @@
compare->SetInputRepresentation(r);
instr = compare;
}
- PushAndAdd(instr, expr->position());
+ instr->set_position(expr->position());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4750,8 +4873,7 @@
HValue* value = Pop();
HIsNull* compare = new HIsNull(value, expr->is_strict());
-
- PushAndAdd(compare);
+ ast_context()->ReturnInstruction(compare, expr->id());
}
@@ -4778,301 +4900,309 @@
// Generators for inline runtime functions.
// Support for types.
-void HGraphBuilder::GenerateIsSmi(int argument_count) {
+void HGraphBuilder::GenerateIsSmi(int argument_count, int ast_id) {
ASSERT(argument_count == 1);
-
HValue* value = Pop();
- PushAndAdd(new HIsSmi(value));
+ HIsSmi* result = new HIsSmi(value);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateIsSpecObject(int argument_count) {
+void HGraphBuilder::GenerateIsSpecObject(int argument_count, int ast_id) {
ASSERT(argument_count == 1);
-
HValue* value = Pop();
- HHasInstanceType* test =
+ HHasInstanceType* result =
new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
- PushAndAdd(test);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateIsFunction(int argument_count) {
+void HGraphBuilder::GenerateIsFunction(int argument_count, int ast_id) {
+ ASSERT(argument_count == 1);
+ HValue* value = Pop();
+ HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE);
+ ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count,
+ int ast_id) {
+ ASSERT(argument_count == 1);
+ HValue* value = Pop();
+ HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value);
+ ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsArray(int argument_count, int ast_id) {
+ ASSERT(argument_count == 1);
+ HValue* value = Pop();
+ HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE);
+ ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsRegExp(int argument_count, int ast_id) {
+ ASSERT(argument_count == 1);
+ HValue* value = Pop();
+ HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE);
+ ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsObject(int argument_count, int ast_id) {
ASSERT(argument_count == 1);
HValue* value = Pop();
- HHasInstanceType* test =
- new HHasInstanceType(value, JS_FUNCTION_TYPE);
- PushAndAdd(test);
+ HIsObject* test = new HIsObject(value);
+ ast_context()->ReturnInstruction(test, ast_id);
}
-void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count) {
- ASSERT(argument_count == 1);
-
- HValue* value = Pop();
- HHasCachedArrayIndex* spec_test = new HHasCachedArrayIndex(value);
- PushAndAdd(spec_test);
-}
-
-
-void HGraphBuilder::GenerateIsArray(int argument_count) {
- ASSERT(argument_count == 1);
-
- HValue* value = Pop();
- HHasInstanceType* test =
- new HHasInstanceType(value, JS_ARRAY_TYPE);
- PushAndAdd(test);
-}
-
-
-void HGraphBuilder::GenerateIsRegExp(int argument_count) {
- ASSERT(argument_count == 1);
-
- HValue* value = Pop();
- HHasInstanceType* test =
- new HHasInstanceType(value, JS_REGEXP_TYPE);
- PushAndAdd(test);
-}
-
-
-void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count) {
+void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: IsNonNegativeSmi");
}
-void HGraphBuilder::GenerateIsObject(int argument_count) {
- BAILOUT("inlined runtime function: IsObject");
-}
-
-
-void HGraphBuilder::GenerateIsUndetectableObject(int argument_count) {
+void HGraphBuilder::GenerateIsUndetectableObject(int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: IsUndetectableObject");
}
void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
- int argument_count) {
+ int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
}
// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(int argument_count) {
+void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: IsConstructCall");
}
// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(int argument_count) {
+void HGraphBuilder::GenerateArgumentsLength(int argument_count, int ast_id) {
ASSERT(argument_count == 0);
HInstruction* elements = AddInstruction(new HArgumentsElements);
- PushAndAdd(new HArgumentsLength(elements));
+ HArgumentsLength* result = new HArgumentsLength(elements);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateArguments(int argument_count) {
+void HGraphBuilder::GenerateArguments(int argument_count, int ast_id) {
ASSERT(argument_count == 1);
HValue* index = Pop();
HInstruction* elements = AddInstruction(new HArgumentsElements);
HInstruction* length = AddInstruction(new HArgumentsLength(elements));
- PushAndAdd(new HAccessArgumentsAt(elements, length, index));
+ HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(int argument_count) {
+void HGraphBuilder::GenerateClassOf(int argument_count, int ast_id) {
// The special form detected by IsClassOfTest is handled before we get here
// and does not cause a bailout.
BAILOUT("inlined runtime function: ClassOf");
}
-void HGraphBuilder::GenerateValueOf(int argument_count) {
+void HGraphBuilder::GenerateValueOf(int argument_count, int ast_id) {
ASSERT(argument_count == 1);
-
HValue* value = Pop();
- HValueOf* op = new HValueOf(value);
- PushAndAdd(op);
+ HValueOf* result = new HValueOf(value);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateSetValueOf(int argument_count) {
+void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: SetValueOf");
}
// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(int argument_count) {
+void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: StringCharCodeAt");
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(int argument_count) {
+void HGraphBuilder::GenerateStringCharFromCode(int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: StringCharFromCode");
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(int argument_count) {
+void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::StringCharAt, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result = new HCallStub(CodeStub::StringCharAt, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(int argument_count) {
+void HGraphBuilder::GenerateObjectEquals(int argument_count, int ast_id) {
ASSERT(argument_count == 2);
-
HValue* right = Pop();
HValue* left = Pop();
- PushAndAdd(new HCompareJSObjectEq(left, right));
+ HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateLog(int argument_count) {
+void HGraphBuilder::GenerateLog(int argument_count, int ast_id) {
UNREACHABLE(); // We caught this in VisitCallRuntime.
}
// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(int argument_count) {
+void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: RandomHeapNumber");
}
// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(int argument_count) {
+void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::StringAdd, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result = new HCallStub(CodeStub::StringAdd, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(int argument_count) {
+void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
ASSERT_EQ(3, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::SubString, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result = new HCallStub(CodeStub::SubString, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(int argument_count) {
+void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::StringCompare, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result = new HCallStub(CodeStub::StringCompare, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(int argument_count) {
+void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
ASSERT_EQ(4, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::RegExpExec, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result = new HCallStub(CodeStub::RegExpExec, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(int argument_count) {
+void HGraphBuilder::GenerateRegExpConstructResult(int argument_count,
+ int ast_id) {
ASSERT_EQ(3, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::RegExpConstructResult, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result =
+ new HCallStub(CodeStub::RegExpConstructResult, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(int argument_count) {
+void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: GetFromCache");
}
// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(int argument_count) {
+void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::NumberToString, argument_count),
- RelocInfo::kNoPosition);
+ HCallStub* result = new HCallStub(CodeStub::NumberToString, argument_count);
+ ast_context()->ReturnInstruction(result, ast_id);
}
// Fast swapping of elements. Takes three expressions, the object and two
// indices. This should only be used if the indices are known to be
// non-negative and within bounds of the elements array at the call site.
-void HGraphBuilder::GenerateSwapElements(int argument_count) {
+void HGraphBuilder::GenerateSwapElements(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: SwapElements");
}
// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(int argument_count) {
+void HGraphBuilder::GenerateCallFunction(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: CallFunction");
}
// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(int argument_count) {
+void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
- PushArgumentsForStubCall(argument_count);
- PushAndAdd(new HCallStub(CodeStub::MathPow, argument_count),
- RelocInfo::kNoPosition);
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HPower* result = new HPower(left, right);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateMathSin(int argument_count) {
+void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
- HCallStub* instr =
+ HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
- instr->set_transcendental_type(TranscendentalCache::SIN);
- PushAndAdd(instr, RelocInfo::kNoPosition);
+ result->set_transcendental_type(TranscendentalCache::SIN);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateMathCos(int argument_count) {
+void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
- HCallStub* instr =
+ HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
- instr->set_transcendental_type(TranscendentalCache::COS);
- PushAndAdd(instr, RelocInfo::kNoPosition);
+ result->set_transcendental_type(TranscendentalCache::COS);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateMathLog(int argument_count) {
+void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
- HCallStub* instr =
+ HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
- instr->set_transcendental_type(TranscendentalCache::LOG);
- PushAndAdd(instr, RelocInfo::kNoPosition);
+ result->set_transcendental_type(TranscendentalCache::LOG);
+ ast_context()->ReturnInstruction(result, ast_id);
}
-void HGraphBuilder::GenerateMathSqrt(int argument_count) {
+void HGraphBuilder::GenerateMathSqrt(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: MathSqrt");
}
// Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count) {
+void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: IsRegExpEquivalent");
}
-void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count) {
+void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: GetCachedArrayIndex");
}
-void HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count) {
+void HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count,
+ int ast_id) {
BAILOUT("inlined runtime function: FastAsciiArrayJoin");
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 91f3c9e..2c9aeac 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -136,14 +136,6 @@
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
- // If this block is a successor of a branch, his flags tells whether the
- // preceding branch was inverted or not.
- bool inverted() { return inverted_; }
- void set_inverted(bool b) { inverted_ = b; }
-
- HBasicBlock* deopt_predecessor() { return deopt_predecessor_; }
- void set_deopt_predecessor(HBasicBlock* block) { deopt_predecessor_ = block; }
-
Handle<Object> cond() { return cond_; }
void set_cond(Handle<Object> value) { cond_ = value; }
@@ -176,8 +168,6 @@
ZoneList<int> deleted_phis_;
SetOncePointer<HBasicBlock> parent_loop_header_;
bool is_inline_return_target_;
- bool inverted_;
- HBasicBlock* deopt_predecessor_;
Handle<Object> cond_;
};
@@ -557,10 +547,29 @@
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
+ // 'Fill' this context with a hydrogen value. The value is assumed to
+ // have already been inserted in the instruction stream (or to need no
+ // insertion, e.g., HPhi). Call this function in tail position in the
+ // Visit functions for expressions.
+ virtual void ReturnValue(HValue* value) = 0;
+
+ // Add a hydrogen instruction to the instruction stream (recording an
+ // environment simulation if necessary) and then fill this context with
+ // the instruction as value.
+ virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+
protected:
AstContext(HGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
+ HGraphBuilder* owner() const { return owner_; }
+
+ // We want to be able to assert, in a context-specific way, that the stack
+ // height makes sense when the context is filled.
+#ifdef DEBUG
+ int original_count_;
+#endif
+
private:
HGraphBuilder* owner_;
Expression::Context kind_;
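
A plausible shape for the new virtuals, consistent with the call sites in
hydrogen.cc above (a sketch; the actual bodies live in the .cc file and
are not shown in this hunk):

    void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
      owner()->AddInstruction(instr);
      if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
    }

    void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
      owner()->Push(instr);  // leave the value on the expression stack
      owner()->AddInstruction(instr);
      if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
    }
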
@@ -573,6 +582,10 @@
explicit EffectContext(HGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
+ virtual ~EffectContext();
+
+ virtual void ReturnValue(HValue* value);
+ virtual void ReturnInstruction(HInstruction* instr, int ast_id);
};
@@ -581,6 +594,10 @@
explicit ValueContext(HGraphBuilder* owner)
: AstContext(owner, Expression::kValue) {
}
+ virtual ~ValueContext();
+
+ virtual void ReturnValue(HValue* value);
+ virtual void ReturnInstruction(HInstruction* instr, int ast_id);
};
@@ -588,16 +605,15 @@
public:
TestContext(HGraphBuilder* owner,
HBasicBlock* if_true,
- HBasicBlock* if_false,
- bool invert_true,
- bool invert_false)
+ HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
if_true_(if_true),
- if_false_(if_false),
- invert_true_(invert_true),
- invert_false_(invert_false) {
+ if_false_(if_false) {
}
+ virtual void ReturnValue(HValue* value);
+ virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
return reinterpret_cast<TestContext*>(context);
@@ -606,14 +622,13 @@
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
- bool invert_true() { return invert_true_; }
- bool invert_false() { return invert_false_; }
-
private:
+ // Build the shared core of the translation that unpacks a value into
+ // control flow.
+ void BuildBranch(HValue* value);
+
HBasicBlock* if_true_;
HBasicBlock* if_false_;
- bool invert_true_;
- bool invert_false_;
};
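
BuildBranch can be sketched from the primitives this patch already uses
(CreateBasicBlock, HBranch, Finish, Goto); treat the body as illustrative:

    void TestContext::BuildBranch(HValue* value) {
      HGraphBuilder* builder = owner();
      HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
      HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
      // Branch on the boolean value of 'value', then fall through to the
      // real targets; the empty blocks keep the join ids well defined.
      builder->CurrentBlock()->Finish(
          new HBranch(empty_true, empty_false, value));
      empty_true->Goto(if_true());
      empty_false->Goto(if_false());
    }
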
@@ -631,9 +646,25 @@
HGraph* CreateGraph(CompilationInfo* info);
+ // Simple accessors.
+ HGraph* graph() const { return graph_; }
+ HSubgraph* subgraph() const { return current_subgraph_; }
+
+ HEnvironment* environment() const { return subgraph()->environment(); }
+ HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
+
+ // Adding instructions.
+ HInstruction* AddInstruction(HInstruction* instr);
+ void AddSimulate(int id);
+
+ // Bailout environment manipulation.
+ void Push(HValue* value) { environment()->Push(value); }
+ HValue* Pop() { return environment()->Pop(); }
+
private:
// Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count);
+ typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count,
+ int ast_id);
// Forward declarations for inner scope classes.
class SubgraphScope;
@@ -650,19 +681,14 @@
// Simple accessors.
TypeFeedbackOracle* oracle() const { return oracle_; }
- HGraph* graph() const { return graph_; }
- HSubgraph* subgraph() const { return current_subgraph_; }
AstContext* ast_context() const { return ast_context_; }
void set_ast_context(AstContext* context) { ast_context_ = context; }
AstContext* call_context() const { return call_context_; }
HBasicBlock* function_return() const { return function_return_; }
- HEnvironment* environment() const { return subgraph()->environment(); }
-
- HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
// Generators for inline runtime functions.
-#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
- void Generate##Name(int argument_count);
+#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
+ void Generate##Name(int argument_count, int ast_id);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
@@ -678,13 +704,7 @@
void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
void AddToSubgraph(HSubgraph* graph, Statement* stmt);
void AddToSubgraph(HSubgraph* graph, Expression* expr);
- void AddConditionToSubgraph(HSubgraph* subgraph,
- Expression* expr,
- HSubgraph* true_graph,
- HSubgraph* false_graph);
- void Push(HValue* value) { environment()->Push(value); }
- HValue* Pop() { return environment()->Pop(); }
HValue* Top() const { return environment()->Top(); }
void Drop(int n) { environment()->Drop(n); }
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
@@ -693,33 +713,21 @@
void VisitForEffect(Expression* expr);
void VisitForControl(Expression* expr,
HBasicBlock* true_block,
- HBasicBlock* false_block,
- bool invert_true,
- bool invert_false);
+ HBasicBlock* false_block);
- // Visit an expression in a 'condition' context, i.e., in a control
- // context but not a subexpression of logical and, or, or not.
- void VisitCondition(Expression* expr,
- HBasicBlock* true_graph,
- HBasicBlock* false_graph,
- bool invert_true,
- bool invert_false);
// Visit an argument and wrap it in a PushArgument instruction.
HValue* VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(int id);
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
- void PushAndAdd(HInstruction* instr, int position);
void PushArgumentsForStubCall(int argument_count);
- // Initialize the arguments to the call based on then environment, add it
- // to the graph, and drop the arguments from the environment.
- void ProcessCall(HCall* call, int source_position);
+ // Remove the arguments from the bailout environment and emit instructions
+ // to push them as outgoing parameters.
+ void ProcessCall(HCall* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@@ -743,7 +751,7 @@
FunctionLiteral* function);
// Helpers for flow graph construction.
- void LookupGlobalPropertyCell(VariableProxy* expr,
+ void LookupGlobalPropertyCell(Variable* var,
LookupResult* lookup,
bool is_store);
@@ -753,10 +761,11 @@
bool TryMathFunctionInline(Call* expr);
void TraceInline(Handle<JSFunction> target, bool result);
- void HandleGlobalVariableAssignment(VariableProxy* proxy,
+ void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
- int position);
- void HandleGlobalVariableLoad(VariableProxy* expr);
+ int position,
+ int ast_id);
+
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicLoadNamedField(Property* expr,
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 9582656..c173a3d 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2409,6 +2409,7 @@
emit_sse_operand(dst, src);
}
+
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2431,6 +2432,17 @@
}
+void Assembler::movd(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x7E);
+ emit_sse_operand(src, dst);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2465,7 +2477,7 @@
}
-void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+void Assembler::psllq(XMMRegister reg, int8_t shift) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2473,7 +2485,32 @@
EMIT(0x0F);
EMIT(0x73);
emit_sse_operand(esi, reg); // esi == 6
- EMIT(imm8);
+ EMIT(shift);
+}
+
+
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x70);
+ emit_sse_operand(dst, src);
+ EMIT(shuffle);
+}
+
+
+void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x16);
+ emit_sse_operand(src, dst);
+ EMIT(offset);
}
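
For readers more used to intrinsics than opcode bytes: the two new
instructions correspond to _mm_extract_epi32 (SSE4.1) and
_mm_shuffle_epi32 plus movd (SSE2). A sketch of the high-word extraction
the stub below performs (the runtime branch is illustrative; real code
must compile the SSE4.1 path separately):

    #include <emmintrin.h>   // SSE2: pshufd, movd
    #include <smmintrin.h>   // SSE4.1: pextrd

    // Extract bits 63..32 of a double held in an XMM register.
    static int HighWord(__m128d value, bool has_sse4_1) {
      __m128i bits = _mm_castpd_si128(value);
      if (has_sse4_1) {
        return _mm_extract_epi32(bits, 1);                   // pextrd
      }
      return _mm_cvtsi128_si32(_mm_shuffle_epi32(bits, 1));  // pshufd + movd
    }
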
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 2b4624c..11acb56 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -571,6 +571,15 @@
static const byte kTestEaxByte = 0xA9;
// One byte opcode for test al, 0xXX.
static const byte kTestAlByte = 0xA8;
+ // One byte opcode for nop.
+ static const byte kNopByte = 0x90;
+
+ // One byte opcode for a short unconditional jump.
+ static const byte kJmpShortOpcode = 0xEB;
+ // One byte prefix for a short conditional jump.
+ static const byte kJccShortPrefix = 0x70;
+ static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+ static const byte kJcShortOpcode = kJccShortPrefix | carry;
// ---------------------------------------------------------------------------
// Code generation
@@ -905,13 +914,16 @@
void movdbl(const Operand& dst, XMMRegister src);
void movd(XMMRegister dst, const Operand& src);
+ void movd(const Operand& src, XMMRegister dst);
void movsd(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
- void psllq(XMMRegister reg, int8_t imm8);
+ void psllq(XMMRegister reg, int8_t shift);
+ void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+ void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index c28e144..918f346 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -29,7 +29,6 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "code-stubs.h"
#include "codegen-inl.h"
#include "deoptimizer.h"
#include "full-codegen.h"
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 3233be7..d75acab 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1366,8 +1366,8 @@
if (op_ == Token::DIV || op_ == Token::MOD) {
left = eax;
right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
}
@@ -2683,6 +2683,145 @@
}
+void TranscendentalCacheSSE2Stub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // esp[0]: return address.
+ // Input in registers:
+ // xmm1: untagged double input argument.
+ // Output:
+ // xmm1: untagged double result.
+ Label skip_cache;
+ Label call_runtime;
+
+ // Input is an untagged double in xmm1.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope sse4_scope(SSE4_1);
+ __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
+ } else {
+ __ pshufd(xmm0, xmm1, 0x1);
+ __ movd(Operand(edx), xmm0);
+ }
+ __ movd(Operand(ebx), xmm1);
+
+ // xmm1 = double value
+ // ebx = low 32 bits of double value
+ // edx = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ mov(ecx, ebx);
+ __ xor_(ecx, Operand(edx));
+ __ mov(eax, ecx);
+ __ sar(eax, 16);
+ __ xor_(ecx, Operand(eax));
+ __ mov(eax, ecx);
+ __ sar(eax, 8);
+ __ xor_(ecx, Operand(eax));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // xmm1 = double value.
+ // ebx = low 32 bits of double value.
+ // edx = high 32 bits of double value.
+ // ecx = TranscendentalCache::hash(double value).
+ __ mov(eax,
+ Immediate(ExternalReference::transcendental_cache_array_address()));
+ // eax now points to the cache array.
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // eax now points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ test(eax, Operand(eax));
+ __ j(zero, &call_runtime);
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+ // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ lea(ecx, Operand(eax, ecx, times_4, 0));
+ // Check if the cache entry matches: the double value is stored as a uint32_t[2] array.
+ NearLabel cache_miss;
+ __ cmp(ebx, Operand(ecx, 0));
+ __ j(not_equal, &cache_miss);
+ __ cmp(edx, Operand(ecx, kIntSize));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ mov(eax, Operand(ecx, 2 * kIntSize));
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ Ret();
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ // We are short on registers, so use no_reg as scratch.
+ // This gives slightly larger code.
+ __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), xmm1);
+ __ fld_d(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ GenerateOperation(masm);
+ __ mov(Operand(ecx, 0), ebx);
+ __ mov(Operand(ecx, kIntSize), edx);
+ __ mov(Operand(ecx, 2 * kIntSize), eax);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ Ret();
+
+ __ bind(&skip_cache);
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), xmm1);
+ __ fld_d(Operand(esp, 0));
+ GenerateOperation(masm);
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(xmm1, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ __ Ret();
+
+ __ bind(&call_runtime);
+ __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ Ret();
+}
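
The pextrd/pshufd choice at the top of Generate extracts the high 32 bits of the double without going through memory. A rough intrinsics equivalent of the two paths, offered purely as an illustration (V8 emits the raw instructions; the names below are not V8 API):

    #include <emmintrin.h>   // SSE2: pshufd, movd
    #include <smmintrin.h>   // SSE4.1: pextrd

    // SSE4.1 path: pextrd pulls lane 1 (the high 32 bits) straight to a GPR.
    static int HighBitsSSE41(__m128d v) {
      return _mm_extract_epi32(_mm_castpd_si128(v), 1);
    }

    // SSE2 fallback: pshufd moves lane 1 down to lane 0, then movd reads it.
    static int HighBitsSSE2(__m128d v) {
      __m128i shuffled = _mm_shuffle_epi32(_mm_castpd_si128(v), 1);
      return _mm_cvtsi128_si32(shuffled);
    }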
+
+
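The hash computed above, restated at the C level as a sketch under the assumptions visible in the stub: the double is split into two little-endian 32-bit halves, the shifts are arithmetic (sar), and kCacheSize is a power of two. The names are illustrative, not V8 API; the DEBUG block inside Generate pins down the matching cache-element layout (two uint32_t inputs followed by a pointer, 12 bytes on ia32).

    #include <stdint.h>
    #include <string.h>

    // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= cache_size - 1.
    static uint32_t TranscendentalHash(double v, uint32_t cache_size) {
      uint32_t halves[2];
      memcpy(halves, &v, sizeof(halves));  // halves[0] = low, halves[1] = high
      int32_t h = static_cast<int32_t>(halves[0] ^ halves[1]);
      h ^= h >> 16;  // arithmetic shifts, matching the stub's sar
      h ^= h >> 8;
      return static_cast<uint32_t>(h) & (cache_size - 1);
    }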
+Runtime::FunctionId TranscendentalCacheSSE2Stub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::LOG: return Runtime::kMath_log;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheSSE2Stub::GenerateOperation(MacroAssembler* masm) {
+ // Only free register is edi.
+ // Input value is on FP stack and in xmm1.
+
+ ASSERT(type_ == TranscendentalCache::LOG);
+ __ fldln2();
+ __ fxch();
+ __ fyl2x();
+}
+
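GenerateOperation relies on an x87 identity: fyl2x computes st(1) * log2(st(0)), so loading ln 2 with fldln2 and swapping it under the argument with fxch leaves ln 2 * log2(x) = ln x on the stack. The same identity in plain C++:

    #include <cassert>
    #include <cmath>

    int main() {
      double x = 3.5;
      // fldln2; fxch; fyl2x  ==  ln(2) * log2(x)  ==  ln(x)
      assert(std::abs(std::log(2.0) * std::log2(x) - std::log(x)) < 1e-15);
      return 0;
    }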
+
// Get the integer part of a heap number. Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
@@ -4901,76 +5040,125 @@
void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ // Fixed register usage throughout the stub.
+ Register object = eax; // Object (lhs).
+ Register map = ebx; // Map of the object.
+ Register function = edx; // Function (rhs).
+ Register prototype = edi; // Prototype of the function.
+ Register scratch = ecx;
+
+ // Get the object and function - they are always both needed.
+ Label slow, not_js_object;
+ if (!args_in_registers()) {
+ __ mov(object, Operand(esp, 2 * kPointerSize));
+ __ mov(function, Operand(esp, 1 * kPointerSize));
+ }
// Check that the left hand is a JS object.
- __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
- // Get the prototype of the function.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
- // edx is function, eax is map.
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, &not_js_object, not_taken);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// Look up the function and the map in the instanceof cache.
NearLabel miss;
ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(function,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ ret(2 * kPointerSize);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ __ IncrementCounter(&Counters::instance_of_cache, 1);
+ __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ bind(&miss);
- __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(prototype, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
- __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
- // Register mapping:
- // eax is object map.
- // edx is function.
- // ebx is function prototype.
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+ // Update the global instanceof cache with the current map and function. The
+ // cached answer will be set when it is known.
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
+ function);
- __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
+ // Loop through the prototype chain of the object looking for the function
+ // prototype.
+ __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
NearLabel loop, is_instance, is_not_instance;
__ bind(&loop);
- __ cmp(ecx, Operand(ebx));
+ __ cmp(scratch, Operand(prototype));
__ j(equal, &is_instance);
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ cmp(Operand(scratch), Immediate(Factory::null_value()));
__ j(equal, &is_not_instance);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
+ __ IncrementCounter(&Counters::instance_of_stub_true, 1);
__ Set(eax, Immediate(0));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
+ __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ bind(&is_not_instance);
+ __ IncrementCounter(&Counters::instance_of_stub_false, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
+ __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before the null, smi and string value checks, check that the rhs is a
+ // function, as an exception needs to be thrown for a non-function rhs.
+ __ test(function, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+ __ j(not_equal, &slow, not_taken);
+
+ // Null is not an instance of anything.
+ __ cmp(object, Factory::null_value());
+ __ j(not_equal, &object_not_null);
+ __ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(not_zero, &object_not_null_or_smi, not_taken);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
+ __ j(NegateCondition(is_string), &slow);
+ __ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
+ if (args_in_registers()) {
+ // Push arguments below return address.
+ __ pop(scratch);
+ __ push(object);
+ __ push(function);
+ __ push(scratch);
+ }
+ __ IncrementCounter(&Counters::instance_of_slow, 1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
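
One detail worth calling out about the stub above: the result encoding is inverted relative to a C boolean. eax == 0 means "is an instance" and Smi::FromInt(1) means "is not", which is why the call sites later in this patch follow the stub with test eax, eax and take the true branch on the zero flag. Restated (illustrative only, not V8 code):

    // eax == 0  =>  instance; any nonzero smi  =>  not an instance.
    static inline bool InstanceofStubSaysTrue(int eax_value) {
      return eax_value == 0;
    }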
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 2973101..04f23ac 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -45,6 +45,7 @@
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
+
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
@@ -52,6 +53,24 @@
};
+// Check the transcendental cache, or generate the result, using SSE2.
+// The argument and result will be in xmm1.
+// Only supports TranscendentalCache::LOG at this point.
+class TranscendentalCacheSSE2Stub: public CodeStub {
+ public:
+ explicit TranscendentalCacheSSE2Stub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+
+ Major MajorKey() { return TranscendentalCacheSSE2; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm);
+};
+
+
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
@@ -231,7 +250,8 @@
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- TypeRecordingBinaryOpStub(int key,
+ TypeRecordingBinaryOpStub(
+ int key,
TRBinaryOpIC::TypeInfo operands_type,
TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
@@ -239,8 +259,7 @@
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
- name_(NULL) {
- }
+ name_(NULL) { }
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 022c117..4c9d055 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -7676,6 +7676,13 @@
__ test(tmp2.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
+ // Check that both indices are valid.
+ __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
+ __ cmp(tmp2.reg(), Operand(index1.reg()));
+ deferred->Branch(below_equal);
+ __ cmp(tmp2.reg(), Operand(index2.reg()));
+ deferred->Branch(below_equal);
+
// Bring addresses into index1 and index2.
__ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
__ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
@@ -9133,7 +9140,7 @@
case Token::INSTANCEOF: {
if (!left_already_loaded) Load(left);
Load(right);
- InstanceofStub stub;
+ InstanceofStub stub(InstanceofStub::kNoFlags);
Result answer = frame_->CallStub(&stub, 2);
answer.ToRegister();
__ test(answer.reg(), Operand(answer.reg()));
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 3734fca..dfbcbb7 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1107,6 +1107,21 @@
} else {
UnimplementedInstruction();
}
+ } else if (*data == 0x3A) {
+ data++;
+ if (*data == 0x16) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pextrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else {
+ UnimplementedInstruction();
+ }
} else if (*data == 0x2E || *data == 0x2F) {
const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
data++;
@@ -1129,6 +1144,14 @@
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x54) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
@@ -1149,6 +1172,25 @@
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
+ } else if (*data == 0x70) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pshufd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x73) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("psllq %s,%d",
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
@@ -1156,6 +1198,21 @@
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0x7E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movd ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xDB) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pand %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0xE7) {
AppendToBuffer("movntdq ");
data++;
@@ -1164,38 +1221,13 @@
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pxor %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xDB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pand %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x73) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("psllq %s,%d",
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pxor %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
UnimplementedInstruction();
}
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 1f7095f..be059cd 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -41,8 +41,61 @@
namespace v8 {
namespace internal {
+
#define __ ACCESS_MASM(masm_)
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm)
+ : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ __ test(reg, Immediate(kSmiTagMask));
+ EmitJump(not_carry, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ __ test(reg, Immediate(kSmiTagMask));
+ EmitJump(carry, target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ test(eax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ // jc will be patched with jz, jnc will become jnz.
+ void EmitJump(Condition cc, NearLabel* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ ASSERT(cc == carry || cc == not_carry);
+ __ bind(&patch_site_);
+ __ j(cc, target);
+ }
+
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
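
Why JumpPatchSite pairs test with the carry conditions: test always clears CF, so before patching, the jnc emitted by EmitJumpIfNotSmi is unconditionally taken (every value goes to the slow path) and the jc from EmitJumpIfSmi is never taken. Once the IC has seen smi operands, PatchInlinedSmiCode (in ic-ia32.cc, later in this patch) rewrites the condition so the jumps test the actual smi tag bit:

    // Before patching (CF is always 0 after 'test'):
    //   jnc target   ; always taken -> everything goes to the slow path
    //   jc  target   ; never taken
    // After patching (on ia32 the smi tag is bit 0, and 0 means smi):
    //   jnz target   ; taken when the tag bit is set   -> not a smi
    //   jz  target   ; taken when the tag bit is clear -> a smi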
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -198,6 +251,11 @@
}
+void FullCodeGenerator::ClearAccumulator() {
+ __ Set(eax, Immediate(Smi::FromInt(0)));
+}
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
NearLabel ok;
@@ -687,10 +745,9 @@
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -715,12 +772,13 @@
// Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
NearLabel slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+
__ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -730,9 +788,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
-
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, &patch_site);
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
@@ -911,7 +968,9 @@
__ bind(&update_each);
__ mov(result_register(), ebx);
// Perform the assignment as if via '='.
- EmitAssignment(stmt->each());
+ { EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->AssignmentId());
+ }
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1478,7 +1537,7 @@
// For property compound assignments we need another deoptimization
// point after the property load.
if (property != NULL) {
- PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
}
Token::Value op = expr->binary_op();
@@ -1521,6 +1580,8 @@
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1552,12 +1613,11 @@
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
- NearLabel call_stub;
- Label done;
+ NearLabel call_stub, done;
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &done);
// Undo the optimistic add operation and call the shared stub.
__ bind(&call_stub);
@@ -1570,7 +1630,8 @@
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), &patch_site);
+
__ bind(&done);
context()->Plug(eax);
}
@@ -1580,7 +1641,7 @@
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
- Label call_stub, done;
+ NearLabel call_stub, done;
if (left_is_constant_smi) {
__ mov(ecx, eax);
__ mov(eax, Immediate(value));
@@ -1589,8 +1650,8 @@
__ sub(Operand(eax), Immediate(value));
}
__ j(overflow, &call_stub);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &done);
__ bind(&call_stub);
if (left_is_constant_smi) {
@@ -1603,7 +1664,8 @@
}
Token::Value op = Token::SUB;
TypeRecordingBinaryOpStub stub(op, mode);
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), &patch_site);
+
__ bind(&done);
context()->Plug(eax);
}
@@ -1613,19 +1675,21 @@
Token::Value op,
OverwriteMode mode,
Smi* value) {
- Label call_stub, smi_case, done;
+ NearLabel call_stub, smi_case, done;
int shift_value = value->value() & 0x1f;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi_case);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &smi_case);
+ // Call stub.
__ bind(&call_stub);
__ mov(edx, eax);
__ mov(eax, Immediate(value));
TypeRecordingBinaryOpStub stub(op, mode);
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
+ // Smi case.
__ bind(&smi_case);
switch (op) {
case Token::SHL:
@@ -1675,17 +1739,19 @@
Token::Value op,
OverwriteMode mode,
Smi* value) {
- Label smi_case, done;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi_case);
+ NearLabel smi_case, done;
+
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &smi_case);
// The order of the arguments does not matter for bit-ops with a
// constant operand.
__ mov(edx, Immediate(value));
TypeRecordingBinaryOpStub stub(op, mode);
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
+ // Smi case.
__ bind(&smi_case);
switch (op) {
case Token::BIT_OR:
@@ -1753,19 +1819,20 @@
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
- Label done, stub_call, smi_case;
+ NearLabel done, smi_case, stub_call;
__ pop(edx);
__ mov(ecx, eax);
__ or_(eax, Operand(edx));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi_case);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &smi_case);
__ bind(&stub_call);
__ mov(eax, ecx);
TypeRecordingBinaryOpStub stub(op, mode);
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
+ // Smi case.
__ bind(&smi_case);
__ mov(eax, edx); // Copy left operand in case of a stub call.
@@ -1844,12 +1911,12 @@
OverwriteMode mode) {
__ pop(edx);
TypeRecordingBinaryOpStub stub(op, mode);
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
context()->Plug(eax);
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1897,6 +1964,8 @@
break;
}
}
+ PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+ context()->Plug(eax);
}
@@ -1969,8 +2038,6 @@
}
__ bind(&done);
}
-
- context()->Plug(eax);
}
@@ -2007,10 +2074,10 @@
__ push(Operand(esp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
- context()->DropAndPlug(1, eax);
- } else {
- context()->Plug(eax);
+ __ Drop(1);
}
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
}
@@ -2048,6 +2115,7 @@
__ pop(eax);
}
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -3103,6 +3171,13 @@
__ test(temp, Immediate(kSmiTagMask));
__ j(not_zero, &slow_case);
+ // Check that both indices are valid.
+ __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
+ __ cmp(temp, Operand(index_1));
+ __ j(below_equal, &slow_case);
+ __ cmp(temp, Operand(index_2));
+ __ j(below_equal, &slow_case);
+
// Bring addresses into index1 and index2.
__ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
__ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
@@ -3708,8 +3783,9 @@
}
// Inline smi case if we are in a loop.
- NearLabel stub_call;
- Label done;
+ NearLabel stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
@@ -3719,8 +3795,8 @@
__ j(overflow, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ patch_site.EmitJumpIfSmi(eax, &done);
+
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
@@ -3738,7 +3814,7 @@
__ mov(eax, Immediate(Smi::FromInt(1)));
TypeRecordingBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE);
- __ CallStub(&stub);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in eax.
@@ -3749,6 +3825,8 @@
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(eax);
}
// For all contexts except EffectContext we have the result on
// top of the stack.
@@ -3759,6 +3837,8 @@
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
}
break;
case NAMED_PROPERTY: {
@@ -3766,6 +3846,7 @@
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3780,6 +3861,7 @@
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
if (!context()->IsEffect()) {
@@ -3957,7 +4039,8 @@
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub;
+ __ IncrementCounter(&Counters::instance_of_full, 1);
+ InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@@ -4005,21 +4088,22 @@
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
NearLabel slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
__ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
// Record position and call the compare IC.
- Handle<Code> ic = CompareIC::GetUninitialized(op);
SetSourcePosition(expr->position());
- __ call(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
Split(cc, if_true, if_false, fall_through);
@@ -4123,6 +4207,16 @@
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index b34179a..9c9304d 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -2049,13 +2049,23 @@
}
+static bool HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
-#ifdef DEBUG
State previous_state = GetState();
-#endif
- State state = TargetState(x, y);
+
+ State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
rewritten = stub.GetCode();
@@ -2073,6 +2083,44 @@
Token::Name(op_));
}
#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ ASSERT(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta to the start of the inlined smi check and the
+ // condition code used at the patched jump.
+ int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ address, test_instruction_address, delta);
+ }
+
+ // Patch with a short conditional jump. There must be a
+ // short jump-if-carry/not-carry at this position.
+ Address jmp_address = test_instruction_address - delta;
+ ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode);
+ Condition cc = *jmp_address == Assembler::kJncShortOpcode
+ ? not_zero
+ : zero;
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
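
The patch-site wire format, restated: immediately after the IC call sits either a one-byte nop (kNopByte, nothing inlined) or a test al, imm8 (kTestAlByte, opcode 0xA8) whose immediate is the backwards delta to the short jump, and patching flips jc (0x72) to jz (0x74) or jnc (0x73) to jnz (0x75). A hypothetical standalone version, using raw ia32 opcodes rather than the Assembler constants:

    #include <stdint.h>
    #include <cassert>

    static void PatchShortJumpSketch(uint8_t* test_instruction_address) {
      if (*test_instruction_address != 0xA8) return;  // nop: nothing inlined
      int8_t delta = static_cast<int8_t>(test_instruction_address[1]);
      uint8_t* jmp_address = test_instruction_address - delta;
      assert(*jmp_address == 0x72 || *jmp_address == 0x73);  // jc / jnc
      *jmp_address = (*jmp_address == 0x72) ? 0x74   // jc  -> jz
                                            : 0x75;  // jnc -> jnz
    }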
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index dc0f5e9..0e97737 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -315,6 +315,13 @@
__ call(code, mode);
RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
}
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+ code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
}
@@ -1403,6 +1410,71 @@
}
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object) {
+ ASSERT(!input.is(temp1));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp1.is(temp2));
+
+ __ test(input, Immediate(kSmiTagMask));
+ __ j(equal, is_not_object);
+
+ __ cmp(input, Factory::null_value());
+ __ j(equal, is_object);
+
+ __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
+ __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, is_not_object);
+
+ __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+ __ j(below, is_not_object);
+ __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+ return below_equal;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+ Register reg = ToRegister(instr->input());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+ __ j(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ mov(result, Handle<Object>(Heap::false_value()));
+ __ jmp(&done);
+
+ __ bind(&is_true);
+ __ mov(result, Handle<Object>(Heap::true_value()));
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->input());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
+}
+
+
void LCodeGen::DoIsSmi(LIsSmi* instr) {
Operand input = ToOperand(instr->input());
Register result = ToRegister(instr->result());
@@ -1627,9 +1699,8 @@
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- InstanceofStub stub;
- __ push(ToOperand(instr->left()));
- __ push(ToOperand(instr->right()));
+ // Object and function are in fixed registers eax and edx.
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
NearLabel true_value, done;
@@ -1647,9 +1718,7 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- InstanceofStub stub;
- __ push(ToOperand(instr->left()));
- __ push(ToOperand(instr->right()));
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ test(eax, Operand(eax));
EmitBranch(true_block, false_block, zero);
@@ -2174,6 +2243,82 @@
}
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ XMMRegister xmm_scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->input());
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ ExternalReference negative_infinity =
+ ExternalReference::address_of_negative_infinity();
+ __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
+ __ ucomisd(xmm_scratch, input_reg);
+ DeoptimizeIf(equal, instr->environment());
+ __ sqrtsd(input_reg, input_reg);
+}
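
The deoptimization before sqrtsd exists for a single corner case: ECMAScript requires Math.pow(-Infinity, 0.5) to be +Infinity, while sqrt(-Infinity) is NaN, so -Infinity must be handled outside the fast path. The discrepancy, checked in plain C++:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      double ninf = -std::numeric_limits<double>::infinity();
      assert(std::pow(ninf, 0.5) == std::numeric_limits<double>::infinity());
      assert(std::isnan(std::sqrt(ninf)));  // why sqrtsd alone is not enough
      return 0;
    }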
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ // It is safe to use ebx directly since the instruction is marked
+ // as a call.
+ __ PrepareCallCFunction(4, ebx);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+ __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ } else if (exponent_type.IsInteger32()) {
+ // It is safe to use ebx directly since the instruction is marked
+ // as a call.
+ ASSERT(!ToRegister(right).is(ebx));
+ __ PrepareCallCFunction(4, ebx);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+ __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
+ __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+ } else {
+ ASSERT(exponent_type.IsTagged());
+ CpuFeatures::Scope scope(SSE2);
+ Register right_reg = ToRegister(right);
+
+ Label non_smi, call;
+ __ test(right_reg, Immediate(kSmiTagMask));
+ __ j(not_zero, &non_smi);
+ __ SmiUntag(right_reg);
+ __ cvtsi2sd(result_reg, Operand(right_reg));
+ __ jmp(&call);
+
+ __ bind(&non_smi);
+ // It is safe to use ebx directly since the instruction is marked
+ // as a call.
+ ASSERT(!right_reg.is(ebx));
+ __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+ __ bind(&call);
+ __ PrepareCallCFunction(4, ebx);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+ __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ }
+
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(result_reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+}
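
On the PrepareCallCFunction(4, ebx) calls above: ia32 passes C arguments on the stack in 4-byte words, so the double/double case needs all four words, while the double/int case uses three and conservatively reserves the same four:

    // Stack layout handed to the C function (ia32, 4-byte argument words):
    //   double/double: [esp+0..7] = left, [esp+8..15] = right  -> 4 words used
    //   double/int:    [esp+0..7] = left, [esp+8..11] = right  -> 3 used, 4 reserved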
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheSSE2Stub stub(TranscendentalCache::LOG);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
@@ -2188,6 +2333,13 @@
case kMathSqrt:
DoMathSqrt(instr);
break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
+
default:
UNREACHABLE();
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 91b3fab..5ac7b1e 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -175,6 +175,8 @@
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
+ void DoMathPowHalf(LUnaryMathOperation* instr);
+ void DoMathLog(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
@@ -195,6 +197,15 @@
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split into the true and
+ // false labels should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index e3a3d7b..e1148fc 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -206,6 +206,13 @@
}
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+ stream->Add("if is_object(");
+ input()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
stream->Add("if is_smi(");
input()->PrintTo(stream);
@@ -460,12 +467,6 @@
}
-int LChunk::NearestNextGapPos(int index) const {
- while (!IsGapAt(index)) index++;
- return index;
-}
-
-
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
}
@@ -880,19 +881,6 @@
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- HBasicBlock* deopt_predecessor = instr->block()->deopt_predecessor();
- if (deopt_predecessor != NULL &&
- deopt_predecessor->inverted()) {
- HEnvironment* env = current_block_->last_environment();
- HValue* value = env->Pop();
- ASSERT(value->IsConstant());
- Handle<Object> obj = HConstant::cast(value)->handle();
- ASSERT(*obj == *Factory::true_value() || *obj == *Factory::false_value());
- env->Push(*obj == *Factory::true_value()
- ? current_block_->graph()->GetConstantFalse()
- : current_block_->graph()->GetConstantTrue());
- }
-
return new LLabel(instr->block());
}
@@ -1257,6 +1245,17 @@
temp,
first_id,
second_id);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+ temp1,
+ temp2,
+ first_id,
+ second_id);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1266,8 +1265,8 @@
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
- new LInstanceOfAndBranch(Use(instance_of->left()),
- Use(instance_of->right()),
+ new LInstanceOfAndBranch(UseFixed(instance_of->left(), eax),
+ UseFixed(instance_of->right(), edx),
first_id,
second_id);
return MarkAsCall(result, instr);
@@ -1317,7 +1316,8 @@
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* result =
- new LInstanceOf(Use(instr->left()), Use(instr->right()));
+ new LInstanceOf(UseFixed(instr->left(), eax),
+ UseFixed(instr->right(), edx));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1337,7 +1337,7 @@
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
- LOperand* argument = Use(instr->argument());
+ LOperand* argument = UseOrConstant(instr->argument());
return new LPushArgument(argument);
}
@@ -1360,21 +1360,29 @@
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- MathFunctionId op = instr->op();
- LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result = new LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
+ BuiltinFunctionId op = instr->op();
+ if (op == kMathLog) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LInstruction* result = new LUnaryMathOperation(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LInstruction* result = new LUnaryMathOperation(input);
+ switch (op) {
+ case kMathAbs:
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ case kMathFloor:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathRound:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathSqrt:
+ return DefineSameAsFirst(result);
+ case kMathPowHalf:
+ return AssignEnvironment(DefineSameAsFirst(result));
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
}
}
@@ -1572,6 +1580,22 @@
}
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), xmm1);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), xmm2) :
+ UseFixed(instr->right(), eax);
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
if (instr->left()->representation().IsInteger32()) {
@@ -1612,6 +1636,14 @@
}
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+
+ return DefineAsRegister(new LIsObject(value, TempRegister()));
+}
+
+
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index af0d560..3f48e50 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -67,6 +67,7 @@
// LLoadKeyedGeneric
// LModI
// LMulI
+// LPower
// LShiftI
// LSubI
// LCallConstantFunction
@@ -123,6 +124,8 @@
// LInteger32ToDouble
// LIsNull
// LIsNullAndBranch
+// LIsObject
+// LIsObjectAndBranch
// LIsSmi
// LIsSmiAndBranch
// LLoadNamedField
@@ -205,6 +208,8 @@
V(Integer32ToDouble) \
V(IsNull) \
V(IsNullAndBranch) \
+ V(IsObject) \
+ V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(HasInstanceType) \
@@ -229,6 +234,7 @@
V(ObjectLiteral) \
V(OsrEntry) \
V(Parameter) \
+ V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -668,7 +674,7 @@
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream) const;
- MathFunctionId op() const { return hydrogen()->op(); }
+ BuiltinFunctionId op() const { return hydrogen()->op(); }
};
@@ -745,6 +751,48 @@
};
+class LIsObject: public LUnaryOperation {
+ public:
+ LIsObject(LOperand* value, LOperand* temp)
+ : LUnaryOperation(value), temp_(temp) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+
+ LOperand* temp() const { return temp_; }
+
+ private:
+ LOperand* temp_;
+};
+
+
+class LIsObjectAndBranch: public LIsObject {
+ public:
+ LIsObjectAndBranch(LOperand* value,
+ LOperand* temp,
+ LOperand* temp2,
+ int true_block_id,
+ int false_block_id)
+ : LIsObject(value, temp),
+ temp2_(temp2),
+ true_block_id_(true_block_id),
+ false_block_id_(false_block_id) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ virtual void PrintDataTo(StringStream* stream) const;
+ virtual bool IsControl() const { return true; }
+
+ int true_block_id() const { return true_block_id_; }
+ int false_block_id() const { return false_block_id_; }
+
+ LOperand* temp2() const { return temp2_; }
+
+ private:
+ LOperand* temp2_;
+ int true_block_id_;
+ int false_block_id_;
+};
+
+
class LIsSmi: public LUnaryOperation {
public:
explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
@@ -1154,6 +1202,16 @@
};
+class LPower: public LBinaryOperation {
+ public:
+ LPower(LOperand* left, LOperand* right)
+ : LBinaryOperation(left, right) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
class LArithmeticD: public LBinaryOperation {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1890,7 +1948,6 @@
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
- int NearestNextGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
LLabel* GetLabel(int block_id) const {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 84911ec..7c33906 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -74,30 +74,6 @@
}
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- ASSERT(cc == equal || cc == not_equal);
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- mov(scratch, Operand(object));
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
- cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
- j(cc, branch);
- } else {
- int32_t new_space_start = reinterpret_cast<int32_t>(
- ExternalReference::new_space_start().address());
- lea(scratch, Operand(object, -new_space_start));
- and_(scratch, Heap::NewSpaceMask());
- j(cc, branch);
- }
-}
-
-
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@@ -109,7 +85,7 @@
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
- Label done;
+ NearLabel done;
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
@@ -1216,25 +1192,29 @@
}
-// If true, a Handle<T> passed by value is passed and returned by
-// using the location_ field directly. If false, it is passed and
-// returned as a pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
+// If true, a Handle<T> returned by value from a function with the cdecl
+// calling convention will be returned directly, as the value of its
+// location_ field, in register eax.
+// If false, it is returned as a pointer to a memory region preallocated
+// by the caller. A pointer to this region should be passed to the
+// function as an implicit first argument.
+#if defined(USING_BSD_ABI) || defined(__MINGW32__)
+static const bool kReturnHandlesDirectly = true;
#else
-static const bool kPassHandlesDirectly = false;
+static const bool kReturnHandlesDirectly = false;
#endif
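
The convention documented above is the usual ia32 small-struct return split, sketched on a hypothetical one-pointer struct (illustrative, not V8 code):

    // A Handle<T> is, in essence, a single pointer.
    struct HandleSketch { void* location_; };

    HandleSketch ReturnsHandle(void* p) {
      HandleSketch h;
      h.location_ = p;
      // ABIs with kReturnHandlesDirectly == true hand back location_ in eax;
      // the others have the caller pass a hidden pointer to preallocated
      // space as an implicit first argument and return that pointer.
      return h;
    }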
Operand ApiParameterOperand(int index) {
- return Operand(esp, (index + (kPassHandlesDirectly ? 0 : 1)) * kPointerSize);
+ return Operand(
+ esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
}
void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
- if (kPassHandlesDirectly) {
+ if (kReturnHandlesDirectly) {
EnterApiExitFrame(argc);
- // When handles as passed directly we don't have to allocate extra
+ // When handles are returned directly we don't have to allocate extra
// space for and pass an out parameter.
} else {
// We allocate two additional slots: return value and pointer to it.
@@ -1279,7 +1259,7 @@
// Call the api function!
call(function->address(), RelocInfo::RUNTIME_ENTRY);
- if (!kPassHandlesDirectly) {
+ if (!kReturnHandlesDirectly) {
// The returned value is a pointer to the handle holding the result.
// Dereference this to get to the location.
mov(eax, Operand(eax, 0));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 8407802..6f5fa87 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -70,10 +70,11 @@
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
+ template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc, // equal for new space, not_equal otherwise.
- Label* branch);
+ LabelType* branch);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -658,6 +659,31 @@
};
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ LabelType* branch) {
+ ASSERT(cc == equal || cc == not_equal);
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ mov(scratch, Operand(object));
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
+ cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
+ j(cc, branch);
+ } else {
+ int32_t new_space_start = reinterpret_cast<int32_t>(
+ ExternalReference::new_space_start().address());
+ lea(scratch, Operand(object, -new_space_start));
+ and_(scratch, Heap::NewSpaceMask());
+ j(cc, branch);
+ }
+}
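
The non-serializer path of InNewSpace works because, as this sketch assumes, the new space occupies a power-of-two-sized and equally aligned region: subtracting the start and masking with ~(size - 1) leaves zero exactly for addresses inside it. Illustrative names, not V8 API:

    #include <stdint.h>

    // j(equal, branch) in InNewSpace corresponds to this returning true.
    static bool InNewSpaceSketch(uintptr_t addr,
                                 uintptr_t new_space_start,
                                 uintptr_t new_space_mask) {
      // new_space_mask == ~(new_space_size - 1); start is size-aligned.
      return ((addr - new_space_start) & new_space_mask) == 0;
    }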
+
+
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 352eae1..99888b0 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -2133,8 +2133,8 @@
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasCustomCallGenerator()) {
- const int id = function_info->custom_call_generator_id();
+ if (function_info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, NULL, function, name);
Object* result;
@@ -2375,8 +2375,8 @@
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasCustomCallGenerator()) {
- const int id = function_info->custom_call_generator_id();
+ if (function_info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, cell, function, name);
Object* result;
diff --git a/src/ic.cc b/src/ic.cc
index cda0b15..645c6fd 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1951,7 +1951,7 @@
TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
- TRBinaryOpIC::TypeInfo y) {
+ TRBinaryOpIC::TypeInfo y) {
if (x == UNINITIALIZED) return y;
if (y == UNINITIALIZED) return x;
if (x == STRING && y == STRING) return STRING;
@@ -2041,6 +2041,11 @@
TRBinaryOpIC::GetName(result_type),
Token::Name(op));
}
+
+ // Activate inlined smi code.
+ if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
+ PatchInlinedSmiCode(ic.address());
+ }
}
Handle<JSBuiltinsObject> builtins = Top::builtins();
@@ -2127,13 +2132,17 @@
}
-CompareIC::State CompareIC::TargetState(Handle<Object> x, Handle<Object> y) {
- State state = GetState();
- if (state != UNINITIALIZED) return GENERIC;
- if (x->IsSmi() && y->IsSmi()) return SMIS;
- if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+CompareIC::State CompareIC::TargetState(State state,
+ bool has_inlined_smi_code,
+ Handle<Object> x,
+ Handle<Object> y) {
+ if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
+ if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
+ if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
+ x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
- if (x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+ if (state == UNINITIALIZED &&
+ x->IsJSObject() && y->IsJSObject()) return OBJECTS;
return GENERIC;
}
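
Spelled out, the transitions the reworked TargetState permits (everything else falls through to GENERIC):

    //   UNINITIALIZED + (smi, smi)                       -> SMIS
    //   UNINITIALIZED + (number, number)                 -> HEAP_NUMBERS
    //   SMIS with inlined smi code + (number, number)    -> HEAP_NUMBERS
    //   UNINITIALIZED + (object, object), == / === only  -> OBJECTS
    //   any non-initial state without inlined smi code   -> GENERIC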
diff --git a/src/ic.h b/src/ic.h
index 434c502..8562bcc 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -582,7 +582,8 @@
static const char* GetStateName(State state);
private:
- State TargetState(Handle<Object> x, Handle<Object> y);
+ State TargetState(State state, bool has_inlined_smi_code,
+ Handle<Object> x, Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
@@ -591,6 +592,8 @@
Token::Value op_;
};
+// Helper for TRBinaryOpIC and CompareIC.
+void PatchInlinedSmiCode(Address address);
} } // namespace v8::internal
diff --git a/src/json.js b/src/json.js
index e8b732a..89009a9 100644
--- a/src/json.js
+++ b/src/json.js
@@ -66,21 +66,10 @@
}
}
-function StackContains(stack, val) {
- var length = stack.length;
- for (var i = 0; i < length; i++) {
- if (stack[i] === val) {
- return true;
- }
- }
- return false;
-}
-
function SerializeArray(value, replacer, stack, indent, gap) {
- if (StackContains(stack, value)) {
+ if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
- stack.push(value);
var stepback = indent;
indent += gap;
var partial = [];
@@ -108,10 +97,9 @@
}
function SerializeObject(value, replacer, stack, indent, gap) {
- if (StackContains(stack, value)) {
+ if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
- stack.push(value);
var stepback = indent;
indent += gap;
var partial = [];
@@ -158,49 +146,47 @@
function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
- if (IS_OBJECT(value) && value) {
+ if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
if (IS_FUNCTION(toJSON)) {
- value = toJSON.call(value, key);
+ value = %_CallFunction(value, key, toJSON);
}
}
if (IS_FUNCTION(replacer)) {
- value = replacer.call(holder, key, value);
+ value = %_CallFunction(holder, key, value, replacer);
}
- // Unwrap value if necessary
- if (IS_OBJECT(value)) {
- if (IS_NUMBER_WRAPPER(value)) {
- value = $Number(value);
+ if (IS_STRING(value)) {
+ return %QuoteJSONString(value);
+ } else if (IS_NUMBER(value)) {
+ return $isFinite(value) ? $String(value) : "null";
+ } else if (IS_BOOLEAN(value)) {
+ return value ? "true" : "false";
+ } else if (IS_NULL(value)) {
+ return "null";
+ } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
+ // Non-callable object. If it's a primitive wrapper, it must be unwrapped.
+ if (IS_ARRAY(value)) {
+ return SerializeArray(value, replacer, stack, indent, gap);
+ } else if (IS_NUMBER_WRAPPER(value)) {
+ value = ToNumber(value);
+ return $isFinite(value) ? ToString(value) : "null";
} else if (IS_STRING_WRAPPER(value)) {
- value = $String(value);
+ return %QuoteJSONString(ToString(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
- value = %_ValueOf(value);
+ return %_ValueOf(value) ? "true" : "false";
+ } else {
+ return SerializeObject(value, replacer, stack, indent, gap);
}
}
- switch (typeof value) {
- case "string":
- return %QuoteJSONString(value);
- case "object":
- if (!value) {
- return "null";
- } else if (IS_ARRAY(value)) {
- return SerializeArray(value, replacer, stack, indent, gap);
- } else {
- return SerializeObject(value, replacer, stack, indent, gap);
- }
- case "number":
- return $isFinite(value) ? $String(value) : "null";
- case "boolean":
- return value ? "true" : "false";
- }
+ // Undefined or a callable object.
+ return void 0;
}
function BasicSerializeArray(value, stack, builder) {
- if (StackContains(stack, value)) {
+ if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
- stack.push(value);
builder.push("[");
var len = value.length;
for (var i = 0; i < len; i++) {
@@ -220,10 +206,9 @@
function BasicSerializeObject(value, stack, builder) {
- if (StackContains(stack, value)) {
+ if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
- stack.push(value);
builder.push("{");
for (var p in value) {
if (%HasLocalProperty(value, p)) {
@@ -250,40 +235,41 @@
function BasicJSONSerialize(key, holder, stack, builder) {
var value = holder[key];
- if (IS_OBJECT(value) && value) {
+ if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
- if (IS_FUNCTION(toJSON)) value = toJSON.call(value, $String(key));
+ if (IS_FUNCTION(toJSON)) {
+ value = %_CallFunction(value, ToString(key), toJSON);
+ }
}
if (IS_STRING(value)) {
builder.push(%QuoteJSONString(value));
} else if (IS_NUMBER(value)) {
builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
} else if (IS_BOOLEAN(value)) {
- builder.push((value ? "true" : "false"));
- } else if (IS_OBJECT(value)) {
+ builder.push(value ? "true" : "false");
+ } else if (IS_NULL(value)) {
+ builder.push("null");
+ } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
+ // Value is a non-callable object.
// Unwrap value if necessary
if (IS_NUMBER_WRAPPER(value)) {
- value = %_ValueOf(value);
- builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
+ value = ToNumber(value);
+ builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
} else if (IS_STRING_WRAPPER(value)) {
- builder.push(%QuoteJSONString(%_ValueOf(value)));
+ builder.push(%QuoteJSONString(ToString(value)));
} else if (IS_BOOLEAN_WRAPPER(value)) {
- builder.push((%_ValueOf(value) ? "true" : "false"));
+ builder.push(%_ValueOf(value) ? "true" : "false");
+ } else if (IS_ARRAY(value)) {
+ BasicSerializeArray(value, stack, builder);
} else {
- // Regular non-wrapped object
- if (!value) {
- builder.push("null");
- } else if (IS_ARRAY(value)) {
- BasicSerializeArray(value, stack, builder);
- } else {
- BasicSerializeObject(value, stack, builder);
- }
+ BasicSerializeObject(value, stack, builder);
}
}
}
+
function JSONStringify(value, replacer, space) {
- if (IS_UNDEFINED(replacer) && IS_UNDEFINED(space)) {
+ if (%_ArgumentsLength() == 1) {
var builder = [];
BasicJSONSerialize('', {'': value}, [], builder);
if (builder.length == 0) return;
@@ -294,21 +280,18 @@
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
- space = $Number(space);
+ space = ToNumber(space);
} else if (IS_STRING_WRAPPER(space)) {
- space = $String(space);
+ space = ToString(space);
}
}
var gap;
if (IS_NUMBER(space)) {
- space = $Math.min(ToInteger(space), 10);
- gap = "";
- for (var i = 0; i < space; i++) {
- gap += " ";
- }
+ space = MathMax(0, MathMin(ToInteger(space), 10));
+ gap = SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
- gap = space.substring(0, 10);
+ gap = SubString(space, 0, 10);
} else {
gap = space;
}
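
The new gap computation clamps the numeric `space` argument to [0, 10] and takes a prefix of a ten-space string instead of looping. An equivalent C++ sketch:

  #include <algorithm>
  #include <string>

  // space < 0 yields an empty gap; space > 10 is capped at ten spaces.
  std::string MakeGap(int space) {
    int n = std::max(0, std::min(space, 10));
    return std::string(static_cast<size_t>(n), ' ');
  }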
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index db0bc8b..ac61c17 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -247,7 +247,7 @@
LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
- if (assigned_double_) {
+ if (IsDouble()) {
op = LDoubleRegister::Create(assigned_register());
} else {
op = LRegister::Create(assigned_register());
@@ -290,19 +290,27 @@
void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
- ASSERT(Start().Value() <= position.Value());
+ ASSERT(Start().Value() < position.Value());
ASSERT(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
UseInterval* current = FirstSearchIntervalForPosition(position);
+
+ // If the split position coincides with the beginning of a use interval
+ // we need to split use positions in a special way.
+ bool split_at_start = false;
+
while (current != NULL) {
if (current->Contains(position)) {
current->SplitAt(position);
break;
}
UseInterval* next = current->next();
- if (next->start().Value() >= position.Value()) break;
+ if (next->start().Value() >= position.Value()) {
+ split_at_start = (next->start().Value() == position.Value());
+ break;
+ }
current = next;
}
@@ -319,9 +327,19 @@
// position after it.
UsePosition* use_after = first_pos_;
UsePosition* use_before = NULL;
- while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
+ if (split_at_start) {
+ // The split position coincides with the beginning of a use interval (the
+ // end of a lifetime hole). A use at this position should be attributed to
+ // the split child because the split child owns the use interval covering it.
+ while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+ use_before = use_after;
+ use_after = use_after->next();
+ }
+ } else {
+ while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+ use_before = use_after;
+ use_after = use_after->next();
+ }
}
// Partition original use positions to the two live ranges.
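
To make the special case concrete, here is a sketch of the partitioning rule with plain ints standing in for LifetimePosition. With split_at_start the comparison is strict, so uses exactly at the split position migrate to the child range (names are illustrative only):

  #include <utility>
  #include <vector>

  // Partition sorted use positions at split point p. When p coincides with
  // the start of a use interval, uses at p belong to the child range.
  std::pair<std::vector<int>, std::vector<int>> PartitionUses(
      const std::vector<int>& uses, int p, bool split_at_start) {
    std::vector<int> parent, child;
    for (int use : uses) {
      bool stays_with_parent = split_at_start ? (use < p) : (use <= p);
      (stays_with_parent ? parent : child).push_back(use);
    }
    return std::make_pair(parent, child);
  }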
@@ -508,7 +526,7 @@
}
if (a->start().Value() < b->start().Value()) {
a = a->next();
- if (a == NULL && a->start().Value() > other->End().Value()) break;
+ if (a == NULL || a->start().Value() > other->End().Value()) break;
AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
} else {
b = b->next();
@@ -567,17 +585,12 @@
LifetimePosition start = LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
LifetimePosition end = LifetimePosition::FromInstructionIndex(
- block->last_instruction_index());
+ block->last_instruction_index()).NextInstruction();
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
- if (!range->IsEmpty() &&
- range->Start().Value() == end.NextInstruction().Value()) {
- range->AddUseInterval(start, end.NextInstruction());
- } else {
- range->AddUseInterval(start, end);
- }
+ range->AddUseInterval(start, end);
iterator.Advance();
}
}
@@ -625,7 +638,7 @@
if (result == NULL) {
result = new LiveRange(FixedLiveRangeID(index));
ASSERT(result->IsFixed());
- result->set_assigned_register(index, false);
+ result->set_assigned_register(index, GENERAL_REGISTERS);
fixed_live_ranges_[index] = result;
}
return result;
@@ -642,7 +655,7 @@
if (result == NULL) {
result = new LiveRange(FixedDoubleLiveRangeID(index));
ASSERT(result->IsFixed());
- result->set_assigned_register(index, true);
+ result->set_assigned_register(index, DOUBLE_REGISTERS);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -960,8 +973,8 @@
}
}
}
- Use(block_start_position, curr_position, temp, NULL);
- Define(curr_position.PrevInstruction(), temp, NULL);
+ Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+ Define(curr_position, temp, NULL);
}
}
}
@@ -1258,14 +1271,6 @@
}
-void LAllocator::AllocateGeneralRegisters() {
- HPhase phase("Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
- mode_ = CPU_REGISTERS;
- AllocateRegisters();
-}
-
-
bool LAllocator::SafePointsAreInOrder() const {
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
int safe_point = 0;
@@ -1397,10 +1402,18 @@
}
+void LAllocator::AllocateGeneralRegisters() {
+ HPhase phase("Allocate general registers", this);
+ num_registers_ = Register::kNumAllocatableRegisters;
+ mode_ = GENERAL_REGISTERS;
+ AllocateRegisters();
+}
+
+
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("Allocate double registers", this);
num_registers_ = DoubleRegister::kNumAllocatableRegisters;
- mode_ = XMM_REGISTERS;
+ mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1411,7 +1424,7 @@
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
- if (HasDoubleValue(live_ranges_[i]->id()) == (mode_ == XMM_REGISTERS)) {
+ if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
@@ -1422,7 +1435,7 @@
ASSERT(active_live_ranges_.is_empty());
ASSERT(inactive_live_ranges_.is_empty());
- if (mode_ == XMM_REGISTERS) {
+ if (mode_ == DOUBLE_REGISTERS) {
for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
@@ -1463,11 +1476,7 @@
current->Start().NextInstruction().Value()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
- LiveRange* part = Split(current,
- current->Start().NextInstruction(),
- pos->pos());
- Spill(current);
- AddToUnhandledSorted(part);
+ SpillBetween(current, current->Start(), pos->pos());
ASSERT(UnhandledIsSorted());
continue;
}
@@ -1521,6 +1530,16 @@
}
+const char* LAllocator::RegisterName(int allocation_index) {
+ ASSERT(mode_ != NONE);
+ if (mode_ == GENERAL_REGISTERS) {
+ return Register::AllocationIndexToString(allocation_index);
+ } else {
+ return DoubleRegister::AllocationIndexToString(allocation_index);
+ }
+}
+
+
void LAllocator::TraceAlloc(const char* msg, ...) {
if (FLAG_trace_alloc) {
va_list arguments;
@@ -1544,10 +1563,12 @@
}
-bool LAllocator::HasDoubleValue(int virtual_register) const {
+RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
HValue* value = graph()->LookupValue(virtual_register);
- if (value == NULL) return false;
- return value->representation().IsDouble();
+ if (value != NULL && value->representation().IsDouble()) {
+ return DOUBLE_REGISTERS;
+ }
+ return GENERAL_REGISTERS;
}
@@ -1728,16 +1749,22 @@
}
+// TryAllocateFreeReg and AllocateBlockedReg rely on this invariant when
+// sizing their local arrays.
+STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
+ Register::kNumAllocatableRegisters);
+
+
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition max_pos = LifetimePosition::FromInstructionIndex(
- chunk_->instructions()->length() + 1);
- ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
- EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
- free_pos(max_pos);
+ LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ free_until_pos[i] = LifetimePosition::MaxPosition();
+ }
+
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* cur_active = active_live_ranges_.at(i);
- free_pos[cur_active->assigned_register()] =
+ free_until_pos[cur_active->assigned_register()] =
LifetimePosition::FromInstructionIndex(0);
}
@@ -1748,67 +1775,83 @@
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
- free_pos[cur_reg] = Min(free_pos[cur_reg], next_intersection);
+ free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
- UsePosition* pos = current->FirstPosWithHint();
- if (pos != NULL) {
- LOperand* hint = pos->hint();
+ UsePosition* hinted_use = current->FirstPosWithHint();
+ if (hinted_use != NULL) {
+ LOperand* hint = hinted_use->hint();
if (hint->IsRegister() || hint->IsDoubleRegister()) {
int register_index = hint->index();
- TraceAlloc("Found reg hint %d for live range %d (free [%d, end %d[)\n",
- register_index,
- current->id(),
- free_pos[register_index].Value(),
- current->End().Value());
- if (free_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %d to live range %d\n",
- register_index,
+ TraceAlloc(
+ "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index),
+ free_until_pos[register_index].Value(),
+ current->id(),
+ current->End().Value());
+
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[register_index].Value() >= current->End().Value()) {
+ TraceAlloc("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index),
current->id());
- current->set_assigned_register(register_index, mode_ == XMM_REGISTERS);
+ current->set_assigned_register(register_index, mode_);
return true;
}
}
}
- int max_reg = 0;
+ // Find the register which stays free for the longest time.
+ int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
- if (free_pos[i].Value() > free_pos[max_reg].Value()) {
- max_reg = i;
+ if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+ reg = i;
}
}
- if (free_pos[max_reg].InstructionIndex() == 0) {
+ LifetimePosition pos = free_until_pos[reg];
+
+ if (pos.Value() <= current->Start().Value()) {
+ // All registers are blocked.
return false;
- } else if (free_pos[max_reg].Value() >= current->End().Value()) {
- TraceAlloc("Assigning reg %d to live range %d\n", max_reg, current->id());
- current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
- } else {
- // Split the interval at the nearest gap and never split an interval at its
- // start position.
- LifetimePosition pos =
- LifetimePosition::FromInstructionIndex(
- chunk_->NearestGapPos(free_pos[max_reg].InstructionIndex()));
- if (pos.Value() <= current->Start().Value()) return false;
- LiveRange* second_range = Split(current, pos);
- AddToUnhandledSorted(second_range);
- current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
}
+ if (pos.Value() < current->End().Value()) {
+ // Register reg is available at the range start but becomes blocked before
+ // the range end. Split current at the position where it becomes blocked.
+ LiveRange* tail = SplitAt(current, pos);
+ AddToUnhandledSorted(tail);
+ }
+
+
+ // Register reg is available at the range start and is free until
+ // the range end.
+ ASSERT(pos.Value() >= current->End().Value());
+ TraceAlloc("Assigning free reg %s to live range %d\n",
+ RegisterName(reg),
+ current->id());
+ current->set_assigned_register(reg, mode_);
+
return true;
}
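
A compact restatement of the decision structure above, under the assumption that positions are plain ints and free_until[i] is the position up to which register i stays free (names are illustrative, not V8's):

  #include <vector>

  enum class FreeRegOutcome { kAllBlocked, kAssignWhole, kAssignAfterSplit };

  // Pick the register that stays free longest; assign it outright if it
  // covers the whole range, split at the blocking position if it covers
  // only a prefix, and fail if nothing is free past the range start.
  FreeRegOutcome PickFreeReg(const std::vector<int>& free_until,
                             int range_start, int range_end, int* reg_out) {
    int reg = 0;
    for (int i = 1; i < static_cast<int>(free_until.size()); ++i) {
      if (free_until[i] > free_until[reg]) reg = i;
    }
    *reg_out = reg;
    if (free_until[reg] <= range_start) return FreeRegOutcome::kAllBlocked;
    if (free_until[reg] < range_end) return FreeRegOutcome::kAssignAfterSplit;
    return FreeRegOutcome::kAssignWhole;
  }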
void LAllocator::AllocateBlockedReg(LiveRange* current) {
- LifetimePosition max_pos =
- LifetimePosition::FromInstructionIndex(
- chunk_->instructions()->length() + 1);
- ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
- EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
- use_pos(max_pos);
- EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
- block_pos(max_pos);
+ UsePosition* register_use = current->NextRegisterPosition(current->Start());
+ if (register_use == NULL) {
+ // There is no use in the current live range that requires a register.
+ // We can just spill it.
+ Spill(current);
+ return;
+ }
+
+
+ LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+ }
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* range = active_live_ranges_[i];
@@ -1841,47 +1884,63 @@
}
}
- int max_reg = 0;
+ int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[max_reg].Value()) {
- max_reg = i;
+ if (use_pos[i].Value() > use_pos[reg].Value()) {
+ reg = i;
}
}
- UsePosition* first_usage = current->NextRegisterPosition(current->Start());
- if (first_usage == NULL) {
- Spill(current);
- } else if (use_pos[max_reg].Value() < first_usage->pos().Value()) {
- SplitAndSpill(current, current->Start(), first_usage->pos());
- } else {
- if (block_pos[max_reg].Value() < current->End().Value()) {
- // Split current before blocked position.
- LiveRange* second_range = Split(current,
- current->Start(),
- block_pos[max_reg]);
- AddToUnhandledSorted(second_range);
- }
+ LifetimePosition pos = use_pos[reg];
- current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
- SplitAndSpillIntersecting(current);
+ if (pos.Value() < register_use->pos().Value()) {
+ // All registers are blocked before the first use that requires a register.
+ // Spill starting part of live range up to that use.
+ //
+ // Corner case: the first use position is equal to the start of the range.
+ // In this case we have nothing to spill and SpillBetween will just return
+ // this range to the list of unhandled ones, which would lead to an
+ // infinite loop.
+ ASSERT(current->Start().Value() < register_use->pos().Value());
+ SpillBetween(current, current->Start(), register_use->pos());
+ return;
}
+
+ if (block_pos[reg].Value() < current->End().Value()) {
+ // Register becomes blocked before the current range end. Split before that
+ // position.
+ LiveRange* tail = SplitBetween(current,
+ current->Start(),
+ block_pos[reg].InstructionStart());
+ AddToUnhandledSorted(tail);
+ }
+
+ // Register reg is not blocked for the whole range.
+ ASSERT(block_pos[reg].Value() >= current->End().Value());
+ TraceAlloc("Assigning blocked reg %s to live range %d\n",
+ RegisterName(reg),
+ current->id());
+ current->set_assigned_register(reg, mode_);
+
+ // This register was not free. Thus we need to find and spill
+ // parts of the active and inactive live ranges that use the same register
+ // at the same lifetime positions as current.
+ SplitAndSpillIntersecting(current);
}
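
The blocked-register path has the same shape; a companion sketch under the same int-position assumption, where use_pos[i] is the next use of register i by other ranges and block_pos[i] is where i becomes hard-blocked by a fixed range:

  #include <vector>

  enum class BlockedOutcome { kSpillUntilFirstUse, kAssignAfterSplit, kAssign };

  BlockedOutcome PickBlockedReg(const std::vector<int>& use_pos,
                                const std::vector<int>& block_pos,
                                int first_register_use, int range_end,
                                int* reg_out) {
    int reg = 0;
    for (int i = 1; i < static_cast<int>(use_pos.size()); ++i) {
      if (use_pos[i] > use_pos[reg]) reg = i;
    }
    *reg_out = reg;
    // Every register is used again before our first use that needs one:
    // cheaper to spill the head of the current range than to evict anything.
    if (use_pos[reg] < first_register_use) {
      return BlockedOutcome::kSpillUntilFirstUse;
    }
    // Evict, but split first if a fixed range blocks reg before our end.
    if (block_pos[reg] < range_end) return BlockedOutcome::kAssignAfterSplit;
    return BlockedOutcome::kAssign;
  }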
void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
ASSERT(current->HasRegisterAssigned());
int reg = current->assigned_register();
- LifetimePosition split_pos =
- LifetimePosition::FromInstructionIndex(
- chunk_->NearestGapPos(current->Start().InstructionIndex()));
+ LifetimePosition split_pos = current->Start();
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* range = active_live_ranges_[i];
if (range->assigned_register() == reg) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
- SplitAndSpill(range, split_pos);
+ SpillAfter(range, split_pos);
} else {
- SplitAndSpill(range, split_pos, next_pos->pos());
+ SpillBetween(range, split_pos, next_pos->pos());
}
ActiveToHandled(range);
--i;
@@ -1896,10 +1955,10 @@
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
- SplitAndSpill(range, split_pos);
+ SpillAfter(range, split_pos);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
- SplitAndSpill(range, split_pos, next_intersection);
+ SpillBetween(range, split_pos, next_intersection);
}
InactiveToHandled(range);
--i;
@@ -1909,56 +1968,6 @@
}
-LiveRange* LAllocator::Split(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d[\n",
- range->id(),
- start.Value(),
- end.Value());
-
- LifetimePosition split_pos = FindOptimalSplitPos(
- start, end.PrevInstruction().InstructionEnd());
- ASSERT(split_pos.Value() >= start.Value());
- return Split(range, split_pos);
-}
-
-
-LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end) {
- int start_instr = start.InstructionIndex();
- int end_instr = end.InstructionIndex();
- ASSERT(start_instr <= end_instr);
-
- // We have no choice
- if (start_instr == end_instr) return end;
-
- HBasicBlock* end_block = GetBlock(start);
- HBasicBlock* start_block = GetBlock(end);
-
- if (end_block == start_block) {
- // The interval is split in the same basic block. Split at latest possible
- // position.
- return end;
- }
-
- HBasicBlock* block = end_block;
- // Move to the most outside loop header.
- while (block->parent_loop_header() != NULL &&
- block->parent_loop_header()->block_id() > start_block->block_id()) {
- block = block->parent_loop_header();
- }
-
- if (block == end_block) {
- return end;
- }
-
- return LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
-}
-
-
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
@@ -1979,46 +1988,98 @@
}
-LiveRange* LAllocator::Split(LiveRange* range, LifetimePosition pos) {
+LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
- if (pos.Value() <= range->Start().Value()) {
- return range;
- }
+
+ if (pos.Value() <= range->Start().Value()) return range;
+
LiveRange* result = LiveRangeFor(next_virtual_register_++);
range->SplitAt(pos, result);
return result;
}
-void LAllocator::SplitAndSpill(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- // We have an interval range and want to make sure that it is
- // spilled at start and at most spilled until end.
- ASSERT(start.Value() < end.Value());
- LiveRange* tail_part = Split(range, start);
- if (tail_part->Start().Value() < end.Value()) {
- LiveRange* third_part = Split(tail_part,
- tail_part->Start().NextInstruction(),
- end);
- Spill(tail_part);
- ASSERT(third_part != tail_part);
- AddToUnhandledSorted(third_part);
- } else {
- AddToUnhandledSorted(tail_part);
- }
+LiveRange* LAllocator::SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
+ ASSERT(!range->IsFixed());
+ TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+ range->id(),
+ start.Value(),
+ end.Value());
+
+ LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+ ASSERT(split_pos.Value() >= start.Value());
+ return SplitAt(range, split_pos);
}
-void LAllocator::SplitAndSpill(LiveRange* range, LifetimePosition at) {
- at = LifetimePosition::FromInstructionIndex(
- chunk_->NearestGapPos(at.InstructionIndex()));
- LiveRange* second_part = Split(range, at);
+LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
+ LifetimePosition end) {
+ int start_instr = start.InstructionIndex();
+ int end_instr = end.InstructionIndex();
+ ASSERT(start_instr <= end_instr);
+
+ // We have no choice: start and end are within the same instruction.
+ if (start_instr == end_instr) return end;
+
+ HBasicBlock* end_block = GetBlock(start);
+ HBasicBlock* start_block = GetBlock(end);
+
+ if (end_block == start_block) {
+ // The interval is split in the same basic block. Split at latest possible
+ // position.
+ return end;
+ }
+
+ HBasicBlock* block = end_block;
+ // Find the header of the outermost loop.
+ while (block->parent_loop_header() != NULL &&
+ block->parent_loop_header()->block_id() > start_block->block_id()) {
+ block = block->parent_loop_header();
+ }
+
+ if (block == end_block) return end;
+
+ return LifetimePosition::FromInstructionIndex(
+ block->first_instruction_index());
+}
+
+
+void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+ LiveRange* second_part = SplitAt(range, pos);
Spill(second_part);
}
+void LAllocator::SpillBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
+ ASSERT(start.Value() < end.Value());
+ LiveRange* second_part = SplitAt(range, start);
+
+ if (second_part->Start().Value() < end.Value()) {
+ // The split result intersects with [start, end[.
+ // Split it at a position inside ]start+1, end[, spill the middle part
+ // and add the rest to the unhandled list.
+ LiveRange* third_part = SplitBetween(
+ second_part,
+ second_part->Start().InstructionEnd(),
+ end.PrevInstruction().InstructionEnd());
+
+ ASSERT(third_part != second_part);
+
+ Spill(second_part);
+ AddToUnhandledSorted(third_part);
+ } else {
+ // The split result does not intersect with [start, end[.
+ // Nothing to spill. Just add it to the unhandled list as a whole.
+ AddToUnhandledSorted(second_part);
+ }
+}
+
+
void LAllocator::Spill(LiveRange* range) {
ASSERT(!range->IsSpilled());
TraceAlloc("Spilling live range %d\n", range->id());
@@ -2026,7 +2087,7 @@
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == XMM_REGISTERS);
+ if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
first->SetSpillOperand(op);
}
range->MakeSpilled();
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 52fee64..3ec984e 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -55,6 +55,7 @@
class LStackSlot;
class LRegister;
+
// This class represents a single point of a LOperand's lifetime.
// For each lithium instruction there are exactly two lifetime positions:
// the beginning and the end of the instruction. Lifetime positions for
@@ -121,7 +122,13 @@
// instruction.
bool IsValid() const { return value_ != -1; }
- static LifetimePosition Invalid() { return LifetimePosition(); }
+ static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+ static inline LifetimePosition MaxPosition() {
+ // We have to use this kind of getter instead of a static member due to
+ // a crash bug in GDB.
+ return LifetimePosition(kMaxInt);
+ }
private:
static const int kStep = 2;
@@ -135,6 +142,13 @@
};
+enum RegisterKind {
+ NONE,
+ GENERAL_REGISTERS,
+ DOUBLE_REGISTERS
+};
+
+
class LOperand: public ZoneObject {
public:
enum Kind {
@@ -594,8 +608,8 @@
explicit LiveRange(int id)
: id_(id),
spilled_(false),
- assigned_double_(false),
assigned_register_(kInvalidAssignment),
+ assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
@@ -620,10 +634,10 @@
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, bool double_reg) {
+ void set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
- assigned_double_ = double_reg;
+ assigned_register_kind_ = register_kind;
ConvertOperands();
}
void MakeSpilled() {
@@ -652,9 +666,13 @@
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos);
+ // Split this live range at the given position, which must follow the start
+ // of the range.
+ // All uses following the given position will be moved from this
+ // live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result);
- bool IsDouble() const { return assigned_double_; }
+ bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
@@ -721,8 +739,8 @@
int id_;
bool spilled_;
- bool assigned_double_;
int assigned_register_;
+ RegisterKind assigned_register_kind_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
@@ -774,8 +792,8 @@
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
- // Checks whether the value of a given virtual register is a double.
- bool HasDoubleValue(int virtual_register) const;
+ // Returns the register kind required by the given virtual register.
+ RegisterKind RequiredRegisterKind(int virtual_register) const;
// Begin a new instruction.
void BeginInstruction();
@@ -814,12 +832,6 @@
#endif
private:
- enum OperationMode {
- NONE,
- CPU_REGISTERS,
- XMM_REGISTERS
- };
-
void MeetRegisterConstraints();
void ResolvePhis();
void BuildLiveRanges();
@@ -871,17 +883,38 @@
// Helper methods for allocating registers.
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
- void SplitAndSpillIntersecting(LiveRange* range);
+
+ // Live range splitting helpers.
+
+ // Split the given range at the given position.
+ // If range starts at or after the given position then the
+ // original range is returned.
+ // Otherwise returns the live range that starts at pos and contains
+ // all uses from the original range that follow pos. Uses at pos will
+ // still be owned by the original range after splitting.
+ LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
+
+ // Split the given range in a position from the interval [start, end].
+ LiveRange* SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end);
+
+ // Find a lifetime position in the interval [start, end] which
+ // is optimal for splitting: it is either the header of the outermost
+ // loop covered by this interval or the latest possible position.
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
- LiveRange* Split(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
- LiveRange* Split(LiveRange* range, LifetimePosition split_pos);
- void SplitAndSpill(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
- void SplitAndSpill(LiveRange* range, LifetimePosition at);
+
+ // Spill the given live range after position pos.
+ void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+ // Spill the given live range after position start and up to position end.
+ void SpillBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end);
+
+ void SplitAndSpillIntersecting(LiveRange* range);
+
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
@@ -914,6 +947,8 @@
HPhi* LookupPhi(LOperand* operand) const;
LGap* GetLastGap(HBasicBlock* block) const;
+ const char* RegisterName(int allocation_index);
+
LChunk* chunk_;
ZoneList<InstructionSummary*> summaries_;
InstructionSummary* next_summary_;
@@ -938,7 +973,7 @@
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
- OperationMode mode_;
+ RegisterKind mode_;
int num_registers_;
HGraph* graph_;
diff --git a/src/log-utils.cc b/src/log-utils.cc
index d6d8754..c7b7567 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -273,29 +273,7 @@
void LogMessageBuilder::AppendAddress(Address addr) {
- static Address last_address_ = NULL;
- AppendAddress(addr, last_address_);
- last_address_ = addr;
-}
-
-
-void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
- if (!FLAG_compress_log) {
- Append("0x%" V8PRIxPTR, addr);
- } else if (bias == NULL) {
- Append("%" V8PRIxPTR, addr);
- } else {
- uintptr_t delta;
- char sign;
- if (addr >= bias) {
- delta = addr - bias;
- sign = '+';
- } else {
- delta = bias - addr;
- sign = '-';
- }
- Append("%c%" V8PRIxPTR, sign, delta);
- }
+ Append("0x%" V8PRIxPTR, addr);
}
@@ -343,24 +321,6 @@
}
-bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
- return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
-}
-
-
-bool LogMessageBuilder::RetrieveCompressedPrevious(
- LogRecordCompressor* compressor, const char* prefix) {
- pos_ = 0;
- if (prefix[0] != '\0') Append(prefix);
- Vector<char> prev_record(Log::message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
- if (!has_prev) return false;
- pos_ += prev_record.length();
- return true;
-}
-
-
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = Log::Write(Log::message_buffer_, pos_);
@@ -369,145 +329,6 @@
}
}
-
-// Formatting string for back references to the whole line. E.g. "#2" means
-// "the second line above".
-const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
-
-// Formatting string for back references. E.g. "#2:10" means
-// "the second line above, start from char 10 (0-based)".
-const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
-
-
-LogRecordCompressor::~LogRecordCompressor() {
- for (int i = 0; i < buffer_.length(); ++i) {
- buffer_[i].Dispose();
- }
-}
-
-
-static int GetNumberLength(int number) {
- ASSERT(number >= 0);
- ASSERT(number < 10000);
- if (number < 10) return 1;
- if (number < 100) return 2;
- if (number < 1000) return 3;
- return 4;
-}
-
-
-int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
- // See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
- return pos == 0 ? GetNumberLength(distance) + 1
- : GetNumberLength(distance) + GetNumberLength(pos) + 2;
-}
-
-
-void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
- int distance,
- int pos) {
- if (pos == 0) {
- OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
- } else {
- OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
- }
-}
-
-
-bool LogRecordCompressor::Store(const Vector<const char>& record) {
- // Check if the record is the same as the last stored one.
- if (curr_ != -1) {
- Vector<const char>& curr = buffer_[curr_];
- if (record.length() == curr.length()
- && strncmp(record.start(), curr.start(), record.length()) == 0) {
- return false;
- }
- }
- // buffer_ is circular.
- prev_ = curr_++;
- curr_ %= buffer_.length();
- Vector<char> record_copy = Vector<char>::New(record.length());
- memcpy(record_copy.start(), record.start(), record.length());
- buffer_[curr_].Dispose();
- buffer_[curr_] =
- Vector<const char>(record_copy.start(), record_copy.length());
- return true;
-}
-
-
-bool LogRecordCompressor::RetrievePreviousCompressed(
- Vector<char>* prev_record) {
- if (prev_ == -1) return false;
-
- int index = prev_;
- // Distance from prev_.
- int distance = 0;
- // Best compression result among records in the buffer.
- struct {
- intptr_t truncated_len;
- int distance;
- int copy_from_pos;
- int backref_size;
- } best = {-1, 0, 0, 0};
- Vector<const char>& prev = buffer_[prev_];
- const char* const prev_start = prev.start();
- const char* const prev_end = prev.start() + prev.length();
- do {
- // We're moving backwards until we reach the current record.
- // Remember that buffer_ is circular.
- if (--index == -1) index = buffer_.length() - 1;
- ++distance;
- if (index == curr_) break;
-
- Vector<const char>& data = buffer_[index];
- if (data.start() == NULL) break;
- const char* const data_end = data.start() + data.length();
- const char* prev_ptr = prev_end;
- const char* data_ptr = data_end;
- // Compare strings backwards, stop on the last matching character.
- while (prev_ptr != prev_start && data_ptr != data.start()
- && *(prev_ptr - 1) == *(data_ptr - 1)) {
- --prev_ptr;
- --data_ptr;
- }
- const intptr_t truncated_len = prev_end - prev_ptr;
- const int copy_from_pos = static_cast<int>(data_ptr - data.start());
- // Check if the length of compressed tail is enough.
- if (truncated_len <= kMaxBackwardReferenceSize
- && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
- continue;
- }
-
- // Record compression results.
- if (truncated_len > best.truncated_len) {
- best.truncated_len = truncated_len;
- best.distance = distance;
- best.copy_from_pos = copy_from_pos;
- best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
- }
- } while (true);
-
- if (best.distance == 0) {
- // Can't compress the previous record. Return as is.
- ASSERT(prev_record->length() >= prev.length());
- memcpy(prev_record->start(), prev.start(), prev.length());
- prev_record->Truncate(prev.length());
- } else {
- // Copy the uncompressible part unchanged.
- const intptr_t unchanged_len = prev.length() - best.truncated_len;
- // + 1 for '\0'.
- ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
- memcpy(prev_record->start(), prev.start(), unchanged_len);
- // Append the backward reference.
- Vector<char> backref(
- prev_record->start() + unchanged_len, best.backref_size + 1);
- PrintBackwardReference(backref, best.distance, best.copy_from_pos);
- ASSERT(strlen(backref.start()) - best.backref_size == 0);
- prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
- }
- return true;
-}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/src/log-utils.h b/src/log-utils.h
index ffea928..719d370 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -176,50 +176,6 @@
friend class Logger;
friend class LogMessageBuilder;
- friend class LogRecordCompressor;
-};
-
-
-// An utility class for performing backward reference compression
-// of string ends. It operates using a window of previous strings.
-class LogRecordCompressor {
- public:
- // 'window_size' is the size of backward lookup window.
- explicit LogRecordCompressor(int window_size)
- : buffer_(window_size + kNoCompressionWindowSize),
- kMaxBackwardReferenceSize(
- GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
- curr_(-1), prev_(-1) {
- }
-
- ~LogRecordCompressor();
-
- // Fills vector with a compressed version of the previous record.
- // Returns false if there is no previous record.
- bool RetrievePreviousCompressed(Vector<char>* prev_record);
-
- // Stores a record if it differs from a previous one (or there's no previous).
- // Returns true, if the record has been stored.
- bool Store(const Vector<const char>& record);
-
- private:
- // The minimum size of a buffer: a place needed for the current and
- // the previous record. Since there is no place for precedessors of a previous
- // record, it can't be compressed at all.
- static const int kNoCompressionWindowSize = 2;
-
- // Formatting strings for back references.
- static const char* kLineBackwardReferenceFormat;
- static const char* kBackwardReferenceFormat;
-
- static int GetBackwardReferenceSize(int distance, int pos);
-
- static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
-
- ScopedVector< Vector<const char> > buffer_;
- const int kMaxBackwardReferenceSize;
- int curr_;
- int prev_;
};
@@ -244,32 +200,14 @@
// Append a heap string.
void Append(String* str);
- // Appends an address, compressing it if needed by offsetting
- // from Logger::last_address_.
+ // Appends an address.
void AppendAddress(Address addr);
- // Appends an address, compressing it if needed.
- void AppendAddress(Address addr, Address bias);
-
void AppendDetailed(String* str, bool show_impl_info);
// Append a portion of a string.
void AppendStringPart(const char* str, int len);
- // Stores log message into compressor, returns true if the message
- // was stored (i.e. doesn't repeat the previous one).
- bool StoreInCompressor(LogRecordCompressor* compressor);
-
- // Sets log message to a previous version of compressed message.
- // Returns false, if there is no previous message.
- bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
- return RetrieveCompressedPrevious(compressor, "");
- }
-
- // Does the same at the version without arguments, and sets a prefix.
- bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
- const char* prefix);
-
// Write the log message to the log file currently opened.
void WriteToLogFile();
diff --git a/src/log.cc b/src/log.cc
index 6723347..db9ff7a 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -303,7 +303,6 @@
Logger::ticker_->SetProfiler(this);
Logger::ProfilerBeginEvent();
- Logger::LogAliases();
}
@@ -343,43 +342,21 @@
Ticker* Logger::ticker_ = NULL;
Profiler* Logger::profiler_ = NULL;
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
-const char** Logger::log_events_ = NULL;
-CompressionHelper* Logger::compression_helper_ = NULL;
int Logger::logging_nesting_ = 0;
int Logger::cpu_profiler_nesting_ = 0;
int Logger::heap_profiler_nesting_ = 0;
-#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
-const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
+#define DECLARE_EVENT(ignore1, name) name,
+const char* kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
};
-#undef DECLARE_LONG_EVENT
-
-#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
-const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
-};
-#undef DECLARE_SHORT_EVENT
+#undef DECLARE_EVENT
void Logger::ProfilerBeginEvent() {
if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
- if (FLAG_compress_log) {
- msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
- }
- msg.WriteToLogFile();
-}
-
-
-void Logger::LogAliases() {
- if (!Log::IsEnabled() || !FLAG_compress_log) return;
- LogMessageBuilder msg;
- for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
- msg.Append("alias,%s,%s\n",
- kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
- }
msg.WriteToLogFile();
}
@@ -687,54 +664,15 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// A class that contains all common code dealing with record compression.
-class CompressionHelper {
- public:
- explicit CompressionHelper(int window_size)
- : compressor_(window_size), repeat_count_(0) { }
-
- // Handles storing message in compressor, retrieving the previous one and
- // prefixing it with repeat count, if needed.
- // Returns true if message needs to be written to log.
- bool HandleMessage(LogMessageBuilder* msg) {
- if (!msg->StoreInCompressor(&compressor_)) {
- // Current message repeats the previous one, don't write it.
- ++repeat_count_;
- return false;
- }
- if (repeat_count_ == 0) {
- return msg->RetrieveCompressedPrevious(&compressor_);
- }
- OS::SNPrintF(prefix_, "%s,%d,",
- Logger::log_events_[Logger::REPEAT_META_EVENT],
- repeat_count_ + 1);
- repeat_count_ = 0;
- return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
- }
-
- private:
- LogRecordCompressor compressor_;
- int repeat_count_;
- EmbeddedVector<char, 20> prefix_;
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,%s,",
- log_events_[CODE_CREATION_EVENT], log_events_[CALLBACK_TAG]);
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
msg.Append(",1,\"%s%s\"", prefix, name);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -771,6 +709,7 @@
}
+#ifdef ENABLE_LOGGING_AND_PROFILING
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
case Code::FUNCTION: return code->optimizable() ? "~" : "";
@@ -778,6 +717,7 @@
default: return "";
}
}
+#endif
void Logger::CodeCreateEvent(LogEventsAndTags tag,
@@ -786,7 +726,9 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.Append("%s,%s,",
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
for (const char* p = comment; *p != '\0'; p++) {
@@ -797,10 +739,6 @@
}
msg.Append('"');
LowLevelCodeCreateEvent(code, &msg);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -813,14 +751,12 @@
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.Append("%s,%s,",
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
LowLevelCodeCreateEvent(code, &msg);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -837,7 +773,9 @@
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.Append("%s,%s,",
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s%s %s:%d\"",
code->ExecutableSize(),
@@ -846,10 +784,6 @@
*sourcestr,
line);
LowLevelCodeCreateEvent(code, &msg);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -860,14 +794,12 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.Append("%s,%s,",
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
LowLevelCodeCreateEvent(code, &msg);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -878,7 +810,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
LogMessageBuilder msg;
- msg.Append("%s\n", log_events_[CODE_MOVING_GC]);
+ msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
msg.WriteToLogFile();
OS::SignalCodeMovingGC();
#endif
@@ -890,16 +822,13 @@
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,%s,",
- log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[REG_EXP_TAG]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false);
msg.Append('\"');
LowLevelCodeCreateEvent(code, &msg);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -924,13 +853,9 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
+ msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
msg.AppendAddress(addr);
msg.Append(",%d", pos);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -942,18 +867,12 @@
// This function can be called from GC iterators (during Scavenge,
// MC, and MS), so marking bits can be set on objects. That's
// why unchecked accessors are used here.
- static Address prev_code = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
+ msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]);
msg.AppendAddress(function->address());
msg.Append(',');
- msg.AppendAddress(function->unchecked_code()->address(), prev_code);
- prev_code = function->unchecked_code()->address();
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
+ msg.AppendAddress(function->unchecked_code()->address());
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -987,18 +906,12 @@
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
- static Address prev_to_ = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[event]);
+ msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append(',');
- msg.AppendAddress(to, prev_to_);
- prev_to_ = to;
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
+ msg.AppendAddress(to);
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -1009,12 +922,8 @@
void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[event]);
+ msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
- }
msg.Append('\n');
msg.WriteToLogFile();
}
@@ -1202,30 +1111,20 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return;
- static Address prev_sp = NULL;
- static Address prev_function = NULL;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[TICK_EVENT]);
- Address prev_addr = sample->pc;
- msg.AppendAddress(prev_addr);
+ msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
+ msg.AppendAddress(sample->pc);
msg.Append(',');
- msg.AppendAddress(sample->sp, prev_sp);
- prev_sp = sample->sp;
+ msg.AppendAddress(sample->sp);
msg.Append(',');
- msg.AppendAddress(sample->function, prev_function);
- prev_function = sample->function;
+ msg.AppendAddress(sample->function);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
}
for (int i = 0; i < sample->frames_count; ++i) {
msg.Append(',');
- msg.AppendAddress(sample->stack[i], prev_addr);
- prev_addr = sample->stack[i];
- }
- if (FLAG_compress_log) {
- ASSERT(compression_helper_ != NULL);
- if (!compression_helper_->HandleMessage(&msg)) return;
+ msg.AppendAddress(sample->stack[i]);
}
msg.Append('\n');
msg.WriteToLogFile();
@@ -1654,12 +1553,6 @@
sliding_state_window_ = new SlidingStateWindow();
}
- log_events_ = FLAG_compress_log ?
- kCompressedLogEventsNames : kLongLogEventsNames;
- if (FLAG_compress_log) {
- compression_helper_ = new CompressionHelper(kCompressionWindowSize);
- }
-
if (start_logging) {
logging_nesting_ = 1;
}
@@ -1686,13 +1579,17 @@
void Logger::EnsureTickerStarted() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(ticker_ != NULL);
if (!ticker_->IsActive()) ticker_->Start();
+#endif
}
void Logger::EnsureTickerStopped() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
+#endif
}
@@ -1707,9 +1604,6 @@
profiler_ = NULL;
}
- delete compression_helper_;
- compression_helper_ = NULL;
-
delete sliding_state_window_;
sliding_state_window_ = NULL;
diff --git a/src/log.h b/src/log.h
index 54b131b..771709c 100644
--- a/src/log.h
+++ b/src/log.h
@@ -74,7 +74,6 @@
class Semaphore;
class SlidingStateWindow;
class LogMessageBuilder;
-class CompressionHelper;
#undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -88,58 +87,55 @@
#endif
#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation", "cc") \
- V(CODE_MOVE_EVENT, "code-move", "cm") \
- V(CODE_DELETE_EVENT, "code-delete", "cd") \
- V(CODE_MOVING_GC, "code-moving-gc", "cg") \
- V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
- V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
- V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \
- V(SNAPSHOT_POSITION_EVENT, "snapshot-pos", "sp") \
- V(TICK_EVENT, "tick", "t") \
- V(REPEAT_META_EVENT, "repeat", "r") \
- V(BUILTIN_TAG, "Builtin", "bi") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak", "cdb") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi") \
- V(CALL_IC_TAG, "CallIC", "cic") \
- V(CALL_INITIALIZE_TAG, "CallInitialize", "ci") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic", "cmm") \
- V(CALL_MISS_TAG, "CallMiss", "cm") \
- V(CALL_NORMAL_TAG, "CallNormal", "cn") \
- V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak", "kcdb") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
- "KeyedCallDebugPrepareStepIn", \
- "kcdbsi") \
- V(KEYED_CALL_IC_TAG, "KeyedCallIC", "kcic") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize", "kci") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic", "kcmm") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss", "kcm") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal", "kcn") \
- V(KEYED_CALL_PRE_MONOMORPHIC_TAG, \
- "KeyedCallPreMonomorphic", \
- "kcpm") \
- V(CALLBACK_TAG, "Callback", "cb") \
- V(EVAL_TAG, "Eval", "e") \
- V(FUNCTION_TAG, "Function", "f") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC", "klic") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC", "ksic") \
- V(LAZY_COMPILE_TAG, "LazyCompile", "lc") \
- V(LOAD_IC_TAG, "LoadIC", "lic") \
- V(REG_EXP_TAG, "RegExp", "re") \
- V(SCRIPT_TAG, "Script", "sc") \
- V(STORE_IC_TAG, "StoreIC", "sic") \
- V(STUB_TAG, "Stub", "s") \
- V(NATIVE_FUNCTION_TAG, "Function", "f") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile", "lc") \
- V(NATIVE_SCRIPT_TAG, "Script", "sc")
+ V(CODE_CREATION_EVENT, "code-creation") \
+ V(CODE_MOVE_EVENT, "code-move") \
+ V(CODE_DELETE_EVENT, "code-delete") \
+ V(CODE_MOVING_GC, "code-moving-gc") \
+ V(FUNCTION_CREATION_EVENT, "function-creation") \
+ V(FUNCTION_MOVE_EVENT, "function-move") \
+ V(FUNCTION_DELETE_EVENT, "function-delete") \
+ V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
+ V(TICK_EVENT, "tick") \
+ V(REPEAT_META_EVENT, "repeat") \
+ V(BUILTIN_TAG, "Builtin") \
+ V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
+ V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
+ V(CALL_IC_TAG, "CallIC") \
+ V(CALL_INITIALIZE_TAG, "CallInitialize") \
+ V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
+ V(CALL_MISS_TAG, "CallMiss") \
+ V(CALL_NORMAL_TAG, "CallNormal") \
+ V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
+ V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
+ V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
+ "KeyedCallDebugPrepareStepIn") \
+ V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
+ V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
+ V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
+ V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
+ V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
+ V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
+ V(CALLBACK_TAG, "Callback") \
+ V(EVAL_TAG, "Eval") \
+ V(FUNCTION_TAG, "Function") \
+ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
+ V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
+ V(LAZY_COMPILE_TAG, "LazyCompile") \
+ V(LOAD_IC_TAG, "LoadIC") \
+ V(REG_EXP_TAG, "RegExp") \
+ V(SCRIPT_TAG, "Script") \
+ V(STORE_IC_TAG, "StoreIC") \
+ V(STUB_TAG, "Stub") \
+ V(NATIVE_FUNCTION_TAG, "Function") \
+ V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
+ V(NATIVE_SCRIPT_TAG, "Script")
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
class Logger {
public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
enum LogEventsAndTags {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
NUMBER_OF_LOG_EVENTS
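
With the short-name column gone, the list is a classic two-column X-macro: one list definition, multiple expansions. A standalone toy analogue showing how DECLARE_ENUM and DECLARE_EVENT walk the same list:

  #define TOY_EVENT_LIST(V)                 \
    V(CODE_CREATION_EVENT, "code-creation") \
    V(TICK_EVENT, "tick")

  #define DECLARE_ENUM(item, ignore) item,
  enum ToyEvents { TOY_EVENT_LIST(DECLARE_ENUM) NUMBER_OF_TOY_EVENTS };
  #undef DECLARE_ENUM

  #define DECLARE_EVENT(ignore, name) name,
  static const char* kToyEventNames[NUMBER_OF_TOY_EVENTS] = {
    TOY_EVENT_LIST(DECLARE_EVENT)
  };
  #undef DECLARE_EVENT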
@@ -292,9 +288,6 @@
private:
- // Size of window used for log records compression.
- static const int kCompressionWindowSize = 4;
-
// Emits the profiler's first message.
static void ProfilerBeginEvent();
@@ -312,9 +305,6 @@
static void DeleteEventInternal(LogEventsAndTags event,
Address from);
- // Emits aliases for compressed messages.
- static void LogAliases();
-
// Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp);
@@ -357,15 +347,8 @@
// recent VM states.
static SlidingStateWindow* sliding_state_window_;
- // An array of log events names.
- static const char** log_events_;
-
- // An instance of helper created if log compression is enabled.
- static CompressionHelper* compression_helper_;
-
// Internal implementation classes with access to
// private members.
- friend class CompressionHelper;
friend class EventLog;
friend class TimeLog;
friend class Profiler;
diff --git a/src/macros.py b/src/macros.py
index 1ceb620..6d66def 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -140,15 +140,14 @@
# Limit according to ECMA 262 15.9.1.1
const MAX_TIME_MS = 8640000000000000;
+# Limit which is MAX_TIME_MS + msPerMonth.
+const MAX_TIME_BEFORE_UTC = 8640002592000000;
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
macro DAY(time) = ($floor(time / 86400000));
-macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
-macro DATE_FROM_TIME(time) = (DateFromTime(time));
-macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DATE_FROM_TIME(time));
-macro YEAR_FROM_TIME(time) = (YearFromTime(time));
+macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
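The new constant checks out arithmetically, assuming msPerMonth means 30 days expressed in milliseconds (2,592,000,000 ms):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int64_t kMaxTimeMs = 8640000000000000;             // ECMA-262 15.9.1.1
    const int64_t kMsPerMonth = 30LL * 24 * 60 * 60 * 1000;  // 2592000000
    assert(kMaxTimeMs + kMsPerMonth == 8640002592000000);    // MAX_TIME_BEFORE_UTC
    return 0;
  }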
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index b570db9..8ade41c 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1281,6 +1281,11 @@
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
+ // The recursive GC marker detects when it is nearing stack overflow,
+ // and switches to a different marking system. JS interrupts interfere
+ // with the C stack limit check.
+ PostponeInterruptsScope postpone;
+
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
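A hypothetical sketch of the RAII idiom behind PostponeInterruptsScope (DemoStackGuard is illustrative, not V8's StackGuard): interrupt requests made inside the scope are deferred until it unwinds, so the marker's C-stack-limit probe cannot be confused with a pending JS interrupt.

  // Interrupts requested while the scope is alive are held back.
  class DemoStackGuard {
   public:
    static void PostponeInterrupts() { ++postpone_depth_; }
    static void ResumeInterrupts() { --postpone_depth_; }
    static bool InterruptsPostponed() { return postpone_depth_ > 0; }
   private:
    static int postpone_depth_;
  };
  int DemoStackGuard::postpone_depth_ = 0;

  class DemoPostponeInterruptsScope {
   public:
    DemoPostponeInterruptsScope() { DemoStackGuard::PostponeInterrupts(); }
    ~DemoPostponeInterruptsScope() { DemoStackGuard::ResumeInterrupts(); }
  };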
diff --git a/src/math.js b/src/math.js
index fa1934d..90667d7 100644
--- a/src/math.js
+++ b/src/math.js
@@ -258,14 +258,6 @@
"max", MathMax,
"min", MathMin
));
-
- // The values here are from the MathFunctionId enum in objects.h.
- %SetMathFunctionId($Math.floor, 1);
- %SetMathFunctionId($Math.round, 2);
- %SetMathFunctionId($Math.abs, 4);
- %SetMathFunctionId($Math.sqrt, 0xd);
- // TODO(erikcorry): Set the id of the other functions so they can be
- // optimized.
};
diff --git a/src/messages.js b/src/messages.js
index c783566..c19f4a9 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -190,7 +190,6 @@
illegal_return: "Illegal return statement",
error_loading_debugger: "Error loading debugger",
no_input_to_regexp: "No input to %0",
- result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 6b9e965..55836ce 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1533,9 +1533,9 @@
};
-FrameMirror.prototype.evaluate = function(source, disable_break) {
+FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
- source, Boolean(disable_break));
+ source, Boolean(disable_break), opt_context_object);
return MakeMirror(result);
};
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 5910128..7935912 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3036,27 +3036,20 @@
}
-bool SharedFunctionInfo::HasCustomCallGenerator() {
+bool SharedFunctionInfo::HasBuiltinFunctionId() {
return function_data()->IsSmi();
}
-MathFunctionId SharedFunctionInfo::math_function_id() {
- return static_cast<MathFunctionId>(
- (compiler_hints() >> kMathFunctionShift) & kMathFunctionMask);
+bool SharedFunctionInfo::IsBuiltinMathFunction() {
+ return HasBuiltinFunctionId() &&
+ builtin_function_id() >= kFirstMathFunctionId;
}
-void SharedFunctionInfo::set_math_function_id(int math_fn) {
- ASSERT(math_fn <= max_math_id_number());
- set_compiler_hints(compiler_hints() |
- ((math_fn & kMathFunctionMask) << kMathFunctionShift));
-}
-
-
-int SharedFunctionInfo::custom_call_generator_id() {
- ASSERT(HasCustomCallGenerator());
- return Smi::cast(function_data())->value();
+BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
+ ASSERT(HasBuiltinFunctionId());
+ return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
}
diff --git a/src/objects.cc b/src/objects.cc
index 399ab09..96f5c4b 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -3097,8 +3097,9 @@
MaybeObject* JSObject::DefineAccessor(String* name,
bool is_getter,
- JSFunction* fun,
+ Object* fun,
PropertyAttributes attributes) {
+ ASSERT(fun->IsJSFunction() || fun->IsUndefined());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
diff --git a/src/objects.h b/src/objects.h
index 1827ab0..498ee45 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1368,7 +1368,7 @@
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
bool is_getter,
- JSFunction* fun,
+ Object* fun,
PropertyAttributes attributes);
Object* LookupAccessor(String* name, bool is_getter);
@@ -3714,22 +3714,49 @@
};
-enum MathFunctionId {
- kNotSpecialMathFunction = 0,
- // These numbers must be kept in sync with the ones in math.js.
- kMathFloor = 1,
- kMathRound = 2,
- kMathCeil = 3,
- kMathAbs = 4,
- kMathLog = 5,
- kMathSin = 6,
- kMathCos = 7,
- kMathTan = 8,
- kMathASin = 9,
- kMathACos = 0xa,
- kMathATan = 0xb,
- kMathExp = 0xc,
- kMathSqrt = 0xd
+// List of builtin functions we want to identify to improve code
+// generation.
+//
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype", a name of a builtin function
+// on the object (the one the id is set for), and a label.
+//
+// Installation of ids for the selected builtin functions is handled
+// by the bootstrapper.
+//
+// NOTE: Order is important: math functions should be at the end of
+// the list and MathFloor should be the first math function.
+#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(Math, floor, MathFloor) \
+ V(Math, round, MathRound) \
+ V(Math, ceil, MathCeil) \
+ V(Math, abs, MathAbs) \
+ V(Math, log, MathLog) \
+ V(Math, sin, MathSin) \
+ V(Math, cos, MathCos) \
+ V(Math, tan, MathTan) \
+ V(Math, asin, MathASin) \
+ V(Math, acos, MathACos) \
+ V(Math, atan, MathATan) \
+ V(Math, exp, MathExp) \
+ V(Math, sqrt, MathSqrt) \
+ V(Math, pow, MathPow)
+
+
+enum BuiltinFunctionId {
+#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
+ k##name,
+ FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
+#undef DECLARE_FUNCTION_ID
+ // Fake id for a special case of Math.pow. Note, it continues the
+ // list of math functions.
+ kMathPowHalf,
+ kFirstMathFunctionId = kMathFloor
};
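A minimal sketch of why the ordering note above matters: with all Math entries grouped at the end of the list and kFirstMathFunctionId aliased to kMathFloor, IsBuiltinMathFunction reduces to a single comparison. Enumerator names here are trimmed and illustrative.

  enum DemoFunctionId {
    kArrayPush, kArrayPop, kStringCharCodeAt,   // non-math ids come first
    kMathFloor, kMathRound, kMathPowHalf,       // math ids, kMathFloor first
    kFirstMathFunctionId = kMathFloor           // alias, not a new value
  };

  static bool IsMathId(DemoFunctionId id) {
    return id >= kFirstMathFunctionId;  // relies solely on list order
  }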
@@ -3870,7 +3897,7 @@
// [function data]: This field holds some additional data for function.
// Currently it either has FunctionTemplateInfo to make benefit the API
- // or Smi identifying a custom call generator.
+ // or Smi identifying a builtin function.
// In the long run we don't want all functions to have this field but
// we can fix that when we have a better model for storing hidden data
// on objects.
@@ -3878,8 +3905,9 @@
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
- inline bool HasCustomCallGenerator();
- inline int custom_call_generator_id();
+ inline bool HasBuiltinFunctionId();
+ inline bool IsBuiltinMathFunction();
+ inline BuiltinFunctionId builtin_function_id();
// [script info]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
@@ -4130,12 +4158,6 @@
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
- // Get/set a special tag on the functions from math.js so we can inline
- // efficient versions of them in the code.
- inline MathFunctionId math_function_id();
- inline void set_math_function_id(int id);
- static inline int max_math_id_number() { return kMathFunctionMask; }
-
typedef FixedBodyDescriptor<kNameOffset,
kThisPropertyAssignmentsOffset + kPointerSize,
kSize> BodyDescriptor;
@@ -4153,12 +4175,10 @@
static const int kHasOnlySimpleThisPropertyAssignments = 0;
static const int kTryFullCodegen = 1;
static const int kAllowLazyCompilation = 2;
- static const int kMathFunctionShift = 3;
- static const int kMathFunctionMask = 0xf;
- static const int kLiveObjectsMayExist = 7;
- static const int kCodeAgeShift = 8;
+ static const int kLiveObjectsMayExist = 3;
+ static const int kCodeAgeShift = 4;
static const int kCodeAgeMask = 0x7;
- static const int kOptimizationDisabled = 11;
+ static const int kOptimizationDisabled = 7;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
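For reference, the compiler-hints layout implied by the renumbered constants, as a sketch (DemoCodeAge is hypothetical): dropping the four math-function bits (old bits 3-6) moves live-objects-may-exist from bit 7 to bit 3, code age from bits 8-10 to bits 4-6, and optimization-disabled from bit 11 to bit 7.

  //   bit 0:    has-only-simple-this-property-assignments
  //   bit 1:    try-full-codegen
  //   bit 2:    allow-lazy-compilation
  //   bit 3:    live-objects-may-exist          (was bit 7)
  //   bits 4-6: code age, 3 bits, mask 0x7      (was bits 8-10)
  //   bit 7:    optimization-disabled           (was bit 11)
  static int DemoCodeAge(int compiler_hints) {
    const int kCodeAgeShift = 4;
    const int kCodeAgeMask = 0x7;
    return (compiler_hints >> kCodeAgeShift) & kCodeAgeMask;
  }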
diff --git a/src/parser.cc b/src/parser.cc
index 5473f25..94ad57c 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -609,7 +609,25 @@
// Initialize parser state.
source->TryFlatten();
- scanner_.Initialize(source);
+ if (source->IsExternalTwoByteString()) {
+ // Notice that the stream is destroyed at the end of the branch block.
+ // The last lines of the blocks can't be moved outside, even though
+ // they are identical calls.
+ ExternalTwoByteStringUC16CharacterStream stream(
+ Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+ scanner_.Initialize(&stream, JavaScriptScanner::kAllLiterals);
+ return DoParseProgram(source, in_global_context, &zone_scope);
+ } else {
+ GenericStringUC16CharacterStream stream(source, 0, source->length());
+ scanner_.Initialize(&stream, JavaScriptScanner::kAllLiterals);
+ return DoParseProgram(source, in_global_context, &zone_scope);
+ }
+}
+
+
+FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
+ bool in_global_context,
+ ZoneScope* zone_scope) {
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
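A sketch of the lifetime constraint the comment above describes, with hypothetical Stream types standing in for the two UC16 stream classes: each branch constructs a stack object of a different concrete type, and its lifetime ends at the branch's closing brace, so the call that consumes it must stay inside the block that owns it.

  struct Stream { virtual ~Stream() {} };
  struct TwoByteStream : Stream {};
  struct GenericStream : Stream {};

  static int Use(Stream* s) { return s != 0; }

  static int Dispatch(bool external) {
    if (external) {
      TwoByteStream stream;   // destroyed at the end of this block...
      return Use(&stream);    // ...so the identical call cannot be hoisted
    } else {
      GenericStream stream;
      return Use(&stream);
    }
  }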
@@ -655,25 +673,45 @@
// If there was a syntax error we have to get rid of the AST
// and it is not safe to do so before the scope has been deleted.
- if (result == NULL) zone_scope.DeleteOnExit();
+ if (result == NULL) zone_scope->DeleteOnExit();
return result;
}
-
FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse_lazy);
Handle<String> source(String::cast(script_->source()));
Counters::total_parse_size.Increment(source->length());
+ // Initialize parser state.
+ source->TryFlatten();
+ if (source->IsExternalTwoByteString()) {
+ ExternalTwoByteStringUC16CharacterStream stream(
+ Handle<ExternalTwoByteString>::cast(source),
+ info->start_position(),
+ info->end_position());
+ FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
+ return result;
+ } else {
+ GenericStringUC16CharacterStream stream(source,
+ info->start_position(),
+ info->end_position());
+ FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
+ return result;
+ }
+}
+
+
+FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
+ UC16CharacterStream* source,
+ ZoneScope* zone_scope) {
+ scanner_.Initialize(source, JavaScriptScanner::kAllLiterals);
+ ASSERT(target_stack_ == NULL);
+
Handle<String> name(String::cast(info->name()));
fni_ = new FuncNameInferrer();
fni_->PushEnclosingName(name);
- // Initialize parser state.
- source->TryFlatten();
- scanner_.Initialize(source, info->start_position(), info->end_position());
- ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
// Place holder for the result.
@@ -705,7 +743,7 @@
// not safe to do before scope has been deleted.
if (result == NULL) {
Top::StackOverflow();
- zone_scope.DeleteOnExit();
+ zone_scope->DeleteOnExit();
} else {
Handle<String> inferred_name(info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -719,12 +757,12 @@
if (pre_data() != NULL) {
symbol_id = pre_data()->GetSymbolIdentifier();
}
- return LookupSymbol(symbol_id, scanner_.literal());
+ return LookupSymbol(symbol_id, scanner().literal());
}
void Parser::ReportMessage(const char* type, Vector<const char*> args) {
- Scanner::Location source_location = scanner_.location();
+ Scanner::Location source_location = scanner().location();
ReportMessageAt(source_location, type, args);
}
@@ -1641,7 +1679,7 @@
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner_.has_line_terminator_before_next() &&
+ if (!scanner().has_line_terminator_before_next() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@@ -1667,7 +1705,7 @@
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner_.has_line_terminator_before_next() &&
+ if (!scanner().has_line_terminator_before_next() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@@ -1712,7 +1750,7 @@
}
Token::Value tok = peek();
- if (scanner_.has_line_terminator_before_next() ||
+ if (scanner().has_line_terminator_before_next() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -1844,7 +1882,7 @@
Expect(Token::THROW, CHECK_OK);
int pos = scanner().location().beg_pos;
- if (scanner_.has_line_terminator_before_next()) {
+ if (scanner().has_line_terminator_before_next()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2408,7 +2446,8 @@
// LeftHandSideExpression ('++' | '--')?
Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
+ if (!scanner().has_line_terminator_before_next() &&
+ Token::IsCountOp(peek())) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report the
@@ -2677,7 +2716,7 @@
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
- StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
+ StringToDouble(scanner().literal(), ALLOW_HEX | ALLOW_OCTALS);
result = NewNumberLiteral(value);
break;
}
@@ -3028,7 +3067,7 @@
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
- StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
+ StringToDouble(scanner().literal(), ALLOW_HEX | ALLOW_OCTALS);
key = NewNumberLiteral(value);
break;
}
@@ -3089,7 +3128,7 @@
Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- if (!scanner_.ScanRegExpPattern(seen_equal)) {
+ if (!scanner().ScanRegExpPattern(seen_equal)) {
Next();
ReportMessage("unterminated_regexp", Vector<const char*>::empty());
*ok = false;
@@ -3099,10 +3138,10 @@
int literal_index = temp_scope_->NextMaterializedLiteralIndex();
Handle<String> js_pattern =
- Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
- scanner_.ScanRegExpFlags();
+ Factory::NewStringFromUtf8(scanner().next_literal(), TENURED);
+ scanner().ScanRegExpFlags();
Handle<String> js_flags =
- Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
+ Factory::NewStringFromUtf8(scanner().next_literal(), TENURED);
Next();
return new RegExpLiteral(js_pattern, js_flags, literal_index);
@@ -3158,7 +3197,7 @@
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- int start_pos = scanner_.location().beg_pos;
+ int start_pos = scanner().location().beg_pos;
bool done = (peek() == Token::RPAREN);
while (!done) {
Handle<String> param_name = ParseIdentifier(CHECK_OK);
@@ -3195,7 +3234,7 @@
bool is_lazily_compiled =
mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
- int function_block_pos = scanner_.location().beg_pos;
+ int function_block_pos = scanner().location().beg_pos;
int materialized_literal_count;
int expected_property_count;
int end_pos;
@@ -3212,7 +3251,8 @@
ReportInvalidPreparseData(name, CHECK_OK);
}
Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
- scanner_.SeekForward(end_pos);
+ // Seek to position just before terminal '}'.
+ scanner().SeekForward(end_pos - 1);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
@@ -3228,7 +3268,7 @@
this_property_assignments = temp_scope.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
- end_pos = scanner_.location().end_pos;
+ end_pos = scanner().location().end_pos;
}
FunctionLiteral* function_literal =
@@ -3332,7 +3372,7 @@
Next();
return;
}
- if (scanner_.has_line_terminator_before_next() ||
+ if (scanner().has_line_terminator_before_next() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@@ -3383,8 +3423,8 @@
bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return Handle<String>();
- if (scanner_.literal_length() == 3) {
- const char* token = scanner_.literal_string();
+ if (scanner().literal_length() == 3) {
+ const char* token = scanner().literal_string();
*is_get = strcmp(token, "get") == 0;
*is_set = !*is_get && strcmp(token, "set") == 0;
}
@@ -3503,8 +3543,8 @@
// ----------------------------------------------------------------------------
// JSON
-Handle<Object> JsonParser::ParseJson(Handle<String> source) {
- source->TryFlatten();
+Handle<Object> JsonParser::ParseJson(Handle<String> script,
+ UC16CharacterStream* source) {
scanner_.Initialize(source);
stack_overflow_ = false;
Handle<Object> result = ParseJsonValue();
@@ -3540,7 +3580,7 @@
}
Scanner::Location source_location = scanner_.location();
- MessageLocation location(Factory::NewScript(source),
+ MessageLocation location(Factory::NewScript(script),
source_location.beg_pos,
source_location.end_pos);
int argc = (name_opt == NULL) ? 0 : 1;
@@ -4555,13 +4595,12 @@
// Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(Handle<String> source,
- unibrow::CharacterStream* stream,
+static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
bool allow_lazy,
ParserRecorder* recorder,
int literal_flags) {
V8JavaScriptScanner scanner;
- scanner.Initialize(source, stream, literal_flags);
+ scanner.Initialize(source, literal_flags);
intptr_t stack_limit = StackGuard::real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
recorder,
@@ -4580,8 +4619,7 @@
// Preparse, but only collect data that is immediately useful,
// even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
- unibrow::CharacterStream* stream,
+ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
v8::Extension* extension) {
bool allow_lazy = FLAG_lazy && (extension == NULL);
if (!allow_lazy) {
@@ -4590,22 +4628,19 @@
return NULL;
}
PartialParserRecorder recorder;
-
- return DoPreParse(source, stream, allow_lazy, &recorder,
+ return DoPreParse(source, allow_lazy, &recorder,
JavaScriptScanner::kNoLiterals);
}
-ScriptDataImpl* ParserApi::PreParse(Handle<String> source,
- unibrow::CharacterStream* stream,
+ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_lazy = FLAG_lazy && (extension == NULL);
CompleteParserRecorder recorder;
int kPreParseLiteralsFlags =
JavaScriptScanner::kLiteralString | JavaScriptScanner::kLiteralIdentifier;
- return DoPreParse(source, stream, allow_lazy,
- &recorder, kPreParseLiteralsFlags);
+ return DoPreParse(source, allow_lazy, &recorder, kPreParseLiteralsFlags);
}
diff --git a/src/parser.h b/src/parser.h
index 58cd946..70d0e18 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -169,14 +169,12 @@
static bool Parse(CompilationInfo* info);
// Generic preparser generating full preparse data.
- static ScriptDataImpl* PreParse(Handle<String> source,
- unibrow::CharacterStream* stream,
+ static ScriptDataImpl* PreParse(UC16CharacterStream* source,
v8::Extension* extension);
// Preparser that only does preprocessing that makes sense if only used
// immediately after.
- static ScriptDataImpl* PartialPreParse(Handle<String> source,
- unibrow::CharacterStream* stream,
+ static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
v8::Extension* extension);
};
@@ -435,18 +433,26 @@
Vector<const char*> args);
protected:
+ FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
+ UC16CharacterStream* source,
+ ZoneScope* zone_scope);
enum Mode {
PARSE_LAZILY,
PARSE_EAGERLY
};
+ // Called by ParseProgram after setting up the scanner.
+ FunctionLiteral* DoParseProgram(Handle<String> source,
+ bool in_global_context,
+ ZoneScope* zone_scope);
+
// Report syntax error
void ReportUnexpectedToken(Token::Value token);
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
void ReportMessage(const char* message, Vector<const char*> args);
bool inside_with() const { return with_nesting_level_ > 0; }
- Scanner& scanner() { return scanner_; }
+ V8JavaScriptScanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
@@ -548,7 +554,7 @@
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
- return scanner_.peek();
+ return scanner().peek();
}
INLINE(Token::Value Next()) {
@@ -560,9 +566,11 @@
}
if (StackLimitCheck().HasOverflowed()) {
// Any further calls to Next or peek will return the illegal token.
+ // The current call must return the next token, which might already
+ // have been peeked.
stack_overflow_ = true;
}
- return scanner_.Next();
+ return scanner().Next();
}
INLINE(void Consume(Token::Value token));
@@ -702,7 +710,14 @@
// Parse JSON input as a single JSON value.
// Returns null handle and sets exception if parsing failed.
static Handle<Object> Parse(Handle<String> source) {
- return JsonParser().ParseJson(source);
+ if (source->IsExternalTwoByteString()) {
+ ExternalTwoByteStringUC16CharacterStream stream(
+ Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+ return JsonParser().ParseJson(source, &stream);
+ } else {
+ GenericStringUC16CharacterStream stream(source, 0, source->length());
+ return JsonParser().ParseJson(source, &stream);
+ }
}
private:
@@ -710,7 +725,7 @@
~JsonParser() { }
// Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String>);
+ Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
// Parse a single JSON value from input (grammar production JSONValue).
// A JSON value is either a (double-quoted) string literal, a number literal,
// one of "true", "false", or "null", or an object or array literal.
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 46c74b0..7efb25d 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -134,9 +134,7 @@
}
bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const int max_items = 2;
- const char* search_strings[max_items] = { NULL, NULL };
- int search_items = 0;
+ const char* search_string = NULL;
// Simple detection of VFP at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to ARM (mid 2009), no similar
@@ -144,25 +142,26 @@
// so it's up to individual OSes to provide such.
switch (feature) {
case VFP3:
- search_strings[0] = "vfpv3";
- // Some old kernels will report vfp for A8, not vfpv3, so we check for
- // A8 explicitely. The cpuinfo file report the CPU Part which for Cortex
- // A8 is 0xc08.
- search_strings[1] = "0xc08";
- search_items = 2;
- ASSERT(search_items <= max_items);
+ search_string = "vfpv3";
break;
case ARMv7:
- search_strings[0] = "ARMv7" ;
- search_items = 1;
- ASSERT(search_items <= max_items);
+ search_string = "ARMv7";
break;
default:
UNREACHABLE();
}
- for (int i = 0; i < search_items; ++i) {
- if (CPUInfoContainsString(search_strings[i])) {
+ if (CPUInfoContainsString(search_string)) {
+ return true;
+ }
+
+ if (feature == VFP3) {
+ // Some old kernels will report vfp, not vfpv3. Here we make a last attempt
+ // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+ // available on architectures with vfpv3.
+ // Checking neon on its own is not enough as it is possible to have neon
+ // without vfp.
+ if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
return true;
}
}
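A condensed, self-contained sketch of the resulting probe, with the /proc/cpuinfo contents stubbed out (kFakeCpuinfo and the helper names are illustrative):

  #include <cstdio>
  #include <cstring>

  static const char* kFakeCpuinfo = "Features: swp half vfp neon";

  static bool CPUInfoContains(const char* s) {
    return std::strstr(kFakeCpuinfo, s) != 0;
  }

  static bool HasVfp3() {
    if (CPUInfoContains("vfpv3")) return true;
    // Old kernels report plain "vfp"; vfp together with neon implies vfpv3,
    // while neon alone does not imply vfp.
    return CPUInfoContains("vfp") && CPUInfoContains("neon");
  }

  int main() {
    std::printf("vfpv3: %d\n", HasVfp3());  // prints 1 for the stubbed cpuinfo
    return 0;
  }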
diff --git a/src/platform.h b/src/platform.h
index 68a2689..13ea983 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -114,6 +114,8 @@
#endif // __GNUC__
#include "atomicops.h"
+#include "utils.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -123,6 +125,7 @@
typedef intptr_t AtomicWord;
class Semaphore;
+class Mutex;
double ceiling(double x);
double modulo(double x, double y);
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index f096e94..cbec9b7 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -39,39 +39,121 @@
namespace internal {
// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUTF16Buffer : public UTF16Buffer {
+class InputStreamUTF16Buffer : public UC16CharacterStream {
public:
- explicit InputStreamUTF16Buffer(UnicodeInputStream* stream)
- : UTF16Buffer(),
- stream_(stream) { }
+ /* The InputStreamUTF16Buffer maintains an internal buffer
+ * that is filled in chunks from the UnicodeInputStream.
+ * It also maintains unlimited pushback capability, but optimized
+ * for small pushbacks.
+ * The pushback_buffer_ pointer points to the limit of pushbacks
+ * in the current buffer. There is room for a few pushed-back chars before
+ * the buffer containing the most recently read chunk. If this is overflowed,
+ * an external buffer is allocated/reused to hold further pushbacks, and
+ * pushback_buffer_ and buffer_cursor_/buffer_end_ now point to the
+ * new buffer. When this buffer is read to the end again, the cursor is
+ * switched back to the internal buffer.
+ */
+ explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
+ : UC16CharacterStream(),
+ stream_(stream),
+ pushback_buffer_(buffer_),
+ pushback_buffer_end_cache_(NULL),
+ pushback_buffer_backing_(NULL),
+ pushback_buffer_backing_size_(0) {
+ buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
+ }
- virtual ~InputStreamUTF16Buffer() { }
+ virtual ~InputStreamUTF16Buffer() {
+ if (pushback_buffer_backing_ != NULL) {
+ DeleteArray(pushback_buffer_backing_);
+ }
+ }
- virtual void PushBack(uc32 ch) {
- stream_->PushBack(ch);
+ virtual void PushBack(uc16 ch) {
+ ASSERT(pos_ > 0);
+ if (buffer_cursor_ <= pushback_buffer_) {
+ // No more room in the current buffer to do pushbacks.
+ if (pushback_buffer_end_cache_ == NULL) {
+ // We have overflowed the pushback space at the beginning of buffer_.
+ // Switch to using a separate allocated pushback buffer.
+ if (pushback_buffer_backing_ == NULL) {
+ // Allocate a buffer the first time we need it.
+ pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
+ pushback_buffer_backing_size_ = kPushBackSize;
+ }
+ pushback_buffer_ = pushback_buffer_backing_;
+ pushback_buffer_end_cache_ = buffer_end_;
+ buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
+ buffer_cursor_ = buffer_end_ - 1;
+ } else {
+ // Hit the bottom of the allocated pushback buffer.
+ // Double the buffer and continue.
+ uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
+ memcpy(new_buffer + pushback_buffer_backing_size_,
+ pushback_buffer_backing_,
+ pushback_buffer_backing_size_);
+ DeleteArray(pushback_buffer_backing_);
+ buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
+ pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
+ buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
+ }
+ }
+ pushback_buffer_[buffer_cursor_ - pushback_buffer_ - 1] = ch;
pos_--;
}
- virtual uc32 Advance() {
- uc32 result = stream_->Next();
- if (result >= 0) pos_++;
- return result;
+ protected:
+ virtual bool ReadBlock() {
+ if (pushback_buffer_end_cache_ != NULL) {
+ buffer_cursor_ = buffer_;
+ buffer_end_ = pushback_buffer_end_cache_;
+ pushback_buffer_end_cache_ = NULL;
+ return buffer_end_ > buffer_cursor_;
+ }
+ // Copy the top of the buffer into the pushback area.
+ int32_t value;
+ uc16* buffer_start = buffer_ + kPushBackSize;
+ buffer_cursor_ = buffer_end_ = buffer_start;
+ while ((value = stream_->Next()) >= 0) {
+ if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
+ value = unibrow::Utf8::kBadChar;
+ }
+ // buffer_end_ is a const pointer, but buffer_ is writable.
+ buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
+ if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
+ }
+ return buffer_end_ > buffer_start;
}
- virtual void SeekForward(int pos) {
+ virtual unsigned SlowSeekForward(unsigned pos) {
// Seeking in the input is not used by preparsing.
// It's only used by the real parser based on preparser data.
UNIMPLEMENTED();
+ return 0;
}
private:
+ static const unsigned kBufferSize = 512;
+ static const unsigned kPushBackSize = 16;
v8::UnicodeInputStream* const stream_;
+ // Buffer holding first kPushBackSize characters of pushback buffer,
+ // then kBufferSize chars of read-ahead.
+ // The pushback buffer is only used if pushing back characters past
+ // the start of a block.
+ uc16 buffer_[kPushBackSize + kBufferSize];
+ // Limit of pushbacks before new allocation is necessary.
+ uc16* pushback_buffer_;
+ // Only if that pushback buffer at the start of buffer_ isn't sufficient
+ // is the following used.
+ const uc16* pushback_buffer_end_cache_;
+ uc16* pushback_buffer_backing_;
+ unsigned pushback_buffer_backing_size_;
};
class StandAloneJavaScriptScanner : public JavaScriptScanner {
public:
- void Initialize(UTF16Buffer* source) {
+ void Initialize(UC16CharacterStream* source) {
source_ = source;
literal_flags_ = kLiteralString | kLiteralIdentifier;
Init();
@@ -92,7 +174,6 @@
bool EnableSlowAsserts() { return true; }
-
} // namespace internal.
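A minimal model of the pushback contract the buffer above implements, phrased with std::vector instead of the hand-rolled internal/backing buffers (DemoStream is hypothetical): pushed-back characters are returned, most recent first, before any further input is consumed, with no fixed depth limit.

  #include <cassert>
  #include <vector>

  class DemoStream {
   public:
    explicit DemoStream(const char* s) : input_(s) {}
    int Next() {
      if (!pushback_.empty()) {
        int ch = pushback_.back();
        pushback_.pop_back();
        return ch;
      }
      return *input_ ? *input_++ : -1;  // -1 signals end of input
    }
    void PushBack(int ch) { pushback_.push_back(ch); }  // unlimited depth
   private:
    const char* input_;
    std::vector<int> pushback_;
  };

  int main() {
    DemoStream s("ab");
    assert(s.Next() == 'a');
    s.PushBack('a');
    assert(s.Next() == 'a');  // pushed-back char is re-read first
    assert(s.Next() == 'b');
    return 0;
  }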
diff --git a/src/preparser.cc b/src/preparser.cc
index 03fc9dc..7cce685 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -1078,6 +1078,7 @@
Expect(i::Token::RBRACE, CHECK_OK);
+ // Position right after terminal '}'.
int end_pos = scanner_->location().end_pos;
log_->LogFunction(function_block_pos, end_pos,
function_scope.materialized_literal_count(),
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 8b5c1e2..3df6af0 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -122,7 +122,7 @@
}
-inline uint64_t HeapEntry::id() {
+uint64_t HeapEntry::id() {
union {
Id stored_id;
uint64_t returned_id;
@@ -146,6 +146,18 @@
}
}
+
+bool HeapSnapshotGenerator::ReportProgress(bool force) {
+ const int kProgressReportGranularity = 10000;
+ if (control_ != NULL
+ && (force || progress_counter_ % kProgressReportGranularity == 0)) {
+ return
+ control_->ReportProgressValue(progress_counter_, progress_total_) ==
+ v8::ActivityControl::kContinue;
+ }
+ return true;
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
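A sketch of the throttling contract above, with the control object's answer reduced to a bool: the observer is consulted only every kProgressReportGranularity counts (or when forced), and any answer other than kContinue aborts generation by returning false. The helper name and parameters are illustrative.

  static const int kGranularity = 10000;

  static bool DemoReportProgress(int counter, bool force, bool has_control,
                                 bool control_says_continue) {
    if (has_control && (force || counter % kGranularity == 0)) {
      return control_says_continue;  // false propagates as an abort
    }
    return true;  // no observer attached: never abort
  }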
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index ff4661f..364f51d 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1382,86 +1382,6 @@
}
-void HeapSnapshot::FillReversePostorderIndexes(Vector<HeapEntry*>* entries) {
- ClearPaint();
- int current_entry = 0;
- List<HeapEntry*> nodes_to_visit;
- nodes_to_visit.Add(root());
- root()->paint_reachable();
- while (!nodes_to_visit.is_empty()) {
- HeapEntry* entry = nodes_to_visit.last();
- Vector<HeapGraphEdge> children = entry->children();
- bool has_new_edges = false;
- for (int i = 0; i < children.length(); ++i) {
- if (children[i].type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* child = children[i].to();
- if (!child->painted_reachable()) {
- nodes_to_visit.Add(child);
- child->paint_reachable();
- has_new_edges = true;
- }
- }
- if (!has_new_edges) {
- entry->set_ordered_index(current_entry);
- (*entries)[current_entry++] = entry;
- nodes_to_visit.RemoveLast();
- }
- }
- entries->Truncate(current_entry);
-}
-
-
-static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
- int finger1 = i1, finger2 = i2;
- while (finger1 != finger2) {
- while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
- while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
- }
- return finger1;
-}
-
-// The algorithm is based on the article:
-// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
-// Softw. Pract. Exper. 4 (2001), pp. 1–10.
-void HeapSnapshot::BuildDominatorTree(const Vector<HeapEntry*>& entries,
- Vector<HeapEntry*>* dominators) {
- if (entries.length() == 0) return;
- const int root_index = entries.length() - 1;
- for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
- (*dominators)[root_index] = entries[root_index];
- bool changed = true;
- while (changed) {
- changed = false;
- for (int i = root_index - 1; i >= 0; --i) {
- HeapEntry* new_idom = NULL;
- Vector<HeapGraphEdge*> rets = entries[i]->retainers();
- int j = 0;
- for (; j < rets.length(); ++j) {
- if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* ret = rets[j]->From();
- if (dominators->at(ret->ordered_index()) != NULL) {
- new_idom = ret;
- break;
- }
- }
- for (++j; j < rets.length(); ++j) {
- if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
- HeapEntry* ret = rets[j]->From();
- if (dominators->at(ret->ordered_index()) != NULL) {
- new_idom = entries[Intersect(ret->ordered_index(),
- new_idom->ordered_index(),
- *dominators)];
- }
- }
- if (new_idom != NULL && dominators->at(i) != new_idom) {
- (*dominators)[i] = new_idom;
- changed = true;
- }
- }
- }
-}
-
-
void HeapSnapshot::SetDominatorsToSelf() {
for (int i = 0; i < entries_.length(); ++i) {
HeapEntry* entry = entries_[i];
@@ -1470,61 +1390,6 @@
}
-void HeapSnapshot::SetEntriesDominators() {
- // This array is used for maintaining reverse postorder of nodes.
- ScopedVector<HeapEntry*> ordered_entries(entries_.length());
- FillReversePostorderIndexes(&ordered_entries);
- ScopedVector<HeapEntry*> dominators(ordered_entries.length());
- BuildDominatorTree(ordered_entries, &dominators);
- for (int i = 0; i < ordered_entries.length(); ++i) {
- ASSERT(dominators[i] != NULL);
- ordered_entries[i]->set_dominator(dominators[i]);
- }
- // For nodes unreachable from root, set dominator to itself.
- SetDominatorsToSelf();
-}
-
-
-void HeapSnapshot::ApproximateRetainedSizes() {
- SetEntriesDominators();
- // As for the dominators tree we only know parent nodes, not
- // children, to sum up total sizes we traverse the tree level by
- // level upwards, starting from leaves.
- for (int i = 0; i < entries_.length(); ++i) {
- HeapEntry* entry = entries_[i];
- entry->set_retained_size(entry->self_size());
- entry->set_leaf();
- }
- while (true) {
- bool onlyLeaves = true;
- for (int i = 0; i < entries_.length(); ++i) {
- HeapEntry *entry = entries_[i], *dominator = entry->dominator();
- if (!entry->is_processed() && dominator != entry) {
- dominator->set_non_leaf();
- onlyLeaves = false;
- }
- }
- if (onlyLeaves) break;
-
- for (int i = 0; i < entries_.length(); ++i) {
- HeapEntry *entry = entries_[i], *dominator = entry->dominator();
- if (entry->is_leaf() && dominator != entry) {
- dominator->add_retained_size(entry->retained_size());
- }
- }
-
- // Mark all current leaves as processed, reset non-leaves back to leaves.
- for (int i = 0; i < entries_.length(); ++i) {
- HeapEntry* entry = entries_[i];
- if (entry->is_leaf())
- entry->set_processed();
- else if (entry->is_non_leaf())
- entry->set_leaf();
- }
- }
-}
-
-
HeapEntry* HeapSnapshot::GetNextEntryToInit() {
if (entries_.length() > 0) {
HeapEntry* last_entry = entries_.last();
@@ -1716,15 +1581,22 @@
const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
- HeapSnapshot* snapshot = new HeapSnapshot(this, type, name, uid);
- snapshots_.Add(snapshot);
- HashMap::Entry* entry =
- snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
- static_cast<uint32_t>(snapshot->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = snapshot;
- return snapshot;
+ return new HeapSnapshot(this, type, name, uid);
+}
+
+
+void HeapSnapshotsCollection::SnapshotGenerationFinished(
+ HeapSnapshot* snapshot) {
+ ids_.SnapshotGenerationFinished();
+ if (snapshot != NULL) {
+ snapshots_.Add(snapshot);
+ HashMap::Entry* entry =
+ snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
+ static_cast<uint32_t>(snapshot->uid()),
+ true);
+ ASSERT(entry->value == NULL);
+ entry->value = snapshot;
+ }
}
@@ -1832,8 +1704,10 @@
}
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
+ v8::ActivityControl* control)
: snapshot_(snapshot),
+ control_(control),
collection_(snapshot->collection()),
filler_(NULL) {
}
@@ -1990,21 +1864,13 @@
};
-void HeapSnapshotGenerator::GenerateSnapshot() {
+bool HeapSnapshotGenerator::GenerateSnapshot() {
AssertNoAllocation no_alloc;
+ SetProgressTotal(4); // 2 passes + dominators + sizes.
+
// Pass 1. Iterate heap contents to count entries and references.
- SnapshotCounter counter(&entries_);
- filler_ = &counter;
- filler_->AddEntry(HeapSnapshot::kInternalRootObject);
- filler_->AddEntry(HeapSnapshot::kGcRootsObject);
- HeapIterator iterator(HeapIterator::kPreciseFiltering);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- ExtractReferences(obj);
- }
- SetRootGcRootsReference();
- RootsReferencesExtractor extractor(this);
- Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ if (!CountEntriesAndReferences()) return false;
// Allocate and fill entries in the snapshot, allocate references.
snapshot_->AllocateEntries(entries_.entries_count(),
@@ -2014,16 +1880,14 @@
entries_.UpdateEntries(&allocator);
// Pass 2. Fill references.
- SnapshotFiller filler(snapshot_, &entries_);
- filler_ = &filler;
- iterator.reset();
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- ExtractReferences(obj);
- }
- SetRootGcRootsReference();
- Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ if (!FillReferences()) return false;
- snapshot_->ApproximateRetainedSizes();
+ if (!SetEntriesDominators()) return false;
+ if (!ApproximateRetainedSizes()) return false;
+
+ progress_counter_ = progress_total_;
+ if (!ReportProgress(true)) return false;
+ return true;
}
@@ -2351,6 +2215,183 @@
}
+void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
+ if (control_ == NULL) return;
+
+ HeapIterator iterator(HeapIterator::kPreciseFiltering);
+ int objects_count = 0;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next(), ++objects_count) {}
+ progress_total_ = objects_count * iterations_count;
+ progress_counter_ = 0;
+}
+
+
+bool HeapSnapshotGenerator::CountEntriesAndReferences() {
+ SnapshotCounter counter(&entries_);
+ filler_ = &counter;
+ filler_->AddEntry(HeapSnapshot::kInternalRootObject);
+ filler_->AddEntry(HeapSnapshot::kGcRootsObject);
+ return IterateAndExtractReferences();
+}
+
+
+bool HeapSnapshotGenerator::FillReferences() {
+ SnapshotFiller filler(snapshot_, &entries_);
+ filler_ = &filler;
+ return IterateAndExtractReferences();
+}
+
+
+void HeapSnapshotGenerator::FillReversePostorderIndexes(
+ Vector<HeapEntry*>* entries) {
+ snapshot_->ClearPaint();
+ int current_entry = 0;
+ List<HeapEntry*> nodes_to_visit;
+ nodes_to_visit.Add(snapshot_->root());
+ snapshot_->root()->paint_reachable();
+ while (!nodes_to_visit.is_empty()) {
+ HeapEntry* entry = nodes_to_visit.last();
+ Vector<HeapGraphEdge> children = entry->children();
+ bool has_new_edges = false;
+ for (int i = 0; i < children.length(); ++i) {
+ if (children[i].type() == HeapGraphEdge::kShortcut) continue;
+ HeapEntry* child = children[i].to();
+ if (!child->painted_reachable()) {
+ nodes_to_visit.Add(child);
+ child->paint_reachable();
+ has_new_edges = true;
+ }
+ }
+ if (!has_new_edges) {
+ entry->set_ordered_index(current_entry);
+ (*entries)[current_entry++] = entry;
+ nodes_to_visit.RemoveLast();
+ }
+ }
+ entries->Truncate(current_entry);
+}
+
+
+static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
+ int finger1 = i1, finger2 = i2;
+ while (finger1 != finger2) {
+ while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
+ while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
+ }
+ return finger1;
+}
+
+// The algorithm is based on the article:
+// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
+// Softw. Pract. Exper. 4 (2001), pp. 1–10.
+bool HeapSnapshotGenerator::BuildDominatorTree(
+ const Vector<HeapEntry*>& entries,
+ Vector<HeapEntry*>* dominators) {
+ if (entries.length() == 0) return true;
+ const int entries_length = entries.length(), root_index = entries_length - 1;
+ for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
+ (*dominators)[root_index] = entries[root_index];
+ int changed = 1;
+ const int base_progress_counter = progress_counter_;
+ while (changed != 0) {
+ changed = 0;
+ for (int i = root_index - 1; i >= 0; --i) {
+ HeapEntry* new_idom = NULL;
+ Vector<HeapGraphEdge*> rets = entries[i]->retainers();
+ int j = 0;
+ for (; j < rets.length(); ++j) {
+ if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
+ HeapEntry* ret = rets[j]->From();
+ if (dominators->at(ret->ordered_index()) != NULL) {
+ new_idom = ret;
+ break;
+ }
+ }
+ for (++j; j < rets.length(); ++j) {
+ if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
+ HeapEntry* ret = rets[j]->From();
+ if (dominators->at(ret->ordered_index()) != NULL) {
+ new_idom = entries[Intersect(ret->ordered_index(),
+ new_idom->ordered_index(),
+ *dominators)];
+ }
+ }
+ if (new_idom != NULL && dominators->at(i) != new_idom) {
+ (*dominators)[i] = new_idom;
+ ++changed;
+ }
+ }
+ int remaining = entries_length - changed;
+ if (remaining < 0) remaining = 0;
+ progress_counter_ = base_progress_counter + remaining;
+ if (!ReportProgress(true)) return false;
+ }
+ return true;
+}
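A worked example of Intersect on plain index arrays (DemoIntersect is illustrative): idom[i] holds the reverse-postorder index of i's current immediate dominator, and both fingers climb toward the root until they meet.

  #include <cassert>

  static int DemoIntersect(int i1, int i2, const int* idom) {
    int finger1 = i1, finger2 = i2;
    while (finger1 != finger2) {
      while (finger1 < finger2) finger1 = idom[finger1];
      while (finger2 < finger1) finger2 = idom[finger2];
    }
    return finger1;
  }

  int main() {
    // Root has the highest index (3): 1 dominates 0; 3 dominates 1 and 2.
    const int idom[] = { 1, 3, 3, 3 };
    assert(DemoIntersect(0, 2, idom) == 3);  // common dominator is the root
    return 0;
  }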
+
+
+bool HeapSnapshotGenerator::SetEntriesDominators() {
+ // This array is used for maintaining reverse postorder of nodes.
+ ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
+ FillReversePostorderIndexes(&ordered_entries);
+ ScopedVector<HeapEntry*> dominators(ordered_entries.length());
+ if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
+ for (int i = 0; i < ordered_entries.length(); ++i) {
+ ASSERT(dominators[i] != NULL);
+ ordered_entries[i]->set_dominator(dominators[i]);
+ }
+ // For nodes unreachable from root, set dominator to itself.
+ snapshot_->SetDominatorsToSelf();
+ return true;
+}
+
+
+bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
+ // As for the dominators tree we only know parent nodes, not
+ // children, to sum up total sizes we "bubble" node's self size
+ // adding it to all of its parents.
+ for (int i = 0; i < snapshot_->entries()->length(); ++i) {
+ HeapEntry* entry = snapshot_->entries()->at(i);
+ entry->set_retained_size(entry->self_size());
+ }
+ for (int i = 0;
+ i < snapshot_->entries()->length();
+ ++i, IncProgressCounter()) {
+ HeapEntry* entry = snapshot_->entries()->at(i);
+ int entry_size = entry->self_size();
+ for (HeapEntry* dominator = entry->dominator();
+ dominator != entry;
+ entry = dominator, dominator = entry->dominator()) {
+ dominator->add_retained_size(entry_size);
+ }
+ if (!ReportProgress()) return false;
+ }
+ return true;
+}
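A sketch of the new "bubble" pass on plain arrays (names hypothetical): each node's self size is added to every ancestor along its dominator chain, replacing the removed leaf-peeling iteration with one upward walk per node. The root is the node that dominates itself.

  #include <cassert>

  static void DemoRetainedSizes(const int* self, const int* dom,
                                int* retained, int n) {
    for (int i = 0; i < n; ++i) retained[i] = self[i];
    for (int e = 0; e < n; ++e) {
      int size = self[e];
      // Climb the dominator chain until a node dominates itself (the root).
      for (int node = e, d = dom[e]; d != node; node = d, d = dom[node]) {
        retained[d] += size;
      }
    }
  }

  int main() {
    const int dom[] = { 0, 0, 1 };   // 0 is root; 1 under 0; 2 under 1
    const int self[] = { 1, 5, 7 };
    int retained[3];
    DemoRetainedSizes(self, dom, retained, 3);
    assert(retained[0] == 13 && retained[1] == 12 && retained[2] == 7);
    return 0;
  }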
+
+
+bool HeapSnapshotGenerator::IterateAndExtractReferences() {
+ HeapIterator iterator(HeapIterator::kPreciseFiltering);
+ bool interrupted = false;
+ // Heap iteration with precise filtering must be finished in any case.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next(), IncProgressCounter()) {
+ if (!interrupted) {
+ ExtractReferences(obj);
+ if (!ReportProgress()) interrupted = true;
+ }
+ }
+ if (interrupted) return false;
+ SetRootGcRootsReference();
+ RootsReferencesExtractor extractor(this);
+ Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ return ReportProgress();
+}
+
+
void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
raw_additions_root_ =
NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 16764a2..cacd27e 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -526,7 +526,7 @@
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
- uint64_t id();
+ inline uint64_t id();
int self_size() { return self_size_; }
int retained_size() { return retained_size_; }
void add_retained_size(int size) { retained_size_ += size; }
@@ -558,13 +558,6 @@
void ApplyAndPaintAllReachable(Visitor* visitor);
void PaintAllReachable();
- bool is_leaf() { return painted_ == kLeaf; }
- void set_leaf() { painted_ = kLeaf; }
- bool is_non_leaf() { return painted_ == kNonLeaf; }
- void set_non_leaf() { painted_ = kNonLeaf; }
- bool is_processed() { return painted_ == kProcessed; }
- void set_processed() { painted_ = kProcessed; }
-
void SetIndexedReference(HeapGraphEdge::Type type,
int child_index,
int index,
@@ -625,10 +618,6 @@
static const unsigned kUnpainted = 0;
static const unsigned kPainted = 1;
static const unsigned kPaintedReachableFromOthers = 2;
- // Paints used for approximate retained sizes calculation.
- static const unsigned kLeaf = 0;
- static const unsigned kNonLeaf = 1;
- static const unsigned kProcessed = 2;
static const int kExactRetainedSizeTag = 1;
@@ -682,6 +671,7 @@
unsigned uid() { return uid_; }
HeapEntry* root() { return root_entry_; }
HeapEntry* gc_roots() { return gc_roots_entry_; }
+ List<HeapEntry*>* entries() { return &entries_; }
void AllocateEntries(
int entries_count, int children_count, int retainers_count);
@@ -693,7 +683,6 @@
int size,
int children_count,
int retainers_count);
- void ApproximateRetainedSizes();
void ClearPaint();
HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
HeapEntry* GetEntryById(uint64_t id);
@@ -716,10 +705,6 @@
int children_count,
int retainers_count);
HeapEntry* GetNextEntryToInit();
- void BuildDominatorTree(const Vector<HeapEntry*>& entries,
- Vector<HeapEntry*>* dominators);
- void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
- void SetEntriesDominators();
HeapSnapshotsCollection* collection_;
Type type_;
@@ -845,7 +830,7 @@
HeapSnapshot* NewSnapshot(
HeapSnapshot::Type type, const char* name, unsigned uid);
- void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
+ void SnapshotGenerationFinished(HeapSnapshot* snapshot);
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
@@ -968,16 +953,27 @@
HeapEntry* child_entry) = 0;
};
- explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
- void GenerateSnapshot();
+ HeapSnapshotGenerator(HeapSnapshot* snapshot,
+ v8::ActivityControl* control);
+ bool GenerateSnapshot();
private:
+ bool ApproximateRetainedSizes();
+ bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
+ Vector<HeapEntry*>* dominators);
+ bool CountEntriesAndReferences();
HeapEntry* GetEntry(Object* obj);
+ void IncProgressCounter() { ++progress_counter_; }
void ExtractReferences(HeapObject* obj);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
+ bool FillReferences();
+ void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
+ bool IterateAndExtractReferences();
+ inline bool ReportProgress(bool force = false);
+ bool SetEntriesDominators();
void SetClosureReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
@@ -1009,8 +1005,10 @@
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
void SetGcRootsReference(Object* child);
+ void SetProgressTotal(int iterations_count);
HeapSnapshot* snapshot_;
+ v8::ActivityControl* control_;
HeapSnapshotsCollection* collection_;
// Mapping from HeapObject* pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
@@ -1018,6 +1016,9 @@
// Used during references extraction to mark heap objects that
// are references via non-hidden properties.
HeapObjectsSet known_references_;
+ // Used during snapshot generation.
+ int progress_counter_;
+ int progress_total_;
friend class IndexedReferencesExtractor;
friend class RootsReferencesExtractor;
diff --git a/src/regexp.js b/src/regexp.js
index d01d04f..dd27266 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -120,22 +120,28 @@
function BuildResultFromMatchInfo(lastMatchInfo, s) {
var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
- var result = %_RegExpConstructResult(numResults, lastMatchInfo[CAPTURE0], s);
- if (numResults === 1) {
- var matchStart = lastMatchInfo[CAPTURE(0)];
- var matchEnd = lastMatchInfo[CAPTURE(1)];
- result[0] = SubString(s, matchStart, matchEnd);
+ var start = lastMatchInfo[CAPTURE0];
+ var end = lastMatchInfo[CAPTURE1];
+ var result = %_RegExpConstructResult(numResults, start, s);
+ if (start + 1 == end) {
+ result[0] = %_StringCharAt(s, start);
} else {
- for (var i = 0; i < numResults; i++) {
- var matchStart = lastMatchInfo[CAPTURE(i << 1)];
- var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
- if (matchStart != -1 && matchEnd != -1) {
- result[i] = SubString(s, matchStart, matchEnd);
+ result[0] = %_SubString(s, start, end);
+ }
+ var j = REGEXP_FIRST_CAPTURE + 2;
+ for (var i = 1; i < numResults; i++) {
+ start = lastMatchInfo[j++];
+ end = lastMatchInfo[j++];
+ if (end != -1) {
+ if (start + 1 == end) {
+ result[i] = %_StringCharAt(s, start);
} else {
- // Make sure the element is present. Avoid reading the undefined
- // property from the global object since this may change.
- result[i] = void 0;
+ result[i] = %_SubString(s, start, end);
}
+ } else {
+ // Make sure the element is present. Avoid reading the undefined
+ // property from the global object since this may change.
+ result[i] = void 0;
}
}
return result;
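The same fast-path decision in C++ terms, as a sketch with std::string standing in for V8 strings: a one-character match (start + 1 == end) is materialized as a single character rather than a fresh substring object.

  #include <string>

  static std::string DemoSlice(const std::string& s, int start, int end) {
    if (start + 1 == end) return std::string(1, s[start]);  // char fast path
    return s.substr(start, end - start);
  }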
@@ -166,12 +172,7 @@
}
string = regExpInput;
}
- var s;
- if (IS_STRING(string)) {
- s = string;
- } else {
- s = ToString(string);
- }
+ string = TO_STRING_INLINE(string);
var lastIndex = this.lastIndex;
// Conversion is required by the ES5 specification (RegExp.prototype.exec
@@ -180,7 +181,7 @@
var global = this.global;
if (global) {
- if (i < 0 || i > s.length) {
+ if (i < 0 || i > string.length) {
this.lastIndex = 0;
return null;
}
@@ -188,9 +189,9 @@
i = 0;
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+ %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
if (global) this.lastIndex = 0;
@@ -202,7 +203,7 @@
if (global) {
this.lastIndex = lastMatchInfo[CAPTURE1];
}
- return BuildResultFromMatchInfo(matchIndices, s);
+ return BuildResultFromMatchInfo(matchIndices, string);
}
@@ -227,12 +228,7 @@
string = regExpInput;
}
- var s;
- if (IS_STRING(string)) {
- s = string;
- } else {
- s = ToString(string);
- }
+ string = TO_STRING_INLINE(string);
var lastIndex = this.lastIndex;
@@ -241,13 +237,13 @@
var i = TO_INTEGER(lastIndex);
if (this.global) {
- if (i < 0 || i > s.length) {
+ if (i < 0 || i > string.length) {
this.lastIndex = 0;
return false;
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+ %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
this.lastIndex = 0;
return false;
@@ -269,11 +265,11 @@
(this.ignoreCase ? 'i' : '')
+ (this.multiline ? 'm' : ''));
}
- if (!regexp_val.test(s)) return false;
+ if (!regexp_val.test(string)) return false;
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+ %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, s, 0, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
if (matchIndices === null) return false;
lastMatchInfoOverride = null;
return true;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 051dc51..c53ddd2 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -68,12 +68,18 @@
};
+enum SamplerState {
+ IN_NON_JS_STATE = 0,
+ IN_JS_STATE = 1
+};
+
+
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
static const int kSamplerWindowSize = 16;
-static const int kSamplerTicksDelta = 32;
+static const int kSamplerTicksBetweenThresholdAdjustment = 32;
static const int kSamplerThresholdInit = 3;
static const int kSamplerThresholdMin = 1;
@@ -88,6 +94,11 @@
static int sampler_threshold = kSamplerThresholdInit;
static int sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
+static int sampler_ticks_until_threshold_adjustment =
+ kSamplerTicksBetweenThresholdAdjustment;
+
+// The ratio of ticks spent in JS code in percent.
+static Atomic32 js_ratio;
// The JSFunctions in the sampler window are not GC safe. Old-space
// pointers are not cleared during mark-sweep collection and therefore
@@ -261,40 +272,71 @@
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
JSFunction* samples[kSamplerFrameCount];
- int count = 0;
+ int sample_count = 0;
+ int frame_count = 0;
for (JavaScriptFrameIterator it;
- count < kSamplerFrameCount && !it.done();
+ frame_count++ < kSamplerFrameCount && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = JSFunction::cast(frame->function());
- int function_size = function->shared()->SourceSize();
- int threshold_size_factor;
- if (function_size > kSizeLimit) {
- threshold_size_factor = sampler_threshold_size_factor;
- } else {
- threshold_size_factor = 1;
+
+ // Adjust threshold each time we have processed
+ // a certain number of ticks.
+ if (sampler_ticks_until_threshold_adjustment > 0) {
+ sampler_ticks_until_threshold_adjustment--;
+ if (sampler_ticks_until_threshold_adjustment <= 0) {
+ // If the threshold is not already at the minimum,
+ // modify and reset the ticks until the next adjustment.
+ if (sampler_threshold > kSamplerThresholdMin) {
+ sampler_threshold -= kSamplerThresholdDelta;
+ sampler_ticks_until_threshold_adjustment =
+ kSamplerTicksBetweenThresholdAdjustment;
+ }
+ }
}
- int threshold = sampler_threshold * threshold_size_factor;
- samples[count++] = function;
if (function->IsMarkedForLazyRecompilation()) {
Code* unoptimized = function->shared()->code();
int nesting = unoptimized->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
- } else if (LookupSample(function) >= threshold) {
- if (IsOptimizable(function)) {
- Optimize(function, false, 0);
- CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
- }
+ }
+
+ // Do not record non-optimizable functions.
+ if (!IsOptimizable(function)) continue;
+ samples[sample_count++] = function;
+
+ int function_size = function->shared()->SourceSize();
+ int threshold_size_factor = (function_size > kSizeLimit)
+ ? sampler_threshold_size_factor
+ : 1;
+
+ int threshold = sampler_threshold * threshold_size_factor;
+ int current_js_ratio = NoBarrier_Load(&js_ratio);
+
+ // Adjust threshold depending on the ratio of time spent
+ // in JS code.
+ if (current_js_ratio < 20) {
+ // If we spend less than 20% of the time in JS code,
+ // do not optimize.
+ continue;
+ } else if (current_js_ratio < 75) {
+ // Below 75% of time spent in JS code, only optimize very
+ // frequently used functions.
+ threshold *= 3;
+ }
+
+ if (LookupSample(function) >= threshold) {
+ Optimize(function, false, 0);
+ CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
}
}
// Add the collected functions as samples. It's important not to do
// this as part of collecting them because this will interfere with
// the sample lookup in case of recursive functions.
- for (int i = 0; i < count; i++) {
+ for (int i = 0; i < sample_count; i++) {
AddSample(samples[i], kSamplerFrameWeight[i]);
}
}
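A sketch of the per-function gating this introduces (the size_factor parameter and the -1 "skip" convention are illustrative assumptions): the base threshold scales with source size, a JS ratio under 20% suppresses optimization entirely, and a ratio under 75% triples the bar.

  static int DemoEffectiveThreshold(int base_threshold, int size_factor,
                                    bool big_function, int js_ratio_percent) {
    int threshold = base_threshold * (big_function ? size_factor : 1);
    if (js_ratio_percent < 20) return -1;   // -1: do not optimize at all
    if (js_ratio_percent < 75) threshold *= 3;
    return threshold;
  }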
@@ -308,8 +350,35 @@
}
+#ifdef ENABLE_LOGGING_AND_PROFILING
+static void UpdateStateRatio(SamplerState current_state) {
+ static const int kStateWindowSize = 128;
+ static SamplerState state_window[kStateWindowSize];
+ static int state_window_position = 0;
+ static int state_counts[2] = { kStateWindowSize, 0 };
+
+ SamplerState old_state = state_window[state_window_position];
+ state_counts[old_state]--;
+ state_window[state_window_position] = current_state;
+ state_counts[current_state]++;
+ ASSERT(IsPowerOf2(kStateWindowSize));
+ state_window_position = (state_window_position + 1) &
+ (kStateWindowSize - 1);
+ NoBarrier_Store(&js_ratio, state_counts[IN_JS_STATE] * 100 /
+ kStateWindowSize);
+}
+#endif
+
+
void RuntimeProfiler::NotifyTick() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Record state sample.
+ SamplerState state = Top::IsInJSState()
+ ? IN_JS_STATE
+ : IN_NON_JS_STATE;
+ UpdateStateRatio(state);
StackGuard::RequestRuntimeProfilerTick();
+#endif
}
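
UpdateStateRatio above is a fixed-size sliding window with running counts, so each tick costs O(1) and only one division happens per update. The same technique as a standalone sketch (StateRatioWindow is an illustrative name):

    // Circular window of two-valued states plus per-state counts. Evicting
    // the oldest sample and inserting the new one keeps the counts exact.
    enum SamplerState { IN_NON_JS_STATE = 0, IN_JS_STATE = 1 };

    class StateRatioWindow {
     public:
      StateRatioWindow() : position_(0) {
        for (int i = 0; i < kWindowSize; i++) window_[i] = IN_NON_JS_STATE;
        counts_[IN_NON_JS_STATE] = kWindowSize;
        counts_[IN_JS_STATE] = 0;
      }
      // Records one tick; returns the percentage of JS ticks in the window.
      int Record(SamplerState state) {
        counts_[window_[position_]]--;  // Evict the oldest sample.
        window_[position_] = state;
        counts_[state]++;
        // kWindowSize is a power of two, so masking wraps the position.
        position_ = (position_ + 1) & (kWindowSize - 1);
        return counts_[IN_JS_STATE] * 100 / kWindowSize;
      }
     private:
      static const int kWindowSize = 128;
      SamplerState window_[kWindowSize];
      int counts_[2];
      int position_;
    };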
@@ -341,6 +410,8 @@
void RuntimeProfiler::Reset() {
sampler_threshold = kSamplerThresholdInit;
+ sampler_ticks_until_threshold_adjustment =
+ kSamplerTicksBetweenThresholdAdjustment;
sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
}
@@ -361,6 +432,7 @@
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
static const int kNonJSTicksThreshold = 100;
// We suspend the runtime profiler thread when not running
// JavaScript. If the CPU profiler is active we must not do this
@@ -378,6 +450,7 @@
}
}
}
+#endif
return false;
}
diff --git a/src/runtime.cc b/src/runtime.cc
index efdb508..0fd2f8b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -614,22 +614,6 @@
}
-// Sets the magic number that identifies a function as one of the special
-// math functions that can be inlined.
-static MaybeObject* Runtime_SetMathFunctionId(Arguments args) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSFunction, function, args[0]);
- CONVERT_CHECKED(Smi, id, args[1]);
- RUNTIME_ASSERT(id->value() >= 0);
- RUNTIME_ASSERT(id->value() < SharedFunctionInfo::max_math_id_number());
-
- function->shared()->set_math_function_id(id->value());
-
- return Heap::undefined_value();
-}
-
-
static MaybeObject* Runtime_IsConstructCall(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
@@ -3516,7 +3500,8 @@
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
- CONVERT_CHECKED(JSFunction, fun, args[3]);
+ Object* fun = args[3];
+ RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
CONVERT_CHECKED(Smi, flag_attr, args[4]);
int unchecked = flag_attr->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -3572,7 +3557,7 @@
}
LookupResult result;
- js_object->LocalLookupRealNamedProperty(*name, &result);
+ js_object->LookupRealNamedProperty(*name, &result);
// Take special care when attributes are different and there is already
// a property. For simplicity we normalize the property which enables us
@@ -3580,7 +3565,8 @@
// map. The current version of SetObjectProperty does not handle attributes
// correctly in the case where a property is a field and is reset with
// new attributes.
- if (result.IsProperty() && attr != result.GetAttributes()) {
+ if (result.IsProperty() &&
+ (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
// New attributes - normalize to avoid writing to instance descriptor
NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
@@ -4546,42 +4532,53 @@
static const unsigned int kQuoteTableLength = 128u;
-static const char* const JsonQuotes[kQuoteTableLength] = {
- "\\u0000", "\\u0001", "\\u0002", "\\u0003",
- "\\u0004", "\\u0005", "\\u0006", "\\u0007",
- "\\b", "\\t", "\\n", "\\u000b",
- "\\f", "\\r", "\\u000e", "\\u000f",
- "\\u0010", "\\u0011", "\\u0012", "\\u0013",
- "\\u0014", "\\u0015", "\\u0016", "\\u0017",
- "\\u0018", "\\u0019", "\\u001a", "\\u001b",
- "\\u001c", "\\u001d", "\\u001e", "\\u001f",
- NULL, NULL, "\\\"", NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- "\\\\", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
+static const int kJsonQuotesCharactersPerEntry = 8;
+static const char* const JsonQuotes =
+ "\\u0000 \\u0001 \\u0002 \\u0003 "
+ "\\u0004 \\u0005 \\u0006 \\u0007 "
+ "\\b \\t \\n \\u000b "
+ "\\f \\r \\u000e \\u000f "
+ "\\u0010 \\u0011 \\u0012 \\u0013 "
+ "\\u0014 \\u0015 \\u0016 \\u0017 "
+ "\\u0018 \\u0019 \\u001a \\u001b "
+ "\\u001c \\u001d \\u001e \\u001f "
+ " ! \\\" # "
+ "$ % & ' "
+ "( ) * + "
+ ", - . / "
+ "0 1 2 3 "
+ "4 5 6 7 "
+ "8 9 : ; "
+ "< = > ? "
+ "@ A B C "
+ "D E F G "
+ "H I J K "
+ "L M N O "
+ "P Q R S "
+ "T U V W "
+ "X Y Z [ "
+ "\\\\ ] ^ _ "
+ "` a b c "
+ "d e f g "
+ "h i j k "
+ "l m n o "
+ "p q r s "
+ "t u v w "
+ "x y z { "
+ "| } ~ \177 ";
+// For a string of less than 32k characters it should always be
+// possible to allocate it in new space.
+static const int kMaxGuaranteedNewSpaceString = 32 * 1024;
+
+
+// Doing JSON quoting cannot make the string more than this many times larger.
+static const int kJsonQuoteWorstCaseBlowup = 6;
+
+
+// Covers the entire ASCII range (all other characters are unchanged by JSON
+// quoting).
static const byte JsonQuoteLengths[kQuoteTableLength] = {
6, 6, 6, 6, 6, 6, 6, 6,
2, 2, 2, 6, 2, 2, 6, 6,
@@ -4602,18 +4599,6 @@
};
-template <typename Char>
-Char* WriteString(Char* dst, const char* src_string) {
- char c;
- for (c = *src_string; c; c = *src_string) {
- *dst = c;
- dst++;
- src_string++;
- }
- return dst;
-}
-
-
template <typename StringType>
MaybeObject* AllocateRawString(int length);
@@ -4631,58 +4616,111 @@
template <typename Char, typename StringType>
-static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
+static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
int length = characters.length();
- int quoted_length = 0;
- for (int i = 0; i < length; i++) {
- unsigned int c = characters[i];
- if (sizeof(Char) > 1u) {
- quoted_length += (c >= kQuoteTableLength) ? 1 : JsonQuoteLengths[c];
+ const Char* read_cursor = characters.start();
+ const Char* end = read_cursor + length;
+ const int kSpaceForQuotes = 2;
+ int quoted_length = kSpaceForQuotes;
+ while (read_cursor < end) {
+ Char c = *(read_cursor++);
+ if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+ quoted_length++;
} else {
- quoted_length += JsonQuoteLengths[c];
+ quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
}
}
- Counters::quote_json_char_count.Increment(length);
-
- // Add space for quotes.
- quoted_length += 2;
-
MaybeObject* new_alloc = AllocateRawString<StringType>(quoted_length);
Object* new_object;
if (!new_alloc->ToObject(&new_object)) {
- Counters::quote_json_char_recount.Increment(length);
return new_alloc;
}
StringType* new_string = StringType::cast(new_object);
+ Char* write_cursor = reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize);
+ *(write_cursor++) = '"';
+
+ read_cursor = characters.start();
+ while (read_cursor < end) {
+ Char c = *(read_cursor++);
+ if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+ *(write_cursor++) = c;
+ } else {
+ int len = JsonQuoteLengths[static_cast<unsigned>(c)];
+ const char* replacement = JsonQuotes +
+ static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
+ for (int i = 0; i < len; i++) {
+ *write_cursor++ = *replacement++;
+ }
+ }
+ }
+ *(write_cursor++) = '"';
+ return new_string;
+}
+
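
The rewrite replaces the old table of string pointers with one flat string of fixed 8-character slots: the replacement for code c starts at JsonQuotes + c * 8 and JsonQuoteLengths[c] says how many of those characters matter, so unescaped characters simply carry themselves in their slot. A sketch of the same two-pass shape against std::string; the function and parameter names are illustrative:

    #include <string>

    static const int kEntrySize = 8;  // Matches kJsonQuotesCharactersPerEntry.

    // table/lengths are assumed to be shaped like JsonQuotes/JsonQuoteLengths.
    std::string QuoteAscii(const std::string& in, const char* table,
                           const unsigned char* lengths) {
      size_t quoted = 2;  // Opening and closing quote.
      for (unsigned char c : in) quoted += (c < 128) ? lengths[c] : 1;
      std::string out;
      out.reserve(quoted);             // First pass sized the output exactly.
      out.push_back('"');
      for (unsigned char c : in) {
        if (c >= 128) { out.push_back(c); continue; }  // Unchanged by quoting.
        out.append(table + c * kEntrySize, lengths[c]);
      }
      out.push_back('"');
      return out;
    }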
+
+template <typename Char, typename StringType>
+static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
+ int length = characters.length();
+ Counters::quote_json_char_count.Increment(length);
+ const int kSpaceForQuotes = 2;
+ int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
+ if (worst_case_length > kMaxGuaranteedNewSpaceString) {
+ return SlowQuoteJsonString<Char, StringType>(characters);
+ }
+
+ MaybeObject* new_alloc = AllocateRawString<StringType>(worst_case_length);
+ Object* new_object;
+ if (!new_alloc->ToObject(&new_object)) {
+ return new_alloc;
+ }
+ if (!Heap::new_space()->Contains(new_object)) {
+ // Even if our string is small enough to fit in new space we still have to
+ // handle it being allocated in old space as may happen in the third
+ // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
+ // CEntryStub::GenerateCore.
+ return SlowQuoteJsonString<Char, StringType>(characters);
+ }
+ StringType* new_string = StringType::cast(new_object);
+ ASSERT(Heap::new_space()->Contains(new_string));
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize);
*(write_cursor++) = '"';
+
const Char* read_cursor = characters.start();
- if (quoted_length == length + 2) {
- CopyChars(write_cursor, read_cursor, length);
- write_cursor += length;
- } else {
- const Char* end = read_cursor + length;
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
- } else {
- const char* replacement = JsonQuotes[static_cast<unsigned>(c)];
- if (!replacement) {
- *(write_cursor++) = c;
- } else {
- write_cursor = WriteString(write_cursor, replacement);
+ const Char* end = read_cursor + length;
+ while (read_cursor < end) {
+ Char c = *(read_cursor++);
+ if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+ *(write_cursor++) = c;
+ } else {
+ int len = JsonQuoteLengths[static_cast<unsigned>(c)];
+ const char* replacement = JsonQuotes +
+ static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
+ write_cursor[0] = replacement[0];
+ if (len > 1) {
+ write_cursor[1] = replacement[1];
+ if (len > 2) {
+ ASSERT(len == 6);
+ write_cursor[2] = replacement[2];
+ write_cursor[3] = replacement[3];
+ write_cursor[4] = replacement[4];
+ write_cursor[5] = replacement[5];
}
}
+ write_cursor += len;
}
}
*(write_cursor++) = '"';
- ASSERT_EQ(SeqAsciiString::kHeaderSize + quoted_length * sizeof(Char),
- reinterpret_cast<Address>(write_cursor) - new_string->address());
+
+ int final_length = static_cast<int>(
+ write_cursor - reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize));
+ Heap::new_space()->ShrinkStringAtAllocationBoundary<StringType>(new_string,
+ final_length);
return new_string;
}
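
The fast path trades the measuring pass for worst-case sizing: no character expands to more than six output characters, so 6 * length + 2 always suffices and the surplus is handed back afterwards. The same shape sketched with std::string standing in for the new-space string, with resize() playing the role of ShrinkStringAtAllocationBoundary:

    #include <string>

    // Single-pass variant enabled by worst-case sizing. table/lengths are
    // assumed to be shaped like JsonQuotes/JsonQuoteLengths above.
    std::string QuoteAsciiOnePass(const std::string& in, const char* table,
                                  const unsigned char* lengths) {
      std::string out(in.size() * 6 + 2, '\0');  // Worst-case blowup is 6.
      char* write_cursor = &out[0];
      *write_cursor++ = '"';
      for (unsigned char c : in) {
        if (c >= 128) { *write_cursor++ = c; continue; }
        int len = lengths[c];
        const char* replacement = table + c * 8;
        for (int i = 0; i < len; i++) *write_cursor++ = replacement[i];
      }
      *write_cursor++ = '"';
      out.resize(write_cursor - &out[0]);  // Return the unused tail.
      return out;
    }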
@@ -5977,37 +6015,6 @@
}
-// Helper function to compute x^y, where y is known to be an
-// integer. Uses binary decomposition to limit the number of
-// multiplications; see the discussion in "Hacker's Delight" by Henry
-// S. Warren, Jr., figure 11-6, page 213.
-static double powi(double x, int y) {
- ASSERT(y != kMinInt);
- unsigned n = (y < 0) ? -y : y;
- double m = x;
- double p = 1;
- while (true) {
- if ((n & 1) != 0) p *= m;
- n >>= 1;
- if (n == 0) {
- if (y < 0) {
- // Unfortunately, we have to be careful when p has reached
- // infinity in the computation, because sometimes the higher
- // internal precision in the pow() implementation would have
- // given us a finite p. This happens very rarely.
- double result = 1.0 / p;
- return (result == 0 && isinf(p))
- ? pow(x, static_cast<double>(y)) // Avoid pow(double, int).
- : result;
- } else {
- return p;
- }
- }
- m *= m;
- }
-}
-
-
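
The removed helper survives as power_double_int(); its core is binary decomposition of the exponent ("Hacker's Delight", figure 11-6): square the base once per exponent bit and multiply it into the result when the bit is set. A sketch of the same algorithm (PowerDoubleInt is an illustrative name):

    #include <cmath>

    static double PowerDoubleInt(double x, int y) {
      // Unsigned negation also yields the right magnitude for INT_MIN,
      // a case the removed version excluded with an ASSERT.
      unsigned n = (y < 0) ? 0u - static_cast<unsigned>(y)
                           : static_cast<unsigned>(y);
      double m = x;
      double p = 1.0;
      while (n != 0) {
        if ((n & 1) != 0) p *= m;  // This bit is set: fold in x^(2^i).
        n >>= 1;
        if (n != 0) m *= m;        // Square for the next bit.
      }
      if (y >= 0) return p;
      // As the original comment noted, 1/p can underflow to zero when p
      // overflowed to infinity even though pow() would stay finite.
      double result = 1.0 / p;
      return (result == 0 && std::isinf(p))
          ? std::pow(x, static_cast<double>(y))
          : result;
    }
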
static MaybeObject* Runtime_Math_pow(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -6019,31 +6026,11 @@
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
int y = Smi::cast(args[1])->value();
- return Heap::NumberFromDouble(powi(x, y));
+ return Heap::NumberFromDouble(power_double_int(x, y));
}
CONVERT_DOUBLE_CHECKED(y, args[1]);
-
- if (!isinf(x)) {
- if (y == 0.5) {
- // It's not uncommon to use Math.pow(x, 0.5) to compute the
- // square root of a number. To speed up such computations, we
- // explictly check for this case and use the sqrt() function
- // which is faster than pow().
- return Heap::AllocateHeapNumber(sqrt(x));
- } else if (y == -0.5) {
- // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
- return Heap::AllocateHeapNumber(1.0 / sqrt(x));
- }
- }
-
- if (y == 0) {
- return Smi::FromInt(1);
- } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return Heap::nan_value();
- } else {
- return Heap::AllocateHeapNumber(pow(x, y));
- }
+ return Heap::AllocateHeapNumber(power_double_double(x, y));
}
// Fast version of Math.pow if we know that y is not an integer and
@@ -6054,11 +6041,11 @@
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
if (y == 0) {
- return Smi::FromInt(1);
+ return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return Heap::nan_value();
+ return Heap::nan_value();
} else {
- return Heap::AllocateHeapNumber(pow(x, y));
+ return Heap::AllocateHeapNumber(pow(x, y));
}
}
@@ -6891,12 +6878,17 @@
if (CompileOptimized(function, ast_id) && function->IsOptimized()) {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
function->code()->deoptimization_data());
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement offset %d in optimized code]\n",
+ if (data->OsrPcOffset()->value() >= 0) {
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement offset %d in optimized code]\n",
data->OsrPcOffset()->value());
+ }
+ ASSERT(data->OsrAstId()->value() == ast_id);
+ } else {
+ // We may never generate the desired OSR entry if we emit an
+ // early deoptimize.
+ succeeded = false;
}
- ASSERT(data->OsrAstId()->value() == ast_id);
- ASSERT(data->OsrPcOffset()->value() >= 0);
} else {
succeeded = false;
}
@@ -7671,13 +7663,13 @@
}
-// Push an array unto an array of arrays if it is not already in the
+// Push an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed on the stack and
// false otherwise.
static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
- CONVERT_CHECKED(JSArray, element, args[1]);
+ CONVERT_CHECKED(JSObject, element, args[1]);
RUNTIME_ASSERT(array->HasFastElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
@@ -9688,7 +9680,7 @@
// Check the execution state and decode arguments frame and source to be
// evaluated.
- ASSERT(args.length() == 4);
+ ASSERT(args.length() == 5);
Object* check_result;
{ MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
if (!maybe_check_result->ToObject(&check_result)) {
@@ -9698,6 +9690,7 @@
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
CONVERT_ARG_CHECKED(String, source, 2);
CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
+ Handle<Object> additional_context(args[4]);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@@ -9748,6 +9741,11 @@
Handle<Context> function_context(frame_context->fcontext());
context = CopyWithContextChain(frame_context, context);
+ if (additional_context->IsJSObject()) {
+ context = Factory::NewWithContext(context,
+ Handle<JSObject>::cast(additional_context), false);
+ }
+
// Wrap the evaluation statement in a new function compiled in the newly
// created context. The function has one parameter which has to be called
// 'arguments'. This is to have access to what would have been 'arguments' in
@@ -9802,7 +9800,7 @@
// Check the execution state and decode arguments frame and source to be
// evaluated.
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
Object* check_result;
{ MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
if (!maybe_check_result->ToObject(&check_result)) {
@@ -9811,6 +9809,7 @@
}
CONVERT_ARG_CHECKED(String, source, 1);
CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
+ Handle<Object> additional_context(args[3]);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@@ -9829,11 +9828,24 @@
// debugger was invoked.
Handle<Context> context = Top::global_context();
+ bool is_global = true;
+
+ if (additional_context->IsJSObject()) {
+    // Create a function context first, then put 'with' context on top of it.
+ Handle<JSFunction> go_between = Factory::NewFunction(
+ Factory::empty_string(), Factory::undefined_value());
+ go_between->set_context(*context);
+ context =
+ Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+ context->set_extension(JSObject::cast(*additional_context));
+ is_global = false;
+ }
+
// Compile the source to be evaluated.
Handle<SharedFunctionInfo> shared =
Compiler::CompileEval(source,
context,
- true);
+ is_global);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
diff --git a/src/runtime.h b/src/runtime.h
index 5c841fc..5ecae7e 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -66,7 +66,6 @@
\
F(IsInPrototypeChain, 2, 1) \
F(SetHiddenPrototype, 2, 1) \
- F(SetMathFunctionId, 2, 1) \
\
F(IsConstructCall, 0, 1) \
\
@@ -343,8 +342,8 @@
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 3, 1) \
F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 4, 1) \
- F(DebugEvaluateGlobal, 3, 1) \
+ F(DebugEvaluate, 5, 1) \
+ F(DebugEvaluateGlobal, 4, 1) \
F(DebugGetLoadedScripts, 0, 1) \
F(DebugReferencedBy, 3, 1) \
F(DebugConstructedBy, 2, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index f2c8d6b..28a38ca 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -594,13 +594,15 @@
// ECMA-262, section 8.6.2.6, page 28.
function DefaultNumber(x) {
- if (IS_FUNCTION(x.valueOf)) {
- var v = x.valueOf();
+ var valueOf = x.valueOf;
+ if (IS_FUNCTION(valueOf)) {
+ var v = %_CallFunction(x, valueOf);
if (%IsPrimitive(v)) return v;
}
- if (IS_FUNCTION(x.toString)) {
- var s = x.toString();
+ var toString = x.toString;
+ if (IS_FUNCTION(toString)) {
+ var s = %_CallFunction(x, toString);
if (%IsPrimitive(s)) return s;
}
@@ -610,13 +612,15 @@
// ECMA-262, section 8.6.2.6, page 28.
function DefaultString(x) {
- if (IS_FUNCTION(x.toString)) {
- var s = x.toString();
+ var toString = x.toString;
+ if (IS_FUNCTION(toString)) {
+ var s = %_CallFunction(x, toString);
if (%IsPrimitive(s)) return s;
}
- if (IS_FUNCTION(x.valueOf)) {
- var v = x.valueOf();
+ var valueOf = x.valueOf;
+ if (IS_FUNCTION(valueOf)) {
+ var v = %_CallFunction(x, valueOf);
if (%IsPrimitive(v)) return v;
}
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 9e58c4e..b26fee0 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -35,12 +35,6 @@
namespace internal {
// ----------------------------------------------------------------------------
-// UTF16Buffer
-
-UTF16Buffer::UTF16Buffer()
- : pos_(0), end_(kNoEndPosition) { }
-
-// ----------------------------------------------------------------------------
// LiteralCollector
LiteralCollector::LiteralCollector()
@@ -92,7 +86,7 @@
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner() : source_(NULL) {}
+Scanner::Scanner() { }
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -142,8 +136,7 @@
// ----------------------------------------------------------------------------
// JavaScriptScanner
-JavaScriptScanner::JavaScriptScanner()
- : has_line_terminator_before_next_(false) {}
+JavaScriptScanner::JavaScriptScanner() : Scanner() {}
Token::Value JavaScriptScanner::Next() {
@@ -503,12 +496,21 @@
void JavaScriptScanner::SeekForward(int pos) {
- source_->SeekForward(pos - 1);
- Advance();
- // This function is only called to seek to the location
- // of the end of a function (at the "}" token). It doesn't matter
- // whether there was a line terminator in the part we skip.
- has_line_terminator_before_next_ = false;
+ // After this call, we will have the token at the given position as
+ // the "next" token. The "current" token will be invalid.
+ if (pos == next_.location.beg_pos) return;
+ int current_pos = source_pos();
+ ASSERT_EQ(next_.location.end_pos, current_pos);
+ // Positions inside the lookahead token aren't supported.
+ ASSERT(pos >= current_pos);
+ if (pos != current_pos) {
+ source_->SeekForward(pos - source_->pos());
+ Advance();
+ // This function is only called to seek to the location
+ // of the end of a function (at the "}" token). It doesn't matter
+ // whether there was a line terminator in the part we skip.
+ has_line_terminator_before_next_ = false;
+ }
Scan();
}
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 3d344f3..c50b8f3 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -52,31 +52,75 @@
return -1;
}
-// ----------------------------------------------------------------------------
-// UTF16Buffer - scanner input source with pushback.
-class UTF16Buffer {
+// ---------------------------------------------------------------------
+// Buffered stream of characters, using an internal UC16 buffer.
+
+class UC16CharacterStream {
public:
- UTF16Buffer();
- virtual ~UTF16Buffer() {}
+ UC16CharacterStream() : pos_(0) { }
+ virtual ~UC16CharacterStream() { }
- virtual void PushBack(uc32 ch) = 0;
- // Returns a value < 0 when the buffer end is reached.
- virtual uc32 Advance() = 0;
- virtual void SeekForward(int pos) = 0;
+ // Returns and advances past the next UC16 character in the input
+ // stream. If there are no more characters, it returns a negative
+ // value.
+ inline int32_t Advance() {
+ if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
+ pos_++;
+ return *(buffer_cursor_++);
+ }
+ // Note: currently the following increment is necessary to avoid a
+ // parser problem! The scanner treats the final kEndOfInput as
+ // a character with a position, and does math relative to that
+ // position.
+ pos_++;
- int pos() const { return pos_; }
+ return kEndOfInput;
+ }
- static const int kNoEndPosition = 1;
+ // Return the current position in the character stream.
+ // Starts at zero.
+ inline unsigned pos() const { return pos_; }
+
+ // Skips forward past the next character_count UC16 characters
+ // in the input, or until the end of input if that comes sooner.
+  // Returns the number of characters actually skipped. If less
+  // than character_count, the end of the input was reached.
+ inline unsigned SeekForward(unsigned character_count) {
+ unsigned buffered_chars =
+ static_cast<unsigned>(buffer_end_ - buffer_cursor_);
+ if (character_count <= buffered_chars) {
+ buffer_cursor_ += character_count;
+ pos_ += character_count;
+ return character_count;
+ }
+ return SlowSeekForward(character_count);
+ }
+
+ // Pushes back the most recently read UC16 character, i.e.,
+ // the value returned by the most recent call to Advance.
+ // Must not be used right after calling SeekForward.
+ virtual void PushBack(uc16 character) = 0;
protected:
- // Initial value of end_ before the input stream is initialized.
+ static const int32_t kEndOfInput = -1;
- int pos_; // Current position in the buffer.
- int end_; // Position where scanning should stop (EOF).
+ // Ensures that the buffer_cursor_ points to the character at
+ // position pos_ of the input, if possible. If the position
+ // is at or after the end of the input, return false. If there
+ // are more characters available, return true.
+ virtual bool ReadBlock() = 0;
+ virtual unsigned SlowSeekForward(unsigned character_count) = 0;
+
+ const uc16* buffer_cursor_;
+ const uc16* buffer_end_;
+ unsigned pos_;
};
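
The design point of the new interface is that Advance() is a non-virtual inline: while buffered characters remain it is a pointer increment, and only the refill (ReadBlock) pays a virtual dispatch. A hedged usage sketch against the interface declared above (CountLines is an illustrative consumer):

    #include <stdint.h>

    // Counts lines by draining the stream; a negative Advance() result
    // signals end of input, per the contract above.
    static int CountLines(UC16CharacterStream* stream) {
      int lines = 1;
      int32_t c;
      while ((c = stream->Advance()) >= 0) {
        if (c == '\n') lines++;
      }
      return lines;
    }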
+// ---------------------------------------------------------------------
+// Constants used by scanners.
+
class ScannerConstants : AllStatic {
public:
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
@@ -277,7 +321,7 @@
// Low-level scanning support.
void Advance() { c0_ = source_->Advance(); }
void PushBack(uc32 ch) {
- source_->PushBack(ch);
+ source_->PushBack(c0_);
c0_ = ch;
}
@@ -307,8 +351,8 @@
TokenDesc current_; // desc for current token (as returned by Next())
TokenDesc next_; // desc for next token (one token look-ahead)
- // Input stream. Must be initialized to an UTF16Buffer.
- UTF16Buffer* source_;
+ // Input stream. Must be initialized to an UC16CharacterStream.
+ UC16CharacterStream* source_;
// Buffer to hold literal values (identifiers, strings, numbers)
// using '\x00'-terminated UTF-8 encoding. Handles allocation internally.
diff --git a/src/scanner.cc b/src/scanner.cc
index d22ebc7..47e9895 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -36,63 +36,265 @@
namespace internal {
// ----------------------------------------------------------------------------
-// UTF16Buffer
+// BufferedUC16CharacterStreams
-// CharacterStreamUTF16Buffer
-CharacterStreamUTF16Buffer::CharacterStreamUTF16Buffer()
- : pushback_buffer_(0), last_(0), stream_(NULL) { }
+BufferedUC16CharacterStream::BufferedUC16CharacterStream()
+ : UC16CharacterStream(),
+ pushback_limit_(NULL) {
+ // Initialize buffer as being empty. First read will fill the buffer.
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_;
+}
+BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
-void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
- unibrow::CharacterStream* input,
- int start_position,
- int end_position) {
- stream_ = input;
- if (start_position > 0) {
- SeekForward(start_position);
+void BufferedUC16CharacterStream::PushBack(uc16 character) {
+ if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
+ // buffer_ is writable, buffer_cursor_ is const pointer.
+ buffer_[--buffer_cursor_ - buffer_] = character;
+ pos_--;
+ return;
}
- end_ = end_position != kNoEndPosition ? end_position : kMaxInt;
+ SlowPushBack(character);
}
-void CharacterStreamUTF16Buffer::PushBack(uc32 ch) {
- pushback_buffer()->Add(last_);
- last_ = ch;
+void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
+ // In pushback mode, the end of the buffer contains pushback,
+ // and the start of the buffer (from buffer start to pushback_limit_)
+ // contains valid data that comes just after the pushback.
+ // We NULL the pushback_limit_ if pushing all the way back to the
+ // start of the buffer.
+
+ if (pushback_limit_ == NULL) {
+ // Enter pushback mode.
+ pushback_limit_ = buffer_end_;
+ buffer_end_ = buffer_ + kBufferSize;
+ buffer_cursor_ = buffer_end_;
+ }
+ ASSERT(pushback_limit_ > buffer_);
+ ASSERT(pos_ > 0);
+ buffer_[--buffer_cursor_ - buffer_] = character;
+ if (buffer_cursor_ == buffer_) {
+ pushback_limit_ = NULL;
+ } else if (buffer_cursor_ < pushback_limit_) {
+ pushback_limit_ = buffer_cursor_;
+ }
pos_--;
}
-uc32 CharacterStreamUTF16Buffer::Advance() {
- ASSERT(end_ != kNoEndPosition);
- ASSERT(end_ >= 0);
- // NOTE: It is of importance to Persian / Farsi resources that we do
- // *not* strip format control characters in the scanner; see
- //
- // https://bugzilla.mozilla.org/show_bug.cgi?id=274152
- //
- // So, even though ECMA-262, section 7.1, page 11, dictates that we
- // must remove Unicode format-control characters, we do not. This is
- // in line with how IE and SpiderMonkey handles it.
- if (!pushback_buffer()->is_empty()) {
- pos_++;
- return last_ = pushback_buffer()->RemoveLast();
- } else if (stream_->has_more() && pos_ < end_) {
- pos_++;
- uc32 next = stream_->GetNext();
- return last_ = next;
- } else {
- // Note: currently the following increment is necessary to avoid a
- // test-parser problem!
- pos_++;
- return last_ = static_cast<uc32>(-1);
+bool BufferedUC16CharacterStream::ReadBlock() {
+ if (pushback_limit_ != NULL) {
+ buffer_cursor_ = buffer_;
+ buffer_end_ = pushback_limit_;
+ pushback_limit_ = NULL;
+ ASSERT(buffer_cursor_ != buffer_end_);
+ return true;
+ }
+ unsigned length = FillBuffer(pos_, kBufferSize);
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_ + length;
+ return length > 0;
+}
+
+
+unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
+ // Leave pushback mode (i.e., ignore that there might be valid data
+ // in the buffer before the pushback_limit_ point).
+ pushback_limit_ = NULL;
+ return BufferSeekForward(delta);
+}
+
+// ----------------------------------------------------------------------------
+// GenericStringUC16CharacterStream
+
+
+GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
+ Handle<String> data,
+ unsigned start_position,
+ unsigned end_position)
+ : string_(data),
+ length_(end_position) {
+ ASSERT(end_position >= start_position);
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_;
+ pos_ = start_position;
+}
+
+
+GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
+
+
+unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
+ unsigned old_pos = pos_;
+ pos_ = Min(pos_ + delta, length_);
+ ReadBlock();
+ return pos_ - old_pos;
+}
+
+
+unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
+ unsigned length) {
+ if (from_pos >= length_) return 0;
+ if (from_pos + length > length_) {
+ length = length_ - from_pos;
+ }
+ String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
+ return length;
+}
+
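FillBuffer treats the string as a random-access source: copy up to a buffer's worth of characters starting at the requested position, clamped to the string's end, and report how many were produced (zero meaning end of input). The same windowing logic in isolation, with illustrative names:

    #include <stdint.h>

    static unsigned FillWindow(const uint16_t* source, unsigned source_length,
                               unsigned from, uint16_t* buffer,
                               unsigned capacity) {
      if (from >= source_length) return 0;  // Past the end: nothing to read.
      unsigned n = source_length - from;
      if (n > capacity) n = capacity;       // Clamp to the buffer size.
      for (unsigned i = 0; i < n; i++) buffer[i] = source[from + i];
      return n;
    }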
+
+// ----------------------------------------------------------------------------
+// Utf8ToUC16CharacterStream
+Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
+ unsigned length)
+ : BufferedUC16CharacterStream(),
+ raw_data_(data),
+ raw_data_length_(length),
+ raw_data_pos_(0),
+ raw_character_position_(0) {
+ ReadBlock();
+}
+
+
+Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
+
+
+unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
+ unsigned old_pos = pos_;
+ unsigned target_pos = pos_ + delta;
+ SetRawPosition(target_pos);
+ pos_ = raw_character_position_;
+ ReadBlock();
+ return pos_ - old_pos;
+}
+
+
+unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
+ unsigned length) {
+ static const unibrow::uchar kMaxUC16Character = 0xffff;
+ SetRawPosition(char_position);
+ if (raw_character_position_ != char_position) {
+ // char_position was not a valid position in the stream (hit the end
+ // while spooling to it).
+ return 0u;
+ }
+ unsigned i = 0;
+ while (i < length) {
+ if (raw_data_pos_ == raw_data_length_) break;
+ unibrow::uchar c = raw_data_[raw_data_pos_];
+ if (c <= unibrow::Utf8::kMaxOneByteChar) {
+ raw_data_pos_++;
+ } else {
+ c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
+ raw_data_length_ - raw_data_pos_,
+ &raw_data_pos_);
+ // Don't allow characters outside of the BMP.
+ if (c > kMaxUC16Character) {
+ c = unibrow::Utf8::kBadChar;
+ }
+ }
+ buffer_[i++] = static_cast<uc16>(c);
+ }
+ raw_character_position_ = char_position + i;
+ return i;
+}
+
+
+static const byte kUtf8MultiByteMask = 0xC0;
+static const byte kUtf8MultiByteCharStart = 0xC0;
+static const byte kUtf8MultiByteCharFollower = 0x80;
+
+
+#ifdef DEBUG
+static bool IsUtf8MultiCharacterStart(byte first_byte) {
+ return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
+}
+#endif
+
+
+static bool IsUtf8MultiCharacterFollower(byte later_byte) {
+ return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
+}
+
+
+// Move the cursor back to point at the preceding UTF-8 character start
+// in the buffer.
+static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
+ byte character = buffer[--*cursor];
+ if (character > unibrow::Utf8::kMaxOneByteChar) {
+ ASSERT(IsUtf8MultiCharacterFollower(character));
+ // Last byte of a multi-byte character encoding. Step backwards until
+ // pointing to the first byte of the encoding, recognized by having the
+ // top two bits set.
+ while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
+ ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
}
}
-void CharacterStreamUTF16Buffer::SeekForward(int pos) {
- pos_ = pos;
- ASSERT(pushback_buffer()->is_empty());
- stream_->Seek(pos);
+// Move the cursor forward to point at the next following UTF-8 character start
+// in the buffer.
+static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
+ byte character = buffer[(*cursor)++];
+ if (character > unibrow::Utf8::kMaxOneByteChar) {
+ // First character of a multi-byte character encoding.
+ // The number of most-significant one-bits determines the length of the
+ // encoding:
+ // 110..... - (0xCx, 0xDx) one additional byte (minimum).
+ // 1110.... - (0xEx) two additional bytes.
+ // 11110... - (0xFx) three additional bytes (maximum).
+ ASSERT(IsUtf8MultiCharacterStart(character));
+    // The number of additional bytes is:
+ // 1 if value in range 0xC0 .. 0xDF.
+ // 2 if value in range 0xE0 .. 0xEF.
+ // 3 if value in range 0xF0 .. 0xF7.
+ // Encode that in a single value.
+ unsigned additional_bytes =
+ ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
+ *cursor += additional_bytes;
+ ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
+ }
+}
+
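The 0x3211 expression deserves unpacking: the top two payload bits of the lead byte ((character - 0xC0) >> 2, masked to 0, 8, or 12) select a nibble of the constant, and the nibbles encode the continuation-byte count. The trick in isolation (Utf8ExtraBytes is an illustrative name):

    #include <cassert>

    static unsigned Utf8ExtraBytes(unsigned char lead) {
      assert(lead >= 0xC0);                         // Multi-byte lead bytes only.
      unsigned shift = ((lead - 0xC0) >> 2) & 0xC;  // 0xC0..0xDF -> 0,
                                                    // 0xE0..0xEF -> 8,
                                                    // 0xF0..0xFF -> 12.
      return (0x3211u >> shift) & 0x03;             // 1, 2, or 3 extra bytes.
    }
    // Utf8ExtraBytes(0xC2) == 1, Utf8ExtraBytes(0xE2) == 2,
    // Utf8ExtraBytes(0xF0) == 3.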
+
+void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
+ if (raw_character_position_ > target_position) {
+ // Spool backwards in utf8 buffer.
+ do {
+ Utf8CharacterBack(raw_data_, &raw_data_pos_);
+ raw_character_position_--;
+ } while (raw_character_position_ > target_position);
+ return;
+ }
+ // Spool forwards in the utf8 buffer.
+ while (raw_character_position_ < target_position) {
+ if (raw_data_pos_ == raw_data_length_) return;
+ Utf8CharacterForward(raw_data_, &raw_data_pos_);
+ raw_character_position_++;
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// ExternalTwoByteStringUC16CharacterStream
+
+ExternalTwoByteStringUC16CharacterStream::
+ ~ExternalTwoByteStringUC16CharacterStream() { }
+
+
+ExternalTwoByteStringUC16CharacterStream
+ ::ExternalTwoByteStringUC16CharacterStream(
+ Handle<ExternalTwoByteString> data,
+ int start_position,
+ int end_position)
+ : UC16CharacterStream(),
+ source_(data),
+ raw_data_(data->GetTwoByteData(start_position)) {
+ buffer_cursor_ = raw_data_,
+ buffer_end_ = raw_data_ + (end_position - start_position);
+ pos_ = start_position;
}
@@ -115,15 +317,19 @@
complete_ = true;
}
+
// ----------------------------------------------------------------------------
// V8JavaScriptScanner
-void V8JavaScriptScanner::Initialize(Handle<String> source,
+V8JavaScriptScanner::V8JavaScriptScanner() : JavaScriptScanner() { }
+
+
+void V8JavaScriptScanner::Initialize(UC16CharacterStream* source,
int literal_flags) {
- source_ = stream_initializer_.Init(source, NULL, 0, source->length());
+ source_ = source;
+ literal_flags_ = literal_flags | kLiteralIdentifier;
// Need to capture identifiers in order to recognize "get" and "set"
// in object literals.
- literal_flags_ = literal_flags | kLiteralIdentifier;
Init();
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
@@ -133,79 +339,14 @@
}
-void V8JavaScriptScanner::Initialize(Handle<String> source,
- unibrow::CharacterStream* stream,
- int literal_flags) {
- source_ = stream_initializer_.Init(source, stream,
- 0, UTF16Buffer::kNoEndPosition);
- literal_flags_ = literal_flags | kLiteralIdentifier;
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
-void V8JavaScriptScanner::Initialize(Handle<String> source,
- int start_position,
- int end_position,
- int literal_flags) {
- source_ = stream_initializer_.Init(source, NULL,
- start_position, end_position);
- literal_flags_ = literal_flags | kLiteralIdentifier;
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
-UTF16Buffer* StreamInitializer::Init(Handle<String> source,
- unibrow::CharacterStream* stream,
- int start_position,
- int end_position) {
- // Either initialize the scanner from a character stream or from a
- // string.
- ASSERT(source.is_null() || stream == NULL);
-
- // Initialize the source buffer.
- if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
- two_byte_string_buffer_.Initialize(
- Handle<ExternalTwoByteString>::cast(source),
- start_position,
- end_position);
- return &two_byte_string_buffer_;
- } else if (!source.is_null() && StringShape(*source).IsExternalAscii()) {
- ascii_string_buffer_.Initialize(
- Handle<ExternalAsciiString>::cast(source),
- start_position,
- end_position);
- return &ascii_string_buffer_;
- } else {
- if (!source.is_null()) {
- safe_string_input_buffer_.Reset(source.location());
- stream = &safe_string_input_buffer_;
- }
- char_stream_buffer_.Initialize(source,
- stream,
- start_position,
- end_position);
- return &char_stream_buffer_;
- }
-}
-
// ----------------------------------------------------------------------------
// JsonScanner
-JsonScanner::JsonScanner() {}
+JsonScanner::JsonScanner() : Scanner() { }
-void JsonScanner::Initialize(Handle<String> source) {
- source_ = stream_initializer_.Init(source, NULL, 0, source->length());
+void JsonScanner::Initialize(UC16CharacterStream* source) {
+ source_ = source;
Init();
// Skip initial whitespace.
SkipJsonWhiteSpace();
diff --git a/src/scanner.h b/src/scanner.h
index adeea9b..572778f 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -35,67 +35,97 @@
namespace v8 {
namespace internal {
-// UTF16 buffer to read characters from a character stream.
-class CharacterStreamUTF16Buffer: public UTF16Buffer {
+// A buffered character stream based on a random access character
+// source (ReadBlock can be called with pos_ pointing to any position,
+// even positions before the current).
+class BufferedUC16CharacterStream: public UC16CharacterStream {
public:
- CharacterStreamUTF16Buffer();
- virtual ~CharacterStreamUTF16Buffer() {}
- void Initialize(Handle<String> data,
- unibrow::CharacterStream* stream,
- int start_position,
- int end_position);
- virtual void PushBack(uc32 ch);
- virtual uc32 Advance();
- virtual void SeekForward(int pos);
+ BufferedUC16CharacterStream();
+ virtual ~BufferedUC16CharacterStream();
- private:
- List<uc32> pushback_buffer_;
- uc32 last_;
- unibrow::CharacterStream* stream_;
+ virtual void PushBack(uc16 character);
- List<uc32>* pushback_buffer() { return &pushback_buffer_; }
+ protected:
+ static const unsigned kBufferSize = 512;
+ static const unsigned kPushBackStepSize = 16;
+
+ virtual unsigned SlowSeekForward(unsigned delta);
+ virtual bool ReadBlock();
+ virtual void SlowPushBack(uc16 character);
+
+ virtual unsigned BufferSeekForward(unsigned delta) = 0;
+ virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
+
+ const uc16* pushback_limit_;
+ uc16 buffer_[kBufferSize];
+};
+
+
+// Generic string stream.
+class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
+ public:
+ GenericStringUC16CharacterStream(Handle<String> data,
+ unsigned start_position,
+ unsigned end_position);
+ virtual ~GenericStringUC16CharacterStream();
+
+ protected:
+ virtual unsigned BufferSeekForward(unsigned delta);
+ virtual unsigned FillBuffer(unsigned position, unsigned length);
+
+ Handle<String> string_;
+ unsigned start_position_;
+ unsigned length_;
+};
+
+
+// UC16 stream based on a literal UTF-8 string.
+class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
+ public:
+ Utf8ToUC16CharacterStream(const byte* data, unsigned length);
+ virtual ~Utf8ToUC16CharacterStream();
+
+ protected:
+ virtual unsigned BufferSeekForward(unsigned delta);
+ virtual unsigned FillBuffer(unsigned char_position, unsigned length);
+ void SetRawPosition(unsigned char_position);
+
+ const byte* raw_data_;
+ unsigned raw_data_length_; // Measured in bytes, not characters.
+ unsigned raw_data_pos_;
+ // The character position of the character at raw_data[raw_data_pos_].
+ // Not necessarily the same as pos_.
+ unsigned raw_character_position_;
};
// UTF16 buffer to read characters from an external string.
-template <typename StringType, typename CharType>
-class ExternalStringUTF16Buffer: public UTF16Buffer {
+class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
public:
- ExternalStringUTF16Buffer();
- virtual ~ExternalStringUTF16Buffer() {}
- void Initialize(Handle<StringType> data,
- int start_position,
- int end_position);
- virtual void PushBack(uc32 ch);
- virtual uc32 Advance();
- virtual void SeekForward(int pos);
+ ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
+ int start_position,
+ int end_position);
+ virtual ~ExternalTwoByteStringUC16CharacterStream();
- private:
- const CharType* raw_data_; // Pointer to the actual array of characters.
+ virtual void PushBack(uc16 character) {
+ ASSERT(buffer_cursor_ > raw_data_);
+ buffer_cursor_--;
+ pos_--;
+ }
+ protected:
+ virtual unsigned SlowSeekForward(unsigned delta) {
+ // Fast case always handles seeking.
+ return 0;
+ }
+ virtual bool ReadBlock() {
+ // Entire string is read at start.
+ return false;
+ }
+ Handle<ExternalTwoByteString> source_;
+ const uc16* raw_data_; // Pointer to the actual array of characters.
};
-// Initializes a UTF16Buffer as input stream, using one of a number
-// of strategies depending on the available character sources.
-class StreamInitializer {
- public:
- UTF16Buffer* Init(Handle<String> source,
- unibrow::CharacterStream* stream,
- int start_position,
- int end_position);
- private:
- // Different UTF16 buffers used to pull characters from. Based on input one of
- // these will be initialized as the actual data source.
- CharacterStreamUTF16Buffer char_stream_buffer_;
- ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
- two_byte_string_buffer_;
- ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
-
- // Used to convert the source string into a character stream when a stream
- // is not passed to the scanner.
- SafeStringInputBuffer safe_string_input_buffer_;
-};
-
// ----------------------------------------------------------------------------
// V8JavaScriptScanner
// JavaScript scanner getting its input from either a V8 String or a unicode
@@ -103,19 +133,9 @@
class V8JavaScriptScanner : public JavaScriptScanner {
public:
- V8JavaScriptScanner() {}
-
- // Initialize the Scanner to scan source.
- void Initialize(Handle<String> source, int literal_flags = kAllLiterals);
- void Initialize(Handle<String> source,
- unibrow::CharacterStream* stream,
+ V8JavaScriptScanner();
+ void Initialize(UC16CharacterStream* source,
int literal_flags = kAllLiterals);
- void Initialize(Handle<String> source,
- int start_position, int end_position,
- int literal_flags = kAllLiterals);
-
- protected:
- StreamInitializer stream_initializer_;
};
@@ -123,8 +143,7 @@
public:
JsonScanner();
- // Initialize the Scanner to scan source.
- void Initialize(Handle<String> source);
+ void Initialize(UC16CharacterStream* source);
// Returns the next token.
Token::Value Next();
@@ -138,7 +157,7 @@
// Recognizes all of the single-character tokens directly, or calls a function
// to scan a number, string or identifier literal.
// The only allowed whitespace characters between tokens are tab,
- // carrige-return, newline and space.
+ // carriage-return, newline and space.
void ScanJson();
// A JSON number (production JSONNumber) is a subset of the valid JavaScript
@@ -159,60 +178,8 @@
// are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral).
Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
-
- StreamInitializer stream_initializer_;
};
-
-// ExternalStringUTF16Buffer
-template <typename StringType, typename CharType>
-ExternalStringUTF16Buffer<StringType, CharType>::ExternalStringUTF16Buffer()
- : raw_data_(NULL) { }
-
-
-template <typename StringType, typename CharType>
-void ExternalStringUTF16Buffer<StringType, CharType>::Initialize(
- Handle<StringType> data,
- int start_position,
- int end_position) {
- ASSERT(!data.is_null());
- raw_data_ = data->resource()->data();
-
- ASSERT(end_position <= data->length());
- if (start_position > 0) {
- SeekForward(start_position);
- }
- end_ =
- end_position != kNoEndPosition ? end_position : data->length();
-}
-
-
-template <typename StringType, typename CharType>
-uc32 ExternalStringUTF16Buffer<StringType, CharType>::Advance() {
- if (pos_ < end_) {
- return raw_data_[pos_++];
- } else {
- // note: currently the following increment is necessary to avoid a
- // test-parser problem!
- pos_++;
- return static_cast<uc32>(-1);
- }
-}
-
-
-template <typename StringType, typename CharType>
-void ExternalStringUTF16Buffer<StringType, CharType>::PushBack(uc32 ch) {
- pos_--;
- ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
- ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
-}
-
-
-template <typename StringType, typename CharType>
-void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
- pos_ = pos;
-}
-
} } // namespace v8::internal
#endif // V8_SCANNER_H_
diff --git a/src/serialize.cc b/src/serialize.cc
index 6edc4fa..00a601f 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -486,6 +486,18 @@
UNCLASSIFIED,
36,
"LDoubleConstant::one_half");
+ Add(ExternalReference::address_of_negative_infinity().address(),
+ UNCLASSIFIED,
+ 37,
+ "LDoubleConstant::negative_infinity");
+ Add(ExternalReference::power_double_double_function().address(),
+ UNCLASSIFIED,
+ 38,
+ "power_double_double_function");
+ Add(ExternalReference::power_double_int_function().address(),
+ UNCLASSIFIED,
+ 39,
+ "power_double_int_function");
}
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index ad09ae2..b5ee1e4 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -481,7 +481,7 @@
}
// -----------------------------------------------------------------------------
-// LargeObjectSpace
+// NewSpace
MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
@@ -501,6 +501,18 @@
}
+template <typename StringType>
+void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
+ ASSERT(length <= string->length());
+ ASSERT(string->IsSeqString());
+ ASSERT(string->address() + StringType::SizeFor(string->length()) ==
+ allocation_info_.top);
+ allocation_info_.top =
+ string->address() + StringType::SizeFor(length);
+ string->set_length(length);
+}
+
+
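ShrinkStringAtAllocationBoundary works only because new space is a bump-pointer allocator and the string is the most recent allocation: its end coincides with the allocation top, so trimming it is just moving the top back. The invariant sketched on a toy allocator (BumpSpace is illustrative):

    #include <cassert>
    #include <cstddef>

    struct BumpSpace {
      char* top;  // Next free byte.
      void* Allocate(size_t bytes) {
        char* result = top;
        top += bytes;
        return result;
      }
      // Give back the tail of the most recent allocation. This mirrors the
      // precondition checked by the ASSERT against allocation_info_.top.
      void ShrinkLast(void* object, size_t old_size, size_t new_size) {
        assert(new_size <= old_size);
        assert(static_cast<char*>(object) + old_size == top);
        top = static_cast<char*>(object) + new_size;
      }
    };
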
bool FreeListNode::IsFreeListNode(HeapObject* object) {
return object->map() == Heap::raw_unchecked_byte_array_map()
|| object->map() == Heap::raw_unchecked_one_pointer_filler_map()
diff --git a/src/spaces.cc b/src/spaces.cc
index 369eb6f..8e9d889 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -917,7 +917,7 @@
}
more_fine_grained_table = new AtomicWord[words_needed];
for (int i = 0; i < words_needed; i++) {
- more_fine_grained_table[i] = NULL;
+ more_fine_grained_table[i] = kUnusedChunkTableEntry;
}
Release_Store(&table[index],
reinterpret_cast<AtomicWord>(more_fine_grained_table));
diff --git a/src/spaces.h b/src/spaces.h
index 2e85003..2c17ef0 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1643,6 +1643,11 @@
virtual bool ReserveSpace(int bytes);
+ // Resizes a sequential string which must be the most recent thing that was
+ // allocated in new space.
+ template <typename StringType>
+ inline void ShrinkStringAtAllocationBoundary(String* string, int len);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
virtual void Protect();
diff --git a/src/string.js b/src/string.js
index 3b3c82b..6d4e0c1 100644
--- a/src/string.js
+++ b/src/string.js
@@ -101,28 +101,28 @@
// ECMA-262 section 15.5.4.7
-function StringIndexOf(searchString /* position */) { // length == 1
- var subject_str = TO_STRING_INLINE(this);
- var pattern_str = TO_STRING_INLINE(searchString);
- var subject_str_len = subject_str.length;
- var pattern_str_len = pattern_str.length;
+function StringIndexOf(pattern /* position */) { // length == 1
+ var subject = TO_STRING_INLINE(this);
+ var pattern = TO_STRING_INLINE(pattern);
+ var subject_len = subject.length;
+ var pattern_len = pattern.length;
var index = 0;
if (%_ArgumentsLength() > 1) {
var arg1 = %_Arguments(1); // position
index = TO_INTEGER(arg1);
}
if (index < 0) index = 0;
- if (index > subject_str_len) index = subject_str_len;
- if (pattern_str_len + index > subject_str_len) return -1;
- return %StringIndexOf(subject_str, pattern_str, index);
+ if (index > subject_len) index = subject_len;
+ if (pattern_len + index > subject_len) return -1;
+ return %StringIndexOf(subject, pattern, index);
}
// ECMA-262 section 15.5.4.8
-function StringLastIndexOf(searchString /* position */) { // length == 1
+function StringLastIndexOf(pat /* position */) { // length == 1
var sub = TO_STRING_INLINE(this);
var subLength = sub.length;
- var pat = TO_STRING_INLINE(searchString);
+ var pat = TO_STRING_INLINE(pat);
var patLength = pat.length;
var index = subLength - patLength;
if (%_ArgumentsLength() > 1) {
@@ -150,10 +150,8 @@
// do anything locale specific.
function StringLocaleCompare(other) {
if (%_ArgumentsLength() === 0) return 0;
-
- var this_str = TO_STRING_INLINE(this);
- var other_str = TO_STRING_INLINE(other);
- return %StringLocaleCompare(this_str, other_str);
+ return %StringLocaleCompare(TO_STRING_INLINE(this),
+ TO_STRING_INLINE(other));
}
@@ -177,9 +175,7 @@
// otherwise we call the runtime system.
function SubString(string, start, end) {
// Use the one character string cache.
- if (start + 1 == end) {
- return %_StringCharAt(string, start);
- }
+ if (start + 1 == end) return %_StringCharAt(string, start);
return %_SubString(string, start, end);
}
@@ -208,7 +204,10 @@
replace);
}
} else {
- return StringReplaceRegExp(subject, search, replace);
+ return %StringReplaceRegExpWithString(subject,
+ search,
+ TO_STRING_INLINE(replace),
+ lastMatchInfo);
}
}
@@ -224,7 +223,11 @@
// Compute the string to replace with.
if (IS_FUNCTION(replace)) {
- builder.add(replace.call(null, search, start, subject));
+ builder.add(%_CallFunction(%GetGlobalReceiver(),
+ search,
+ start,
+ subject,
+ replace));
} else {
reusableMatchInfo[CAPTURE0] = start;
reusableMatchInfo[CAPTURE1] = end;
@@ -239,15 +242,6 @@
}
-// Helper function for regular expressions in String.prototype.replace.
-function StringReplaceRegExp(subject, regexp, replace) {
- return %StringReplaceRegExpWithString(subject,
- regexp,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
-}
-
-
// Expand the $-expressions in the string and return a new string with
// the result.
function ExpandReplacement(string, subject, matchInfo, builder) {
@@ -408,9 +402,7 @@
lastMatchInfoOverride = override;
var func_result =
%_CallFunction(receiver, elem, match_start, subject, replace);
- if (!IS_STRING(func_result)) {
- func_result = NonStringToString(func_result);
- }
+ func_result = TO_STRING_INLINE(func_result);
res[i] = func_result;
match_start += elem.length;
}
@@ -424,9 +416,7 @@
// Use the apply argument as backing for global RegExp properties.
lastMatchInfoOverride = elem;
var func_result = replace.apply(null, elem);
- if (!IS_STRING(func_result)) {
- func_result = NonStringToString(func_result);
- }
+ func_result = TO_STRING_INLINE(func_result);
res[i] = func_result;
}
i++;
@@ -487,8 +477,7 @@
} else {
regexp = new $RegExp(re);
}
- var s = TO_STRING_INLINE(this);
- var match = DoRegExpExec(regexp, s, 0);
+ var match = DoRegExpExec(regexp, TO_STRING_INLINE(this), 0);
if (match) {
return match[CAPTURE0];
}
@@ -576,14 +565,14 @@
while (true) {
if (startIndex === length) {
- result[result.length] = subject.slice(currentIndex, length);
+ result.push(subject.slice(currentIndex, length));
break;
}
var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
if (IS_NULL(matchInfo)) {
- result[result.length] = subject.slice(currentIndex, length);
+ result.push(subject.slice(currentIndex, length));
break;
}
@@ -595,17 +584,21 @@
continue;
}
- result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
+ result.push(SubString(subject, currentIndex, matchInfo[CAPTURE0]));
if (result.length === limit) break;
- var num_captures = NUMBER_OF_CAPTURES(matchInfo);
- for (var i = 2; i < num_captures; i += 2) {
- var start = matchInfo[CAPTURE(i)];
- var end = matchInfo[CAPTURE(i + 1)];
- if (start != -1 && end != -1) {
- result[result.length] = SubString(subject, start, end);
+ var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
+ for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
+ var start = matchInfo[i++];
+ var end = matchInfo[i++];
+ if (end != -1) {
+ if (start + 1 == end) {
+ result.push(%_StringCharAt(subject, start));
+ } else {
+ result.push(%_SubString(subject, start, end));
+ }
} else {
- result[result.length] = void 0;
+ result.push(void 0);
}
if (result.length === limit) break outer_loop;
}
@@ -656,7 +649,9 @@
}
}
- return SubString(s, start_i, end_i);
+ return (start_i + 1 == end_i
+ ? %_StringCharAt(s, start_i)
+ : %_SubString(s, start_i, end_i));
}
@@ -694,7 +689,9 @@
var end = start + len;
if (end > s.length) end = s.length;
- return SubString(s, start, end);
+ return (start + 1 == end
+ ? %_StringCharAt(s, start)
+ : %_SubString(s, start, end));
}
@@ -875,11 +872,6 @@
}
-function StringToJSON(key) {
- return CheckJSONPrimitive(this.valueOf());
-}
-
-
// -------------------------------------------------------------------
function SetupString() {
@@ -929,8 +921,7 @@
"small", StringSmall,
"strike", StringStrike,
"sub", StringSub,
- "sup", StringSup,
- "toJSON", StringToJSON
+ "sup", StringSup
));
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 651f018..86e7201 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1501,25 +1501,31 @@
}
-MaybeObject* CallStubCompiler::CompileCustomCall(int generator_id,
+bool CallStubCompiler::HasCustomCallGenerator(BuiltinFunctionId id) {
+#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
+ CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+ return false;
+}
+
+
+MaybeObject* CallStubCompiler::CompileCustomCall(BuiltinFunctionId id,
Object* object,
JSObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* fname) {
- ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
- switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, name) \
- case k##name##CallGenerator: \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname);
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
+#define CALL_GENERATOR_CASE(name) \
+ if (id == k##name) { \
+ return CallStubCompiler::Compile##name##Call(object, \
+ holder, \
+ cell, \
+ function, \
+ fname); \
}
- UNREACHABLE();
+ CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+ ASSERT(!HasCustomCallGenerator(id));
return Heap::undefined_value();
}
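
The refactoring leans on the X-macro idiom: one list macro expands into both the membership test and the dispatch chain, so the two can never drift apart and the old hand-maintained generator-id enum disappears. The pattern reduced to a standalone sketch with illustrative names:

    #include <cstdio>

    #define HANDLER_LIST(V) V(Push) V(Pop) V(CharAt)

    enum FunctionId { kPush, kPop, kCharAt, kUnknown };

    // Expand one handler function per list entry.
    #define DECLARE_HANDLER(name) \
      static void Handle##name() { std::printf(#name "\n"); }
    HANDLER_LIST(DECLARE_HANDLER)
    #undef DECLARE_HANDLER

    static bool HasHandler(FunctionId id) {
    #define HANDLER_CASE(name) if (id == k##name) return true;
      HANDLER_LIST(HANDLER_CASE)
    #undef HANDLER_CASE
      return false;
    }

    static void Dispatch(FunctionId id) {
    #define HANDLER_CASE(name) if (id == k##name) { Handle##name(); return; }
      HANDLER_LIST(HANDLER_CASE)
    #undef HANDLER_CASE
      // Falls through silently for ids without a handler.
    }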
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 7a6d400..a7829a6 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -643,37 +643,20 @@
};
-// List of functions with custom constant call IC stubs.
-//
-// Installation of custom call generators for the selected builtins is
-// handled by the bootstrapper.
-//
-// Each entry has a name of a global object property holding an object
-// optionally followed by ".prototype" (this controls whether the
-// generator is set on the object itself or, in case it's a function,
-// on the its instance prototype), a name of a builtin function on the
-// object (the one the generator is set for), and a name of the
-// generator (used to build ids and generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, pop, ArrayPop) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String, fromCharCode, StringFromCharCode) \
- V(Math, floor, MathFloor) \
- V(Math, abs, MathAbs)
+// Subset of FUNCTIONS_WITH_ID_LIST with custom constant/global call
+// IC stubs.
+#define CUSTOM_CALL_IC_GENERATORS(V) \
+ V(ArrayPush) \
+ V(ArrayPop) \
+ V(StringCharCodeAt) \
+ V(StringCharAt) \
+ V(StringFromCharCode) \
+ V(MathFloor) \
+ V(MathAbs)
class CallStubCompiler: public StubCompiler {
public:
- enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, name) \
- k##name##CallGenerator,
- CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
-#undef DECLARE_CALL_GENERATOR_ID
- kNumCallGenerators
- };
-
CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
@@ -697,16 +680,20 @@
JSFunction* function,
String* name);
- // Compiles a custom call constant/global IC using the generator
- // with given id. For constant calls cell is NULL.
- MUST_USE_RESULT MaybeObject* CompileCustomCall(int generator_id,
+ static bool HasCustomCallGenerator(BuiltinFunctionId id);
+
+ private:
+ // Compiles a custom call constant/global IC. For constant calls
+ // cell is NULL. Returns undefined if there is no custom call code
+ // for the given function or it can't be generated.
+ MUST_USE_RESULT MaybeObject* CompileCustomCall(BuiltinFunctionId id,
Object* object,
JSObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name);
-#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name) \
+#define DECLARE_CALL_GENERATOR(name) \
MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object, \
JSObject* holder, \
JSGlobalPropertyCell* cell, \
@@ -715,7 +702,6 @@
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
- private:
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
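
(For reference, the BuiltinFunctionId-keyed generators above specialize
monomorphic call sites of the following shapes; the snippet is illustrative,
not taken from the source:)

  var a = [];
  a.push(1);                // ArrayPush
  a.pop();                  // ArrayPop
  "abc".charCodeAt(0);      // StringCharCodeAt
  "abc".charAt(1);          // StringCharAt
  String.fromCharCode(97);  // StringFromCharCode
  Math.floor(3.7);          // MathFloor
  Math.abs(-1);             // MathAbs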
diff --git a/src/top.cc b/src/top.cc
index 6187ef0..3d86d11 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -40,7 +40,9 @@
namespace v8 {
namespace internal {
+#ifdef ENABLE_LOGGING_AND_PROFILING
Semaphore* Top::runtime_profiler_semaphore_ = NULL;
+#endif
ThreadLocalTop Top::thread_local_;
Mutex* Top::break_access_ = OS::CreateMutex();
@@ -277,8 +279,10 @@
void Top::Initialize() {
CHECK(!initialized);
+#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(runtime_profiler_semaphore_ == NULL);
runtime_profiler_semaphore_ = OS::CreateSemaphore(0);
+#endif
InitializeThreadLocal();
@@ -297,8 +301,10 @@
void Top::TearDown() {
if (initialized) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
delete runtime_profiler_semaphore_;
runtime_profiler_semaphore_ = NULL;
+#endif
// Remove the external reference to the preallocated stack memory.
if (preallocated_message_space != NULL) {
diff --git a/src/type-info.cc b/src/type-info.cc
index 5f6022b..8719439 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -142,6 +142,9 @@
CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
switch (state) {
case CompareIC::UNINITIALIZED:
+ // Uninitialized means never executed.
+ // TODO(fschneider): Introduce a separate value for never-executed ICs.
+ return unknown;
case CompareIC::SMIS:
return TypeInfo::Smi();
case CompareIC::HEAP_NUMBERS:
@@ -184,6 +187,9 @@
switch (type) {
case TRBinaryOpIC::UNINITIALIZED:
+ // Uninitialized means never executed.
+ // TODO(fschneider): Introduce a separate value for never-executed ICs
+ return unknown;
case TRBinaryOpIC::SMI:
switch (result_type) {
case TRBinaryOpIC::UNINITIALIZED:
@@ -224,6 +230,9 @@
CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
switch (state) {
case CompareIC::UNINITIALIZED:
+ // Uninitialized means never executed.
+ // TODO(fschneider): Introduce a separate value for never-executed ICs.
+ return unknown;
case CompareIC::SMIS:
return TypeInfo::Smi();
case CompareIC::HEAP_NUMBERS:
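
(The three hunks above make an uninitialized IC report unknown instead of
falling through to the Smi case; a comparison that never executed carries no
type feedback, for example:)

  function f(a, b, flag) {
    // If flag is always false, the compare IC for a < b stays UNINITIALIZED;
    // treating that as Smi feedback would mistype the branch if it ever runs.
    if (flag) return a < b;
    return false;
  }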
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 428ebc6..a5cf42e 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -247,7 +247,15 @@
SC(smi_checks_removed, V8.SmiChecksRemoved) \
SC(map_checks_removed, V8.MapChecksRemoved) \
SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)
+ SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount) \
+ SC(instance_of, V8.InstanceOf) \
+ SC(instance_of_cache, V8.InstanceOfCache) \
+ SC(instance_of_stub_true, V8.InstanceOfStubTrue) \
+ SC(instance_of_stub_false, V8.InstanceOfStubFalse) \
+ SC(instance_of_stub_false_null, V8.InstanceOfStubFalseNull) \
+ SC(instance_of_stub_false_string, V8.InstanceOfStubFalseString) \
+ SC(instance_of_full, V8.InstanceOfFull) \
+ SC(instance_of_slow, V8.InstanceOfSlow)
// This file contains all the v8 counters that are in use.
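
(A best-guess illustration of the instanceof evaluations the new counters break
down; the actual bucketing lives in the instanceof stub, not in this hunk, so
the labels below are assumptions:)

  function Base() {}
  var o = new Base();
  o instanceof Base;     // fast path answers true  (InstanceOfStubTrue?)
  ({}) instanceof Base;  // fast path answers false (InstanceOfStubFalse?)
  "s" instanceof Base;   // non-object left operand (InstanceOfStubFalseString?)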
diff --git a/src/v8natives.js b/src/v8natives.js
index 50a2774..9fd2162 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -563,7 +563,7 @@
}
// Step 7
- if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
+ if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
// Step 9
if (IsDataDescriptor(current) != IsDataDescriptor(desc))
@@ -615,12 +615,20 @@
} else {
flag |= READ_ONLY;
}
- %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
+ var value = void 0; // Default value is undefined.
+ if (desc.hasValue()) {
+ value = desc.getValue();
+ } else if (!IS_UNDEFINED(current)) {
+ value = current.getValue();
+ }
+ %DefineOrRedefineDataProperty(obj, p, value, flag);
} else {
- if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
+ if (desc.hasGetter() &&
+ (IS_FUNCTION(desc.getGet()) || IS_UNDEFINED(desc.getGet()))) {
%DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
}
- if (desc.hasSetter() && IS_FUNCTION(desc.getSet())) {
+ if (desc.hasSetter() &&
+ (IS_FUNCTION(desc.getSet()) || IS_UNDEFINED(desc.getSet()))) {
%DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
}
}
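
(Two observable fixes in the hunk above, sketched as ES5 usage; the values are
illustrative:)

  var o = {};
  Object.defineProperty(o, "p", { value: 1, writable: true });
  // Redefining without a value descriptor now keeps the current value
  // instead of resetting it to undefined:
  Object.defineProperty(o, "p", { writable: false });
  o.p;  // 1
  // An explicitly undefined getter or setter is now accepted:
  Object.defineProperty(o, "q", { get: undefined, configurable: true });
  o.q;  // undefined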
@@ -903,19 +911,13 @@
}
-function BooleanToJSON(key) {
- return CheckJSONPrimitive(this.valueOf());
-}
-
-
// ----------------------------------------------------------------------------
function SetupBoolean() {
InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
"toString", BooleanToString,
- "valueOf", BooleanValueOf,
- "toJSON", BooleanToJSON
+ "valueOf", BooleanValueOf
));
}
@@ -1015,18 +1017,6 @@
}
-function CheckJSONPrimitive(val) {
- if (!IsPrimitive(val))
- throw MakeTypeError('result_not_primitive', ['toJSON', val]);
- return val;
-}
-
-
-function NumberToJSON(key) {
- return CheckJSONPrimitive(this.valueOf());
-}
-
-
// ----------------------------------------------------------------------------
function SetupNumber() {
@@ -1067,15 +1057,13 @@
"valueOf", NumberValueOf,
"toFixed", NumberToFixed,
"toExponential", NumberToExponential,
- "toPrecision", NumberToPrecision,
- "toJSON", NumberToJSON
+ "toPrecision", NumberToPrecision
));
}
SetupNumber();
-
// ----------------------------------------------------------------------------
// Function
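
(As with String.prototype.toJSON earlier, the Boolean and Number hooks and
CheckJSONPrimitive can go because JSON.stringify handles wrapper objects
directly:)

  JSON.stringify(new Number(42));     // "42"
  JSON.stringify(new Boolean(true));  // "true"
  "toJSON" in Number.prototype;       // false after this change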
diff --git a/src/v8preparserdll-main.cc b/src/v8preparserdll-main.cc
new file mode 100644
index 0000000..c0344d3
--- /dev/null
+++ b/src/v8preparserdll-main.cc
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <windows.h>
+
+#include "../include/v8-preparser.h"
+
+extern "C" {
+BOOL WINAPI DllMain(HANDLE hinstDLL,
+ DWORD dwReason,
+ LPVOID lpvReserved) {
+ // Do nothing.
+ return TRUE;
+}
+}
diff --git a/src/version.cc b/src/version.cc
index bfe9a33..60e61b1 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
-#define BUILD_NUMBER 2
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 3
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 540593f..456d076 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -30,11 +30,13 @@
#if defined(V8_TARGET_ARCH_X64)
#include "codegen-inl.h"
-#include "macro-assembler.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
namespace v8 {
namespace internal {
+
#define __ ACCESS_MASM(masm)
@@ -71,817 +73,6 @@
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Store the arguments adaptor context sentinel.
- __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
- // Push the function on the stack.
- __ push(rdi);
-
- // Preserve the number of arguments on the stack. Must preserve both
- // rax and rbx because these registers are used when copying the
- // arguments and the receiver.
- __ Integer32ToSmi(rcx, rax);
- __ push(rcx);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- // Remove caller arguments from the stack.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : actual number of arguments
- // -- rbx : expected number of arguments
- // -- rdx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- __ IncrementCounter(&Counters::arguments_adaptors, 1);
-
- Label enough, too_few;
- __ cmpq(rax, rbx);
- __ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(rcx);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(rcx, rbx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(rcx);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(rcx, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ bind(&fill);
- __ incq(rcx);
- __ push(kScratchRegister);
- __ cmpq(rcx, rbx);
- __ j(less, &fill);
-
- // Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ call(rdx);
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(rdx);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
- // ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
- //
- // rax contains the number of arguments, n, not counting the receiver.
- //
- // 1. Make sure we have at least one argument.
- { Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done);
- __ pop(rbx);
- __ Push(Factory::undefined_value());
- __ push(rbx);
- __ incq(rax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ JumpIfSmi(rdi, &non_function);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &non_function);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ JumpIfSmi(rbx, &convert_to_object);
-
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &convert_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
-
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- __ LeaveInternalFrame();
- // Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
-
- __ jmp(&shift_arguments);
- }
-
-
- // 3b. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ bind(&non_function);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
- __ xor_(rdi, rdi);
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ movq(rcx, rax);
- __ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
- { Label function;
- __ testq(rdi, rdi);
- __ j(not_zero, &function);
- __ xor_(rbx, rbx);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
- __ j(not_equal,
- Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
-
- ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- // Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
- __ EnterInternalFrame();
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
-
- // Change context eagerly to get the right global object if
- // necessary.
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- Label call_to_object, use_global_receiver, push_receiver;
- __ movq(rbx, Operand(rbp, kReceiverOffset));
- __ JumpIfSmi(rbx, &call_to_object);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &call_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(rax);
-
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
-
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove function, receiver, and arguments
-}
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
- __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
- __ movq(result,
- Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int initial_capacity,
- Label* gc_required) {
- ASSERT(initial_capacity >= 0);
-
- // Load the initial map from the array function.
- __ movq(scratch1, FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- Factory::empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- Factory::empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, Factory::the_hole_value());
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(scratch1, 0), scratch3);
- __ addq(scratch1, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(scratch1, scratch2);
- __ j(below, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- Label not_empty, allocated;
-
- // Load the initial map from the array function.
- __ movq(elements_array,
- FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check whether an empty sized array is requested.
- __ testq(array_size, array_size);
- __ j(not_zero, &not_empty);
-
- // If an empty array is requested, allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- __ bind(&not_empty);
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ bind(&allocated);
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, Factory::empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- Factory::fixed_array_map());
- Label not_empty_2, fill_array;
- __ SmiTest(array_size);
- __ j(not_zero, &not_empty_2);
- // Length of the FixedArray is the number of pre-allocated elements even
- // though the actual JSArray has length 0.
- __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
- Smi::FromInt(kPreallocatedArrayElements));
- __ jmp(&fill_array);
- __ bind(&not_empty_2);
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- __ bind(&fill_array);
- if (fill_with_hole) {
- Label loop, entry;
- __ Move(scratch, Factory::the_hole_value());
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
- Label argc_one_or_more, argc_two_or_more;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- kPreallocatedArrayElements,
- call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0]: return address
- // esp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
- __ Integer32ToSmi(rdx, rdx); // Convert argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // esp[0] : return address
- // esp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // esp[0]: return address
- // esp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
- // Location of the first array element (Parameter fill_with_hole to
- // AllocateJSArray is false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // esp[0]: return address
- // esp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), kScratchRegister);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // esp[0]: return address
- // esp[8]: last argument
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, rdi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
- Handle<Code> array_code(code);
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_constructor;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin Array function which
- // always has a map.
- GenerateLoadArrayFunction(masm, rbx);
- __ cmpq(rdi, rbx);
- __ Check(equal, "Unexpected Array function");
- // Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi.
- ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // TODO(849): implement custom construct stub.
- // Generate a copy of the generic stub for now.
- Generate_JSConstructStubGeneric(masm);
-}
-
-
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
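
(The long block deleted above is code motion: the same builtins reappear after
the deoptimization entry points further down in this diff, with Generate_ArrayCode
switching to LoadGlobalFunction along the way. Observable behavior such as
Function.prototype.apply's overflow guard is unchanged, e.g.:)

  function f() { return arguments.length; }
  try {
    // Unrolling ~10M arguments exceeds the real stack limit checked above.
    f.apply(null, new Array(10 * 1024 * 1024));
  } catch (e) {
    // The APPLY_OVERFLOW path raises a RangeError instead of overflowing
    // the stack.
  }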
@@ -1368,13 +559,18 @@
}
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
__ int3();
}
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- __ int3();
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
@@ -1383,11 +579,813 @@
}
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ // Stack Layout:
+ // rsp[0]: Return address
+ // rsp[1]: Argument n
+ // rsp[2]: Argument n-1
+ // ...
+ // rsp[n]: Argument 1
+ // rsp[n+1]: Receiver (function to call)
+ //
+ // rax contains the number of arguments, n, not counting the receiver.
+ //
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ testq(rax, rax);
+ __ j(not_zero, &done);
+ __ pop(rbx);
+ __ Push(Factory::undefined_value());
+ __ push(rbx);
+ __ incq(rax);
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label non_function;
+ // The function to call is at position n+1 on the stack.
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ JumpIfSmi(rdi, &non_function);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &non_function);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ JumpIfSmi(rbx, &convert_to_object);
+
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ j(equal, &use_global_receiver);
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &use_global_receiver);
+
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &convert_to_object);
+ __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+ __ j(below_equal, &shift_arguments);
+
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
+
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+
+ __ pop(rax);
+ __ SmiToInteger32(rax, rax);
+ __ LeaveInternalFrame();
+ // Restore the function to rdi.
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ jmp(&patch_receiver);
+
+ // Use the global receiver object from the called function as the
+ // receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+
+ __ jmp(&shift_arguments);
+ }
+
+
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ __ bind(&non_function);
+ __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ xor_(rdi, rdi);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ __ bind(&shift_arguments);
+ { Label loop;
+ __ movq(rcx, rax);
+ __ bind(&loop);
+ __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+ __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+ __ decq(rcx);
+ __ j(not_sign, &loop); // While non-negative (to copy return address).
+ __ pop(rbx); // Discard copy of return address.
+ __ decq(rax); // One fewer argument (first argument is new receiver).
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ { Label function;
+ __ testq(rdi, rdi);
+ __ j(not_zero, &function);
+ __ xor_(rbx, rbx);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register edx without checking arguments.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movsxlq(rbx,
+ FieldOperand(rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ cmpq(rax, rbx);
+ __ j(not_equal,
+ Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+
+ ParameterCount expected(0);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ // Stack at entry:
+ // rsp: return address
+ // rsp+8: arguments
+ // rsp+16: receiver ("this")
+ // rsp+24: function
+ __ EnterInternalFrame();
+ // Stack frame:
+ // rbp: Old base pointer
+ // rbp[1]: return address
+ // rbp[2]: function arguments
+ // rbp[3]: receiver
+ // rbp[4]: function
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(Operand(rbp, kArgumentsOffset));
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movq(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subq(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(rax); // limit
+ __ push(Immediate(0)); // index
+
+ // Change context eagerly to get the right global object if
+ // necessary.
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ JumpIfSmi(rbx, &call_to_object);
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ j(equal, &use_global_receiver);
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &use_global_receiver);
+
+ // If given receiver is already a JavaScript object then there's no
+ // reason for converting it.
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &call_to_object);
+ __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+ __ j(below_equal, &push_receiver);
+
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ jmp(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(rbx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Push the nth argument.
+ __ push(rax);
+
+ // Update the index on the stack and in register rax.
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ __ movq(Operand(rbp, kIndexOffset), rax);
+
+ __ bind(&entry);
+ __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Invoke the function.
+ ParameterCount actual(rax);
+ __ SmiToInteger32(rax, rax);
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+ __ LeaveInternalFrame();
+ __ ret(3 * kPointerSize); // remove function, receiver, and arguments
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity >= 0);
+
+ // Load the initial map from the array function.
+ __ movq(scratch1, FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+ Factory::empty_fixed_array());
+ // Field JSArray::kElementsOffset is initialized later.
+ __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
+
+ // If no storage is requested for the elements array just set the empty
+ // fixed array.
+ if (initial_capacity == 0) {
+ __ Move(FieldOperand(result, JSArray::kElementsOffset),
+ Factory::empty_fixed_array());
+ return;
+ }
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array
+ // scratch2: start of next object
+ __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
+ Smi::FromInt(initial_capacity));
+
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+ static const int kLoopUnfoldLimit = 4;
+ ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ __ Move(scratch3, Factory::the_hole_value());
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ // Use a scratch register here to have only one reloc info when unfolding
+ // the loop.
+ for (int i = 0; i < initial_capacity; i++) {
+ __ movq(FieldOperand(scratch1,
+ FixedArray::kHeaderSize + i * kPointerSize),
+ scratch3);
+ }
+ } else {
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(scratch1, 0), scratch3);
+ __ addq(scratch1, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(scratch1, scratch2);
+ __ j(below, &loop);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array and elements_array_end (see
+// below for when that is not the case). If the parameter fill_with_holes is
+// true the allocated elements backing store is filled with the hole values
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array,
+ Register elements_array_end,
+ Register scratch,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ movq(elements_array,
+ FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ testq(array_size, array_size);
+ __ j(not_zero, &not_empty);
+
+ // If an empty array is requested, allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested elements.
+ __ bind(&not_empty);
+ SmiIndex index =
+ masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ index.scale,
+ index.reg,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array: initial map
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+ __ Move(elements_array, Factory::empty_fixed_array());
+ __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ lea(elements_array, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+ // Initialize the fixed array. FixedArray length is stored as a smi.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ Label not_empty_2, fill_array;
+ __ SmiTest(array_size);
+ __ j(not_zero, &not_empty_2);
+ // Length of the FixedArray is the number of pre-allocated elements even
+ // though the actual JSArray has length 0.
+ __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
+ Smi::FromInt(kPreallocatedArrayElements));
+ __ jmp(&fill_array);
+ __ bind(&not_empty_2);
+ // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+ // same.
+ __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ __ bind(&fill_array);
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ Move(scratch, Factory::the_hole_value());
+ __ lea(elements_array, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(elements_array, 0), scratch);
+ __ addq(elements_array, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(elements_array, elements_array_end);
+ __ j(below, &loop);
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// rdi: constructor (built-in Array function)
+// rax: argc
+// rsp[0]: return address
+// rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments.
+ __ testq(rax, rax);
+ __ j(not_zero, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ rdi,
+ rbx,
+ rcx,
+ rdx,
+ r8,
+ kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(kPointerSize);
+
+ // Check for one argument. Bail out if argument is not smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmpq(rax, Immediate(1));
+ __ j(not_equal, &argc_two_or_more);
+ __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+ __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
+ __ j(greater_equal, call_generic_code);
+
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+ // rsp[0]: return address
+ // rsp[8]: argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(2 * kPointerSize);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ movq(rdx, rax);
+ __ Integer32ToSmi(rdx, rdx); // Convert argc to a smi.
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+ // rsp[0] : return address
+ // rsp[8] : last argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+
+ // rax: argc
+ // rbx: JSArray
+ // rcx: elements_array
+ // r8: elements_array_end (untagged)
+ // rsp[0]: return address
+ // rsp[8]: last argument
+
+ // Location of the last argument
+ __ lea(r9, Operand(rsp, kPointerSize));
+
+ // Location of the first array element (parameter fill_with_hole to
+ // AllocateJSArray is false, so the FixedArray is returned in rcx).
+ __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // rax: argc
+ // rbx: JSArray
+ // rdx: location of the first array element
+ // r9: location of the last argument
+ // rsp[0]: return address
+ // rsp[8]: last argument
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(Operand(rdx, 0), kScratchRegister);
+ __ addq(rdx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Remove caller arguments from the stack and return.
+ // rax: argc
+ // rbx: JSArray
+ // rsp[0]: return address
+ // rsp[8]: last argument
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ push(rcx);
+ __ movq(rax, rbx);
+ __ ret(0);
+}
+
+
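Taken together, ArrayNativeCode dispatches on argc the same way the JavaScript Array builtin does: zero arguments yield an empty array, a single non-negative smi below kInitialMaxFastElementArray yields a hole-filled array of that length, and two or more arguments become the elements, copied off the stack by the count-down loop above. A hedged C++ model of that policy (plain C++, not the generated code; the bail flag stands for the jump to call_generic_code):

    #include <cstdint>
    #include <vector>

    // Illustrative bound, standing in for JSObject::kInitialMaxFastElementArray.
    static const int64_t kMaxFastLength = 100000;

    // Returns the resulting elements; sets *bail when the generic code must run.
    std::vector<int64_t> ArraySketch(const std::vector<int64_t>& args, bool* bail) {
      *bail = false;
      if (args.empty()) return std::vector<int64_t>();  // argc == 0: empty array
      if (args.size() == 1) {                           // argc == 1: sized array
        int64_t len = args[0];
        if (len < 0 || len >= kMaxFastLength) {         // not a usable fast length
          *bail = true;
          return std::vector<int64_t>();
        }
        return std::vector<int64_t>(static_cast<size_t>(len));
      }
      return args;                  // argc >= 2: the arguments are the elements
    }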
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // The smi check below catches both a NULL pointer and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ __ Check(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Check(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function, which
+ // always has a map.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rbx);
+ __ cmpq(rdi, rbx);
+ __ Check(equal, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // The smi check below catches both a NULL pointer and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ __ Check(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Check(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // TODO(849): implement custom construct stub.
+ // Generate a copy of the generic stub for now.
+ Generate_JSConstructStubGeneric(masm);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Store the arguments adaptor context sentinel.
+ __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+
+ // Push the function on the stack.
+ __ push(rdi);
+
+ // Preserve the number of arguments on the stack. Must preserve both
+ // rax and rbx because these registers are used when copying the
+ // arguments and the receiver.
+ __ Integer32ToSmi(rcx, rax);
+ __ push(rcx);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // Retrieve the number of arguments from the stack. The number is a Smi.
+ __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Leave the frame.
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+
+ // Remove caller arguments from the stack.
+ __ pop(rcx);
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+ __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ push(rcx);
+}
+
+
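For orientation, the frame these two helpers build and tear down looks roughly like this (offsets illustrative for x64, stack growing downward; the smi argument count is the slot LeaveArgumentsAdaptorFrame reads back through kLengthOffset):

    rbp + 8  : return address of the adapted call
    rbp + 0  : caller's rbp
    rbp - 8  : Smi(StackFrame::ARGUMENTS_ADAPTOR)  // context-slot sentinel
    rbp - 16 : function (rdi)
    rbp - 24 : Smi(actual argument count)          // read back on exit
    ...        copied receiver and arguments are pushed below this point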
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : actual number of arguments
+ // -- rbx : expected number of arguments
+ // -- rdx : code entry to call
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+ __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+ Label enough, too_few;
+ __ cmpq(rax, rbx);
+ __ j(less, &too_few);
+ __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ j(equal, &dont_adapt_arguments);
+
+ { // Enough parameters: Actual >= expected.
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all expected arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ movq(rcx, Immediate(-1)); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(rcx);
+ __ push(Operand(rax, 0));
+ __ subq(rax, Immediate(kPointerSize));
+ __ cmpq(rcx, rbx);
+ __ j(less, &copy);
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ movq(rcx, Immediate(-1)); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(rcx);
+ __ push(Operand(rdi, 0));
+ __ subq(rdi, Immediate(kPointerSize));
+ __ cmpq(rcx, rax);
+ __ j(less, &copy);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ bind(&fill);
+ __ incq(rcx);
+ __ push(kScratchRegister);
+ __ cmpq(rcx, rbx);
+ __ j(less, &fill);
+
+ // Restore function pointer.
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+ __ call(rdx);
+
+ // Leave frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ ret(0);
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ jmp(rdx);
+}
+
+
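The net effect on the argument list is easy to model in plain C++ (a sketch of the policy, not the generated code; kDontAdapt mirrors SharedFunctionInfo::kDontAdaptArgumentsSentinel and kUndefined stands in for the undefined value):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    static const size_t kDontAdapt = static_cast<size_t>(-1);
    static const int kUndefined = 0;  // stand-in for the undefined value

    // The callee always sees exactly 'expected' arguments: surplus actuals stay
    // on the stack but out of view, and missing ones are filled with undefined.
    std::vector<int> AdaptArguments(const std::vector<int>& actual,
                                    size_t expected) {
      if (expected == kDontAdapt) return actual;  // jump straight to the code entry
      std::vector<int> adapted(
          actual.begin(), actual.begin() + std::min(actual.size(), expected));
      adapted.resize(expected, kUndefined);       // too few: pad with undefined
      return adapted;
    }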
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ int3();
}
+#undef __
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 23700e1..6c8b333 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -6782,6 +6782,13 @@
Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
deferred->Branch(NegateCondition(both_smi));
+ // Check that both indices are valid.
+ __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
+ __ cmpl(tmp2.reg(), index1.reg());
+ deferred->Branch(below_equal);
+ __ cmpl(tmp2.reg(), index2.reg());
+ deferred->Branch(below_equal);
+
// Bring addresses into index1 and index2.
__ SmiToInteger32(index1.reg(), index1.reg());
__ lea(index1.reg(), FieldOperand(tmp1.reg(),
@@ -7867,7 +7874,7 @@
case Token::INSTANCEOF: {
Load(left);
Load(right);
- InstanceofStub stub;
+ InstanceofStub stub(InstanceofStub::kNoFlags);
Result answer = frame_->CallStub(&stub, 2);
answer.ToRegister();
__ testq(answer.reg(), answer.reg());
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 574688c..dd28d4d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -198,6 +198,11 @@
}
+void FullCodeGenerator::ClearAccumulator() {
+ __ xor_(rax, rax);
+}
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
NearLabel ok;
@@ -839,7 +844,9 @@
__ bind(&update_each);
__ movq(result_register(), rbx);
// Perform the assignment as if via '='.
- EmitAssignment(stmt->each());
+ { EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->AssignmentId());
+ }
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1413,6 +1420,7 @@
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
+ context()->Plug(rax);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1521,7 +1529,7 @@
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1569,6 +1577,7 @@
break;
}
}
+ context()->Plug(rax);
}
@@ -1641,8 +1650,6 @@
}
__ bind(&done);
}
-
- context()->Plug(rax);
}
@@ -1679,10 +1686,9 @@
__ push(Operand(rsp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(rax);
- context()->DropAndPlug(1, rax);
- } else {
- context()->Plug(rax);
+ __ Drop(1);
}
+ context()->Plug(rax);
}
@@ -3127,6 +3133,7 @@
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ context.Plug(rax);
}
// For all contexts except kEffect: We have the result on
// top of the stack.
@@ -3137,6 +3144,7 @@
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
+ context()->Plug(rax);
}
break;
case NAMED_PROPERTY: {
@@ -3328,7 +3336,7 @@
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub;
+ InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
__ testq(rax, rax);
// The stub returns 0 for true.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 2002099..aff778a 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1951,10 +1951,8 @@
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
-#ifdef DEBUG
State previous_state = GetState();
-#endif
- State state = TargetState(x, y);
+ State state = TargetState(previous_state, false, x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
rewritten = stub.GetCode();
@@ -1974,6 +1972,10 @@
#endif
}
+void PatchInlinedSmiCode(Address address) {
+ UNIMPLEMENTED();
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 0c1559b..f66ec16 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -216,11 +216,6 @@
return 0;
}
- int NearestNextGapPos(int index) const {
- UNIMPLEMENTED();
- return 0;
- }
-
void MarkEmptyBlocks() { UNIMPLEMENTED(); }
#ifdef DEBUG
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 5bb5ffd..1df9b47 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2250,6 +2250,31 @@
}
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ movq(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
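LoadGlobalFunction is just three dependent loads; in C++ terms (illustrative struct layout, not V8's actual heap objects):

    struct Context;
    struct JSFunction;

    // GlobalObject::kGlobalContextOffset points at the global context.
    struct GlobalObject { Context* global_context; };

    struct Context {
      GlobalObject* global;   // slot Context::GLOBAL_INDEX
      JSFunction* slots[32];  // remaining context slots (count illustrative)
    };

    // Mirrors the code above: current context -> global object -> global
    // context -> function stored at the requested slot index.
    JSFunction* LoadGlobalFunctionSketch(Context* current, int index) {
      GlobalObject* global = current->global;
      Context* global_context = global->global_context;
      return global_context->slots[index];
    }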
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map) {
+ // Load the initial map. The global functions all have initial maps.
+ movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (FLAG_debug_code) {
+ Label ok, fail;
+ CheckMap(map, Factory::meta_map(), &fail, false);
+ jmp(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
+}
+
+
int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On Windows 64 stack slots are reserved by the caller for all arguments
// including the ones passed in registers, and space is always allocated for
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 348191e..d8f2fba 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -772,6 +772,13 @@
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global function with the given index.
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same.
+ void LoadGlobalFunctionInitialMap(Register function, Register map);
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 530222e..63e9769 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -948,8 +948,8 @@
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasCustomCallGenerator()) {
- const int id = function_info->custom_call_generator_id();
+ if (function_info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, NULL, function, name);
Object* result;
@@ -1830,8 +1830,8 @@
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasCustomCallGenerator()) {
- const int id = function_info->custom_call_generator_id();
+ if (function_info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, cell, function, name);
Object* result;