Version 2.2.1
Debugger improvements.
Fixed minor bugs.
git-svn-id: http://v8.googlecode.com/svn/trunk@4346 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0ca4d35..4e0801a 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -3587,13 +3587,22 @@
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
- // Seed the result with the formal parameters count, which will be used
- // in case no arguments adaptor frame is found below the current frame.
+ Label exit;
+
+ // Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to the arguments.length.
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
- frame_->CallStub(&stub, 0);
+ // Check if the calling frame is an arguments adaptor frame.
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
frame_->EmitPush(r0);
}
@@ -6791,26 +6800,6 @@
}
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Nothing to do: The formal number of parameters has already been
- // passed in register r0 by calling function. Just return it.
- __ Jump(lr);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame and return it.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Jump(lr);
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 13d464d..0015f78 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -800,7 +800,7 @@
NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- LOG(RegExpCodeCreateEvent(*code, *source));
+ PROFILE(RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
diff --git a/src/array.js b/src/array.js
index a29015a..c367d7e 100644
--- a/src/array.js
+++ b/src/array.js
@@ -1150,7 +1150,7 @@
"reduce", getFunction("reduce", ArrayReduce, 1),
"reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
));
-
+
%FinishArrayPrototypeSetup($Array.prototype);
}
diff --git a/src/ast.cc b/src/ast.cc
index 9204a84..75b2945 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -47,11 +47,8 @@
// ----------------------------------------------------------------------------
// All the Accept member functions for each syntax tree node type.
-#define DECL_ACCEPT(type) \
- void type::Accept(AstVisitor* v) { \
- if (v->CheckStackOverflow()) return; \
- v->Visit##type(this); \
- }
+#define DECL_ACCEPT(type) \
+ void type::Accept(AstVisitor* v) { v->Visit##type(this); }
AST_NODE_LIST(DECL_ACCEPT)
#undef DECL_ACCEPT
@@ -241,6 +238,13 @@
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
+bool AstVisitor::CheckStackOverflow() {
+ if (stack_overflow_) return true;
+ StackLimitCheck check;
+ if (!check.HasOverflowed()) return false;
+ return (stack_overflow_ = true);
+}
+
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
@@ -749,117 +753,6 @@
}
-static inline void MarkIfNotLive(Expression* expr, List<AstNode*>* stack) {
- if (!expr->is_live()) {
- expr->mark_as_live();
- stack->Add(expr);
- }
-}
-
-
-// Overloaded functions for marking children of live code as live.
-void VariableProxy::ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- // A reference to a stack-allocated variable depends on all the
- // definitions reaching it.
- BitVector* defs = reaching_definitions();
- if (defs != NULL) {
- ASSERT(var()->IsStackAllocated());
- // The first variable_count definitions are the initial parameter and
- // local declarations.
- for (int i = variable_count; i < defs->length(); i++) {
- if (defs->Contains(i)) {
- MarkIfNotLive(body_definitions->at(i - variable_count), stack);
- }
- }
- }
-}
-
-
-void Literal::ProcessNonLiveChildren(List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- // Leaf node, no children.
-}
-
-
-void Assignment::ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- Property* prop = target()->AsProperty();
- VariableProxy* proxy = target()->AsVariableProxy();
-
- if (prop != NULL) {
- if (!prop->key()->IsPropertyName()) MarkIfNotLive(prop->key(), stack);
- MarkIfNotLive(prop->obj(), stack);
- } else if (proxy == NULL) {
- // Must be a reference error.
- ASSERT(!target()->IsValidLeftHandSide());
- MarkIfNotLive(target(), stack);
- } else if (is_compound()) {
- // A variable assignment so lhs is an operand to the operation.
- MarkIfNotLive(target(), stack);
- }
- MarkIfNotLive(value(), stack);
-}
-
-
-void Property::ProcessNonLiveChildren(List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- if (!key()->IsPropertyName()) MarkIfNotLive(key(), stack);
- MarkIfNotLive(obj(), stack);
-}
-
-
-void Call::ProcessNonLiveChildren(List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- ZoneList<Expression*>* args = arguments();
- for (int i = args->length() - 1; i >= 0; i--) {
- MarkIfNotLive(args->at(i), stack);
- }
- MarkIfNotLive(expression(), stack);
-}
-
-
-void UnaryOperation::ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- MarkIfNotLive(expression(), stack);
-}
-
-
-void CountOperation::ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- MarkIfNotLive(expression(), stack);
-}
-
-
-void BinaryOperation::ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- MarkIfNotLive(right(), stack);
- MarkIfNotLive(left(), stack);
-}
-
-
-void CompareOperation::ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- MarkIfNotLive(right(), stack);
- MarkIfNotLive(left(), stack);
-}
-
-
// Implementation of a copy visitor. The visitor create a deep copy
// of ast nodes. Nodes that do not require a deep copy are copied
// with the default copy constructor.
@@ -963,13 +856,11 @@
: Expression(other), op_(other->op_), expression_(expression) {}
-BinaryOperation::BinaryOperation(BinaryOperation* other,
+BinaryOperation::BinaryOperation(Expression* other,
+ Token::Value op,
Expression* left,
Expression* right)
- : Expression(other),
- op_(other->op_),
- left_(left),
- right_(right) {}
+ : Expression(other), op_(op), left_(left), right_(right) {}
CountOperation::CountOperation(CountOperation* other, Expression* expression)
@@ -1221,6 +1112,7 @@
void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
expr_ = new BinaryOperation(expr,
+ expr->op(),
DeepCopyExpr(expr->left()),
DeepCopyExpr(expr->right()));
}
diff --git a/src/ast.h b/src/ast.h
index fa85eee..dfc08ee 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -294,19 +294,6 @@
bitfields_ |= NumBitOpsField::encode(num_bit_ops);
}
- // Functions used for dead-code elimination. Predicate is true if the
- // expression is not dead code.
- int is_live() const { return LiveField::decode(bitfields_); }
- void mark_as_live() { bitfields_ |= LiveField::encode(true); }
-
- // Mark non-live children as live and push them on a stack for further
- // processing.
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- }
-
private:
static const int kMaxNumBitOps = (1 << 5) - 1;
@@ -319,7 +306,6 @@
class ToInt32Field : public BitField<bool, 2, 1> {};
class NumBitOpsField : public BitField<int, 3, 5> {};
class LoopConditionField: public BitField<bool, 8, 1> {};
- class LiveField: public BitField<bool, 9, 1> {};
};
@@ -907,10 +893,6 @@
virtual bool IsTrivial() { return true; }
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
@@ -1118,10 +1100,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
void SetIsPrimitive(bool value) { is_primitive_ = value; }
@@ -1260,10 +1238,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
@@ -1299,10 +1273,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1382,10 +1352,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@@ -1403,7 +1369,13 @@
ASSERT(Token::IsBinaryOp(op));
}
- BinaryOperation(BinaryOperation* other, Expression* left, Expression* right);
+ // Construct a binary operation with a given operator and left and right
+ // subexpressions. The rest of the expression state is copied from
+ // another expression.
+ BinaryOperation(Expression* other,
+ Token::Value op,
+ Expression* left,
+ Expression* right);
virtual void Accept(AstVisitor* v);
@@ -1412,10 +1384,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
@@ -1473,10 +1441,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
@@ -1510,10 +1474,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
@@ -1568,10 +1528,6 @@
virtual bool IsPrimitive();
virtual bool IsCritical();
- virtual void ProcessNonLiveChildren(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
@@ -2110,29 +2066,23 @@
AstVisitor() : stack_overflow_(false) { }
virtual ~AstVisitor() { }
- // Dispatch
- void Visit(AstNode* node) { node->Accept(this); }
+ // Stack overflow check and dynamic dispatch.
+ void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
- // Iteration
+ // Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
// Stack overflow tracking support.
bool HasStackOverflow() const { return stack_overflow_; }
- bool CheckStackOverflow() {
- if (stack_overflow_) return true;
- StackLimitCheck check;
- if (!check.HasOverflowed()) return false;
- return (stack_overflow_ = true);
- }
+ bool CheckStackOverflow();
// If a stack-overflow exception is encountered when visiting a
// node, calling SetStackOverflow will make sure that the visitor
// bails out without visiting more nodes.
void SetStackOverflow() { stack_overflow_ = true; }
-
// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 8a9fa4b..82a63f0 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -723,8 +723,68 @@
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
Top::initial_object_prototype(), Builtins::Illegal,
true);
-
global_context()->set_regexp_function(*regexp_fun);
+
+ ASSERT(regexp_fun->has_initial_map());
+ Handle<Map> initial_map(regexp_fun->initial_map());
+
+ ASSERT_EQ(0, initial_map->inobject_properties());
+
+ Handle<DescriptorArray> descriptors = Factory::NewDescriptorArray(5);
+ PropertyAttributes final =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ int enum_index = 0;
+ {
+ // ECMA-262, section 15.10.7.1.
+ FieldDescriptor field(Heap::source_symbol(),
+ JSRegExp::kSourceFieldIndex,
+ final,
+ enum_index++);
+ descriptors->Set(0, &field);
+ }
+ {
+ // ECMA-262, section 15.10.7.2.
+ FieldDescriptor field(Heap::global_symbol(),
+ JSRegExp::kGlobalFieldIndex,
+ final,
+ enum_index++);
+ descriptors->Set(1, &field);
+ }
+ {
+ // ECMA-262, section 15.10.7.3.
+ FieldDescriptor field(Heap::ignore_case_symbol(),
+ JSRegExp::kIgnoreCaseFieldIndex,
+ final,
+ enum_index++);
+ descriptors->Set(2, &field);
+ }
+ {
+ // ECMA-262, section 15.10.7.4.
+ FieldDescriptor field(Heap::multiline_symbol(),
+ JSRegExp::kMultilineFieldIndex,
+ final,
+ enum_index++);
+ descriptors->Set(3, &field);
+ }
+ {
+ // ECMA-262, section 15.10.7.5.
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ FieldDescriptor field(Heap::last_index_symbol(),
+ JSRegExp::kLastIndexFieldIndex,
+ writable,
+ enum_index++);
+ descriptors->Set(4, &field);
+ }
+ descriptors->SetNextEnumerationIndex(enum_index);
+ descriptors->Sort();
+
+ initial_map->set_inobject_properties(5);
+ initial_map->set_pre_allocated_property_fields(5);
+ initial_map->set_unused_property_fields(0);
+ initial_map->set_instance_size(
+ initial_map->instance_size() + 5 * kPointerSize);
+ initial_map->set_instance_descriptors(*descriptors);
}
{ // -- J S O N
diff --git a/src/builtins.cc b/src/builtins.cc
index feb912f..69f6413 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -268,9 +268,10 @@
int src_index,
int len) {
ASSERT(dst != src); // Use MoveElements instead.
- memcpy(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kPointerSize);
+ ASSERT(len > 0);
+ CopyWords(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
@@ -299,6 +300,73 @@
}
+static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
+ // For now this trick is only applied to fixed arrays in new space.
+ // In large object space the object's start must coincide with chunk
+ // and thus the trick is just not applicable.
+ // In old space we do not use this trick to avoid dealing with
+ // remembered sets.
+ ASSERT(Heap::new_space()->Contains(elms));
+
+ STATIC_ASSERT(FixedArray::kMapOffset == 0);
+ STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
+ STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
+
+ Object** former_start = HeapObject::RawField(elms, 0);
+
+ const int len = elms->length();
+
+ // Technically in new space this write might be omitted (except for
+ // debug mode which iterates through the heap), but to play safer
+ // we still do it.
+ former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
+
+ former_start[1] = Heap::fixed_array_map();
+ former_start[2] = reinterpret_cast<Object*>(len - 1);
+
+ ASSERT_EQ(elms->address() + kPointerSize, (elms + kPointerSize)->address());
+ return elms + kPointerSize;
+}
+
+
+static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
+ // For now this trick is only applied to fixed arrays in new space.
+ // In large object space the object's start must coincide with chunk
+ // and thus the trick is just not applicable.
+ // In old space we do not use this trick to avoid dealing with
+ // remembered sets.
+ ASSERT(Heap::new_space()->Contains(elms));
+
+ STATIC_ASSERT(FixedArray::kMapOffset == 0);
+ STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
+ STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
+
+ Object** former_start = HeapObject::RawField(elms, 0);
+
+ const int len = elms->length();
+
+ // Technically in new space this write might be omitted (except for
+ // debug mode which iterates through the heap), but to play safer
+ // we still do it.
+ if (to_trim == 1) {
+ former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
+ } else if (to_trim == 2) {
+ former_start[0] = Heap::raw_unchecked_two_pointer_filler_map();
+ } else {
+ former_start[0] = Heap::raw_unchecked_byte_array_map();
+ ByteArray* as_byte_array = reinterpret_cast<ByteArray*>(elms);
+ as_byte_array->set_length(ByteArray::LengthFor(to_trim * kPointerSize));
+ }
+
+ former_start[to_trim] = Heap::fixed_array_map();
+ former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
+
+ ASSERT_EQ(elms->address() + to_trim * kPointerSize,
+ (elms + to_trim * kPointerSize)->address());
+ return elms + to_trim * kPointerSize;
+}
+
+
static bool ArrayPrototypeHasNoElements() {
// This method depends on non writability of Object and Array prototype
// fields.
@@ -390,7 +458,9 @@
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
- CopyElements(&no_gc, new_elms, 0, elms, 0, len);
+ if (len > 0) {
+ CopyElements(&no_gc, new_elms, 0, elms, 0, len);
+ }
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
@@ -443,38 +513,6 @@
}
-static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
- // For now this trick is only applied to fixed arrays in new space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- // In old space we do not use this trick to avoid dealing with
- // remembered sets.
- ASSERT(Heap::new_space()->Contains(elms));
-
- Object** former_map =
- HeapObject::RawField(elms, FixedArray::kMapOffset);
- Object** former_length =
- HeapObject::RawField(elms, FixedArray::kLengthOffset);
- Object** former_first =
- HeapObject::RawField(elms, FixedArray::kHeaderSize);
- // Check that we don't forget to copy all the bits.
- STATIC_ASSERT(FixedArray::kMapOffset + 2 * kPointerSize
- == FixedArray::kHeaderSize);
-
- int len = elms->length();
-
- *former_first = reinterpret_cast<Object*>(len - 1);
- *former_length = Heap::fixed_array_map();
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- *former_map = Heap::raw_unchecked_one_pointer_filler_map();
-
- ASSERT(elms->address() + kPointerSize == (elms + kPointerSize)->address());
- return elms + kPointerSize;
-}
-
-
BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
@@ -537,7 +575,9 @@
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
- CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
+ if (len > 0) {
+ CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
+ }
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
@@ -713,12 +753,27 @@
if (item_count < actual_delete_count) {
// Shrink the array.
- AssertNoAllocation no_gc;
- MoveElements(&no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
+ const bool trim_array = Heap::new_space()->Contains(elms) &&
+ ((actual_start + item_count) <
+ (len - actual_delete_count - actual_start));
+ if (trim_array) {
+ const int delta = actual_delete_count - item_count;
+
+ if (actual_start > 0) {
+ Object** start = elms->data_start();
+ memmove(start + delta, start, actual_start * kPointerSize);
+ }
+
+ elms = LeftTrimFixedArray(elms, delta);
+ array->set_elements(elms, SKIP_WRITE_BARRIER);
+ } else {
+ AssertNoAllocation no_gc;
+ MoveElements(&no_gc,
+ elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(elms, new_length, len);
+ }
} else if (item_count > actual_delete_count) {
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
@@ -734,11 +789,16 @@
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
- CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
- CopyElements(&no_gc,
- new_elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
+ if (actual_start > 0) {
+ CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
+ }
+ const int to_copy = len - actual_delete_count - actual_start;
+ if (to_copy > 0) {
+ CopyElements(&no_gc,
+ new_elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ to_copy);
+ }
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
@@ -812,10 +872,12 @@
int start_pos = 0;
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
- FixedArray* elms = FixedArray::cast(array->elements());
int len = Smi::cast(array->length())->value();
- CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
- start_pos += len;
+ if (len > 0) {
+ FixedArray* elms = FixedArray::cast(array->elements());
+ CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
+ start_pos += len;
+ }
}
ASSERT(start_pos == result_len);
@@ -1431,8 +1493,8 @@
}
}
// Log the event and add the code to the builtins array.
- LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), functions[i].s_name));
+ PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ Code::cast(code), functions[i].s_name));
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
diff --git a/src/circular-queue.h b/src/circular-queue.h
index dce7fc2..486f107 100644
--- a/src/circular-queue.h
+++ b/src/circular-queue.h
@@ -119,6 +119,8 @@
byte* positions_;
ProducerPosition* producer_pos_;
ConsumerPosition* consumer_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index ea74898..9d5969b 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -64,7 +64,7 @@
OPROFILE(CreateNativeCodeRegion(GetName(),
code->instruction_start(),
code->instruction_size()));
- LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+ PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
diff --git a/src/codegen.cc b/src/codegen.cc
index 56d8f4b..5bbf050 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -235,7 +235,7 @@
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
- if (!Logger::is_logging()) return false;
+ if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp");
@@ -454,7 +454,6 @@
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
- case READ_LENGTH: GenerateReadLength(masm); break;
case READ_ELEMENT: GenerateReadElement(masm); break;
case NEW_OBJECT: GenerateNewObject(masm); break;
}
diff --git a/src/codegen.h b/src/codegen.h
index 0dfea8d..9685ee7 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -498,7 +498,6 @@
class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
- READ_LENGTH,
READ_ELEMENT,
NEW_OBJECT
};
@@ -512,7 +511,6 @@
int MinorKey() { return type_; }
void Generate(MacroAssembler* masm);
- void GenerateReadLength(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewObject(MacroAssembler* masm);
diff --git a/src/compiler.cc b/src/compiler.cc
index c9dd107..611c0bd 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -90,33 +90,13 @@
}
if (FLAG_use_flow_graph) {
- int variable_count =
- function->num_parameters() + function->scope()->num_stack_slots();
- FlowGraphBuilder builder(variable_count);
- builder.Build(function);
-
- if (!builder.HasStackOverflow()) {
- if (variable_count > 0) {
- ReachingDefinitions rd(builder.postorder(),
- builder.body_definitions(),
- variable_count);
- rd.Compute();
-
- TypeAnalyzer ta(builder.postorder(),
- builder.body_definitions(),
- variable_count,
- function->num_parameters());
- ta.Compute();
-
- MarkLiveCode(builder.preorder(),
- builder.body_definitions(),
- variable_count);
- }
- }
+ FlowGraphBuilder builder;
+ FlowGraph* graph = builder.Build(function);
+ USE(graph);
#ifdef DEBUG
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
- builder.graph()->PrintText(function, builder.postorder());
+ graph->PrintAsText(function->name());
}
#endif
}
@@ -237,14 +217,14 @@
}
if (script->name()->IsString()) {
- LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
- *code, String::cast(script->name())));
+ PROFILE(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+ *code, String::cast(script->name())));
OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
code->instruction_start(),
code->instruction_size()));
} else {
- LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
- *code, ""));
+ PROFILE(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+ *code, ""));
OPROFILE(CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
code->instruction_start(),
code->instruction_size()));
@@ -499,33 +479,13 @@
}
if (FLAG_use_flow_graph) {
- int variable_count =
- literal->num_parameters() + literal->scope()->num_stack_slots();
- FlowGraphBuilder builder(variable_count);
- builder.Build(literal);
-
- if (!builder.HasStackOverflow()) {
- if (variable_count > 0) {
- ReachingDefinitions rd(builder.postorder(),
- builder.body_definitions(),
- variable_count);
- rd.Compute();
-
- TypeAnalyzer ta(builder.postorder(),
- builder.body_definitions(),
- variable_count,
- literal->num_parameters());
- ta.Compute();
-
- MarkLiveCode(builder.preorder(),
- builder.body_definitions(),
- variable_count);
- }
- }
+ FlowGraphBuilder builder;
+ FlowGraph* graph = builder.Build(literal);
+ USE(graph);
#ifdef DEBUG
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
- builder.graph()->PrintText(literal, builder.postorder());
+ graph->PrintAsText(literal->name());
}
#endif
}
@@ -625,20 +585,22 @@
// Log the code generation. If source information is available
// include script name and line number. Check explicitly whether
// logging is enabled as finding the line number is not free.
- if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+ if (Logger::is_logging()
+ || OProfileAgent::is_enabled()
+ || CpuProfiler::is_profiling()) {
Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, start_position) + 1;
USE(line_num);
- LOG(CodeCreateEvent(tag, *code, *func_name,
- String::cast(script->name()), line_num));
+ PROFILE(CodeCreateEvent(tag, *code, *func_name,
+ String::cast(script->name()), line_num));
OPROFILE(CreateNativeCodeRegion(*func_name,
String::cast(script->name()),
line_num,
code->instruction_start(),
code->instruction_size()));
} else {
- LOG(CodeCreateEvent(tag, *code, *func_name));
+ PROFILE(CodeCreateEvent(tag, *code, *func_name));
OPROFILE(CreateNativeCodeRegion(*func_name,
code->instruction_start(),
code->instruction_size()));
diff --git a/src/compiler.h b/src/compiler.h
index ecc7b1c..ade21f5 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -138,10 +138,7 @@
// There should always be a function literal, but it may be set after
// construction (for lazy compilation).
FunctionLiteral* function() { return function_; }
- void set_function(FunctionLiteral* literal) {
- ASSERT(function_ == NULL);
- function_ = literal;
- }
+ void set_function(FunctionLiteral* literal) { function_ = literal; }
// Simple accessors.
bool is_eval() { return is_eval_; }
diff --git a/src/conversions.cc b/src/conversions.cc
index 5c46752..2929191 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -48,51 +48,6 @@
return -1;
}
-
-// Provide a common interface to getting a character at a certain
-// index from a char* or a String object.
-static inline int GetChar(const char* str, int index) {
- ASSERT(index >= 0 && index < StrLength(str));
- return str[index];
-}
-
-
-static inline int GetChar(String* str, int index) {
- return str->Get(index);
-}
-
-
-static inline int GetLength(const char* str) {
- return StrLength(str);
-}
-
-
-static inline int GetLength(String* str) {
- return str->length();
-}
-
-
-static inline const char* GetCString(const char* str, int index) {
- return str + index;
-}
-
-
-static inline const char* GetCString(String* str, int index) {
- int length = str->length();
- char* result = NewArray<char>(length + 1);
- for (int i = index; i < length; i++) {
- uc16 c = str->Get(i);
- if (c <= 127) {
- result[i - index] = static_cast<char>(c);
- } else {
- result[i - index] = 127; // Force number parsing to fail.
- }
- }
- result[length - index] = '\0';
- return result;
-}
-
-
namespace {
// C++-style iterator adaptor for StringInputBuffer
@@ -134,15 +89,6 @@
}
-static inline void ReleaseCString(const char* original, const char* str) {
-}
-
-
-static inline void ReleaseCString(String* original, const char* str) {
- DeleteArray(const_cast<char *>(str));
-}
-
-
template <class Iterator, class EndMark>
static bool SubStringEquals(Iterator* current,
EndMark end,
@@ -168,97 +114,6 @@
// we don't need to preserve all the digits.
const int kMaxSignificantDigits = 772;
-// Parse an int from a string starting a given index and in a given
-// radix. The string can be either a char* or a String*.
-template <class S>
-static int InternalStringToInt(S* s, int i, int radix, double* value) {
- int len = GetLength(s);
-
- // Setup limits for computing the value.
- ASSERT(2 <= radix && radix <= 36);
- int lim_0 = '0' + (radix < 10 ? radix : 10);
- int lim_a = 'a' + (radix - 10);
- int lim_A = 'A' + (radix - 10);
-
- // NOTE: The code for computing the value may seem a bit complex at
- // first glance. It is structured to use 32-bit multiply-and-add
- // loops as long as possible to avoid loosing precision.
-
- double v = 0.0;
- int j;
- for (j = i; j < len;) {
- // Parse the longest part of the string starting at index j
- // possible while keeping the multiplier, and thus the part
- // itself, within 32 bits.
- uint32_t part = 0, multiplier = 1;
- int k;
- for (k = j; k < len; k++) {
- int c = GetChar(s, k);
- if (c >= '0' && c < lim_0) {
- c = c - '0';
- } else if (c >= 'a' && c < lim_a) {
- c = c - 'a' + 10;
- } else if (c >= 'A' && c < lim_A) {
- c = c - 'A' + 10;
- } else {
- break;
- }
-
- // Update the value of the part as long as the multiplier fits
- // in 32 bits. When we can't guarantee that the next iteration
- // will not overflow the multiplier, we stop parsing the part
- // by leaving the loop.
- static const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
- uint32_t m = multiplier * radix;
- if (m > kMaximumMultiplier) break;
- part = part * radix + c;
- multiplier = m;
- ASSERT(multiplier > part);
- }
-
- // Compute the number of part digits. If no digits were parsed;
- // we're done parsing the entire string.
- int digits = k - j;
- if (digits == 0) break;
-
- // Update the value and skip the part in the string.
- ASSERT(multiplier ==
- pow(static_cast<double>(radix), static_cast<double>(digits)));
- v = v * multiplier + part;
- j = k;
- }
-
- // If the resulting value is larger than 2^53 the value does not fit
- // in the mantissa of the double and there is a loss of precision.
- // When the value is larger than 2^53 the rounding depends on the
- // code generation. If the code generator spills the double value
- // it uses 64 bits and if it does not it uses 80 bits.
- //
- // If there is a potential for overflow we resort to strtod for
- // radix 10 numbers to get higher precision. For numbers in another
- // radix we live with the loss of precision.
- static const double kPreciseConversionLimit = 9007199254740992.0;
- if (radix == 10 && v > kPreciseConversionLimit) {
- const char* cstr = GetCString(s, i);
- const char* end;
- v = gay_strtod(cstr, &end);
- ReleaseCString(s, cstr);
- }
-
- *value = v;
- return j;
-}
-
-
-int StringToInt(String* str, int index, int radix, double* value) {
- return InternalStringToInt(str, index, radix, value);
-}
-
-
-int StringToInt(const char* str, int index, int radix, double* value) {
- return InternalStringToInt(const_cast<char*>(str), index, radix, value);
-}
-
static const double JUNK_STRING_VALUE = OS::nan_value();
@@ -274,73 +129,294 @@
}
-template <class Iterator, class EndMark>
-static double InternalHexadecimalStringToDouble(Iterator current,
- EndMark end,
- char* buffer,
- bool allow_trailing_junk) {
+static bool isDigit(int x, int radix) {
+ return (x >= '0' && x <= '9' && x < '0' + radix)
+ || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+ || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static double SignedZero(bool sign) {
+ return sign ? -0.0 : 0.0;
+}
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2, class Iterator, class EndMark>
+static double InternalStringToIntDouble(Iterator current,
+ EndMark end,
+ bool sign,
+ bool allow_trailing_junk) {
ASSERT(current != end);
- const int max_hex_significant_digits = 52 / 4 + 2;
- // We reuse the buffer of InternalStringToDouble. Since hexadecimal
- // numbers may have much less digits than decimal the buffer won't overflow.
- ASSERT(max_hex_significant_digits < kMaxSignificantDigits);
-
- int significant_digits = 0;
- int insignificant_digits = 0;
- bool leading_zero = false;
- // A double has a 53bit significand (once the hidden bit has been added).
- // Halfway cases thus have at most 54bits. Therefore 54/4 + 1 digits are
- // sufficient to represent halfway cases. By adding another digit we can keep
- // track of dropped digits.
- int buffer_pos = 0;
- bool nonzero_digit_dropped = false;
-
// Skip leading 0s.
while (*current == '0') {
- leading_zero = true;
++current;
- if (current == end) return 0;
+ if (current == end) return SignedZero(sign);
}
- int begin_pos = buffer_pos;
- while ((*current >= '0' && *current <= '9')
- || (*current >= 'a' && *current <= 'f')
- || (*current >= 'A' && *current <= 'F')) {
- if (significant_digits <= max_hex_significant_digits) {
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
+ int64_t number = 0;
+ int exponent = 0;
+ const int radix = (1 << radix_log_2);
+
+ do {
+ int digit;
+ if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
+ digit = static_cast<char>(*current) - '0';
+ } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
+ digit = static_cast<char>(*current) - 'a' + 10;
+ } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
+ digit = static_cast<char>(*current) - 'A' + 10;
} else {
- insignificant_digits++;
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+        if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
+ break;
+ } else {
+ return JUNK_STRING_VALUE;
+ }
+ }
+
+ number = number * radix + digit;
+ int overflow = static_cast<int>(number >> 53);
+ if (overflow != 0) {
+ // Overflow occurred. Need to determine which direction to round the
+ // result.
+ int overflow_bits_count = 1;
+ while (overflow > 1) {
+ overflow_bits_count++;
+ overflow >>= 1;
+ }
+
+ int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+ int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+ number >>= overflow_bits_count;
+ exponent = overflow_bits_count;
+
+ bool zero_tail = true;
+ while (true) {
+ ++current;
+ if (current == end || !isDigit(*current, radix)) break;
+ zero_tail = zero_tail && *current == '0';
+ exponent += radix_log_2;
+ }
+
+      if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ int middle_value = (1 << (overflow_bits_count - 1));
+ if (dropped_bits > middle_value) {
+ number++; // Rounding up.
+ } else if (dropped_bits == middle_value) {
+        // Rounding to even for consistency with decimals: the half-way case
+        // rounds up if the significant part is odd and down otherwise.
+ if ((number & 1) != 0 || !zero_tail) {
+ number++; // Rounding up.
+ }
+ }
+
+ // Rounding up may cause overflow.
+ if ((number & ((int64_t)1 << 53)) != 0) {
+ exponent++;
+ number >>= 1;
+ }
+ break;
}
++current;
- if (current == end) break;
+ } while (current != end);
+
+ ASSERT(number < ((int64_t)1 << 53));
+ ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+ if (exponent == 0) {
+ if (sign) {
+ if (number == 0) return -0.0;
+ number = -number;
+ }
+ return static_cast<double>(number);
}
+ ASSERT(number != 0);
+ // The double could be constructed faster from number (mantissa), exponent
+ // and sign. Assuming it's a rare case more simple code is used.
+ return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
+}
+
+
+template <class Iterator, class EndMark>
+static double InternalStringToInt(Iterator current, EndMark end, int radix) {
+ const bool allow_trailing_junk = true;
+ const double empty_string_val = JUNK_STRING_VALUE;
+
+  if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+
+ bool sign = false;
+ bool leading_zero = false;
+
+ if (*current == '+') {
+ // Ignore leading sign; skip following spaces.
+ ++current;
+    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+ } else if (*current == '-') {
+ ++current;
+    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+ sign = true;
+ }
+
+ if (radix == 0) {
+ // Radix detection.
+ if (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(sign);
+ if (*current == 'x' || *current == 'X') {
+ radix = 16;
+ ++current;
+ if (current == end) return JUNK_STRING_VALUE;
+ } else {
+ radix = 8;
+ leading_zero = true;
+ }
+ } else {
+ radix = 10;
+ }
+ } else if (radix == 16) {
+ if (*current == '0') {
+ // Allow "0x" prefix.
+ ++current;
+ if (current == end) return SignedZero(sign);
+ if (*current == 'x' || *current == 'X') {
+ ++current;
+ if (current == end) return JUNK_STRING_VALUE;
+ } else {
+ leading_zero = true;
+ }
+ }
+ }
+
+ if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
+
+ // Skip leading zeros.
+ while (*current == '0') {
+ leading_zero = true;
+ ++current;
+ if (current == end) return SignedZero(sign);
+ }
+
+ if (!leading_zero && !isDigit(*current, radix)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ if (IsPowerOf2(radix)) {
+ switch (radix) {
+ case 2:
+ return InternalStringToIntDouble<1>(
+ current, end, sign, allow_trailing_junk);
+ case 4:
+ return InternalStringToIntDouble<2>(
+ current, end, sign, allow_trailing_junk);
+ case 8:
+ return InternalStringToIntDouble<3>(
+ current, end, sign, allow_trailing_junk);
+
+ case 16:
+ return InternalStringToIntDouble<4>(
+ current, end, sign, allow_trailing_junk);
+
+ case 32:
+ return InternalStringToIntDouble<5>(
+ current, end, sign, allow_trailing_junk);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (radix == 10) {
+ // Parsing with strtod.
+ const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
+ // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
+ // end.
+ const int kBufferSize = kMaxSignificantDigits + 2;
+ char buffer[kBufferSize];
+ int buffer_pos = 0;
+ while (*current >= '0' && *current <= '9') {
+ if (buffer_pos <= kMaxSignificantDigits) {
+ // If the number has more than kMaxSignificantDigits it will be parsed
+ // as infinity.
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ }
+ ++current;
+ if (current == end) break;
+ }
+
+    if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = '\0';
+ return sign ? -gay_strtod(buffer, NULL) : gay_strtod(buffer, NULL);
+ }
+
+ // The following code causes accumulating rounding error for numbers greater
+ // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+ // 16, or 32, then mathInt may be an implementation-dependent approximation to
+ // the mathematical integer value" (15.1.2.2).
+
+ int lim_0 = '0' + (radix < 10 ? radix : 10);
+ int lim_a = 'a' + (radix - 10);
+ int lim_A = 'A' + (radix - 10);
+
+ // NOTE: The code for computing the value may seem a bit complex at
+ // first glance. It is structured to use 32-bit multiply-and-add
+  // loops as long as possible to avoid losing precision.
+
+ double v = 0.0;
+ bool done = false;
+ do {
+ // Parse the longest part of the string starting at index j
+ // possible while keeping the multiplier, and thus the part
+ // itself, within 32 bits.
+ unsigned int part = 0, multiplier = 1;
+ while (true) {
+ int d;
+ if (*current >= '0' && *current < lim_0) {
+ d = *current - '0';
+ } else if (*current >= 'a' && *current < lim_a) {
+ d = *current - 'a' + 10;
+ } else if (*current >= 'A' && *current < lim_A) {
+ d = *current - 'A' + 10;
+ } else {
+ done = true;
+ break;
+ }
+
+ // Update the value of the part as long as the multiplier fits
+ // in 32 bits. When we can't guarantee that the next iteration
+ // will not overflow the multiplier, we stop parsing the part
+ // by leaving the loop.
+ const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
+ uint32_t m = multiplier * radix;
+ if (m > kMaximumMultiplier) break;
+ part = part * radix + d;
+ multiplier = m;
+ ASSERT(multiplier > part);
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+
+ // Update the value and skip the part in the string.
+ v = v * multiplier + part;
+ } while (!done);
+
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JUNK_STRING_VALUE;
}
- if (significant_digits == 0) {
- return leading_zero ? 0 : JUNK_STRING_VALUE;
- }
-
- if (nonzero_digit_dropped) {
- ASSERT(insignificant_digits > 0);
- insignificant_digits--;
- buffer[buffer_pos++] = '1';
- }
-
- buffer[buffer_pos] = '\0';
-
- double result;
- StringToInt(buffer, begin_pos, 16, &result);
- if (insignificant_digits > 0) {
- // Multiplying by a power of 2 doesn't cause a loss of precision.
- result *= pow(16.0, insignificant_digits);
- }
- return result;
+ return sign ? -v : v;
}
@@ -377,8 +453,9 @@
int significant_digits = 0;
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
+ bool fractional_part = false;
- double signed_zero = 0.0;
+ bool sign = false;
if (*current == '+') {
// Ignore leading sign; skip following spaces.
@@ -388,7 +465,7 @@
buffer[buffer_pos++] = '-';
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
- signed_zero = -0.0;
+ sign = true;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -408,26 +485,28 @@
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return signed_zero;
+ if (current == end) return SignedZero(sign);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
- if (current == end) return JUNK_STRING_VALUE; // "0x".
+ if (current == end || !isDigit(*current, 16)) {
+ return JUNK_STRING_VALUE; // "0x".
+ }
- double result = InternalHexadecimalStringToDouble(current,
- end,
- buffer + buffer_pos,
- allow_trailing_junk);
- return (buffer_pos > 0 && buffer[0] == '-') ? -result : result;
+ bool sign = (buffer_pos > 0 && buffer[0] == '-');
+ return InternalStringToIntDouble<4>(current,
+ end,
+ sign,
+ allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return signed_zero;
+ if (current == end) return SignedZero(sign);
}
}
@@ -454,8 +533,6 @@
}
if (*current == '.') {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = '.';
++current;
if (current == end) {
if (significant_digits == 0 && !leading_zero) {
@@ -471,11 +548,15 @@
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return signed_zero;
+ if (current == end) return SignedZero(sign);
exponent--; // Move this 0 into the exponent.
}
}
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = '.';
+ fractional_part = true;
+
// There is the fractional part.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
@@ -557,22 +638,13 @@
exponent += insignificant_digits;
if (octal) {
- buffer[buffer_pos] = '\0';
- // ALLOW_OCTALS is set and there is no '8' or '9' in insignificant
- // digits. Check significant digits now.
- char sign = '+';
- const char* s = buffer;
- if (*s == '-' || *s == '+') sign = *s++;
+ bool sign = buffer[0] == '-';
+ int start_pos = (sign ? 1 : 0);
- double result;
- s += StringToInt(s, 0, 8, &result);
- if (!allow_trailing_junk && *s != '\0') return JUNK_STRING_VALUE;
-
- if (sign == '-') result = -result;
- if (insignificant_digits > 0) {
- result *= pow(8.0, insignificant_digits);
- }
- return result;
+ return InternalStringToIntDouble<3>(buffer + start_pos,
+ buffer + buffer_pos,
+ sign,
+ allow_trailing_junk);
}
if (nonzero_digit_dropped) {
@@ -580,6 +652,11 @@
buffer[buffer_pos++] = '1';
}
+ // If the number has no more than kMaxDigitsInInt digits and doesn't have
+ // fractional part it could be parsed faster (without checks for
+ // spaces, overflow, etc.).
+ const int kMaxDigitsInInt = 9 * sizeof(int) / 4; // NOLINT
+
if (exponent != 0) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = 'e';
@@ -597,6 +674,16 @@
}
ASSERT(exponent == 0);
buffer_pos += exp_digits;
+ } else if (!fractional_part && significant_digits <= kMaxDigitsInInt) {
+ if (significant_digits == 0) return SignedZero(sign);
+ ASSERT(buffer_pos > 0);
+ int num = 0;
+ int start_pos = (buffer[0] == '-' ? 1 : 0);
+ for (int i = start_pos; i < buffer_pos; i++) {
+ ASSERT(buffer[i] >= '0' && buffer[i] <= '9');
+ num = 10 * num + (buffer[i] - '0');
+ }
+ return static_cast<double>(start_pos == 0 ? num : -num);
}
ASSERT(buffer_pos < kBufferSize);
@@ -625,6 +712,25 @@
}
+double StringToInt(String* str, int radix) {
+ StringShape shape(str);
+ if (shape.IsSequentialAscii()) {
+ const char* begin = SeqAsciiString::cast(str)->GetChars();
+ const char* end = begin + str->length();
+ return InternalStringToInt(begin, end, radix);
+ } else if (shape.IsSequentialTwoByte()) {
+ const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+ const uc16* end = begin + str->length();
+ return InternalStringToInt(begin, end, radix);
+ } else {
+ StringInputBuffer buffer(str);
+ return InternalStringToInt(StringInputBufferIterator(&buffer),
+ StringInputBufferIterator::EndMarker(),
+ radix);
+ }
+}
+
+
double StringToDouble(const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
diff --git a/src/conversions.h b/src/conversions.h
index 4aaf0c0..c4ceea6 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -100,8 +100,7 @@
double StringToDouble(String* str, int flags, double empty_string_val = 0);
// Converts a string into an integer.
-int StringToInt(String* str, int index, int radix, double* value);
-int StringToInt(const char* str, int index, int radix, double* value);
+double StringToInt(String* str, int radix);
// Converts a double to a string value according to ECMA-262 9.8.1.
// The buffer should be large enough for any floating point number.
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index 26ab643..7855aa5 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -28,23 +28,45 @@
#ifndef V8_CPU_PROFILER_INL_H_
#define V8_CPU_PROFILER_INL_H_
+#include "cpu-profiler.h"
+
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
-#include "cpu-profiler.h"
-
namespace v8 {
namespace internal {
+void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ code_map->AddCode(start, entry, size);
+}
+
+
+void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ code_map->MoveCode(from, to);
+}
+
+
+void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ code_map->DeleteCode(start);
+}
+
+
+void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ code_map->AddAlias(alias, start);
+}
+
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
TickSampleEventRecord* evt =
- reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.Enqueue());
+ TickSampleEventRecord::cast(ticks_buffer_.Enqueue());
evt->order = enqueue_order_; // No increment!
return &evt->sample;
}
-
} } // namespace v8::internal
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
#endif // V8_CPU_PROFILER_INL_H_
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index d16c17f..4869e76 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -29,10 +29,13 @@
#include "cpu-profiler-inl.h"
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
+#include "log-inl.h"
+
namespace v8 {
namespace internal {
-
static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;
@@ -48,6 +51,21 @@
enqueue_order_(0) { }
+void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
+ const char* prefix,
+ String* name,
+ Address start) {
+ CodeEventsContainer evt_rec;
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->type = CodeEventRecord::CODE_CREATION;
+ rec->order = ++enqueue_order_;
+ rec->start = start;
+ rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+ rec->size = 1;
+ events_buffer_.Enqueue(evt_rec);
+}
+
+
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name,
@@ -163,7 +181,7 @@
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
const TickSampleEventRecord* rec =
- reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.StartDequeue());
+ TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return false;
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
@@ -196,4 +214,253 @@
}
+CpuProfiler* CpuProfiler::singleton_ = NULL;
+
+void CpuProfiler::StartProfiling(const char* title) {
+ ASSERT(singleton_ != NULL);
+ singleton_->StartCollectingProfile(title);
+}
+
+
+void CpuProfiler::StartProfiling(String* title) {
+ ASSERT(singleton_ != NULL);
+ singleton_->StartCollectingProfile(title);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(const char* title) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->StopCollectingProfile(title);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(String* title) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->StopCollectingProfile(title);
+}
+
+
+int CpuProfiler::GetProfilesCount() {
+ ASSERT(singleton_ != NULL);
+ return singleton_->profiles_->profiles()->length();
+}
+
+
+CpuProfile* CpuProfiler::GetProfile(int index) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->profiles_->profiles()->at(index);
+}
+
+
+CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->profiles_->GetProfile(uid);
+}
+
+
+TickSample* CpuProfiler::TickSampleEvent() {
+ ASSERT(singleton_ != NULL);
+ if (singleton_->is_profiling()) {
+ return singleton_->processor_->TickSampleEvent();
+ } else {
+ return NULL;
+ }
+}
+
+
+void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
+ singleton_->processor_->CallbackCreateEvent(
+ Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, const char* comment) {
+ singleton_->processor_->CodeCreateEvent(
+ tag, comment, code->address(), code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, String* name) {
+ singleton_->processor_->CodeCreateEvent(
+ tag,
+ name,
+ Heap::empty_string(),
+ CodeEntry::kNoLineNumberInfo,
+ code->address(),
+ code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, String* name,
+ String* source, int line) {
+ singleton_->processor_->CodeCreateEvent(
+ tag,
+ name,
+ source,
+ line,
+ code->address(),
+ code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, int args_count) {
+ singleton_->processor_->CodeCreateEvent(
+ tag,
+ args_count,
+ code->address(),
+ code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeMoveEvent(Address from, Address to) {
+ singleton_->processor_->CodeMoveEvent(from, to);
+}
+
+
+void CpuProfiler::CodeDeleteEvent(Address from) {
+ singleton_->processor_->CodeDeleteEvent(from);
+}
+
+
+void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
+ singleton_->processor_->FunctionCreateEvent(
+ function->address(), function->code()->address());
+}
+
+
+void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
+ singleton_->processor_->FunctionMoveEvent(from, to);
+}
+
+
+void CpuProfiler::FunctionDeleteEvent(Address from) {
+ singleton_->processor_->FunctionDeleteEvent(from);
+}
+
+
+void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
+ singleton_->processor_->CallbackCreateEvent(
+ Logger::CALLBACK_TAG, "get ", name, entry_point);
+}
+
+
+void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
+ singleton_->processor_->CodeCreateEvent(
+ Logger::REG_EXP_TAG,
+ source,
+ Heap::empty_string(),
+ CodeEntry::kNoLineNumberInfo,
+ code->address(),
+ code->ExecutableSize());
+}
+
+
+void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
+ singleton_->processor_->CallbackCreateEvent(
+ Logger::CALLBACK_TAG, "set ", name, entry_point);
+}
+
+
+CpuProfiler::CpuProfiler()
+ : profiles_(new CpuProfilesCollection()),
+ next_profile_uid_(1),
+ generator_(NULL),
+ processor_(NULL) {
+}
+
+
+CpuProfiler::~CpuProfiler() {
+ delete profiles_;
+}
+
+
+void CpuProfiler::StartCollectingProfile(const char* title) {
+ if (profiles_->StartProfiling(title, ++next_profile_uid_)) {
+ StartProcessorIfNotStarted();
+ }
+}
+
+
+void CpuProfiler::StartCollectingProfile(String* title) {
+ if (profiles_->StartProfiling(title, ++next_profile_uid_)) {
+ StartProcessorIfNotStarted();
+ }
+}
+
+
+void CpuProfiler::StartProcessorIfNotStarted() {
+ if (processor_ == NULL) {
+ generator_ = new ProfileGenerator(profiles_);
+ processor_ = new ProfilerEventsProcessor(generator_);
+ processor_->Start();
+ // Enumerate stuff we already have in the heap.
+ if (Heap::HasBeenSetup()) {
+ Logger::LogCodeObjects();
+ Logger::LogCompiledFunctions();
+ Logger::LogFunctionObjects();
+ Logger::LogAccessorCallbacks();
+ }
+ // Enable stack sampling.
+ Logger::ticker_->Start();
+ }
+}
+
+
+CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
+ StopProcessorIfLastProfile();
+ CpuProfile* result = profiles_->StopProfiling(title);
+ if (result != NULL) {
+ result->Print();
+ }
+ return result;
+}
+
+
+CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
+ StopProcessorIfLastProfile();
+ return profiles_->StopProfiling(title);
+}
+
+
+void CpuProfiler::StopProcessorIfLastProfile() {
+ if (profiles_->is_last_profile()) {
+ Logger::ticker_->Stop();
+ processor_->Stop();
+ processor_->Join();
+ delete processor_;
+ delete generator_;
+ processor_ = NULL;
+ generator_ = NULL;
+ }
+}
+
+} } // namespace v8::internal
+
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
+namespace v8 {
+namespace internal {
+
+void CpuProfiler::Setup() {
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (singleton_ == NULL) {
+ singleton_ = new CpuProfiler();
+ }
+#endif
+}
+
+
+void CpuProfiler::TearDown() {
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (singleton_ != NULL) {
+ delete singleton_;
+ }
+ singleton_ = NULL;
+#endif
+}
+
} } // namespace v8::internal
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 8a7d2fd..864df78 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -28,12 +28,20 @@
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
#include "circular-queue.h"
-#include "profile-generator.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class CodeEntry;
+class CodeMap;
+class CpuProfile;
+class CpuProfilesCollection;
+class ProfileGenerator;
+
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@@ -63,9 +71,7 @@
CodeEntry* entry;
unsigned size;
- INLINE(void UpdateCodeMap(CodeMap* code_map)) {
- code_map->AddCode(start, entry, size);
- }
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -74,9 +80,7 @@
Address from;
Address to;
- INLINE(void UpdateCodeMap(CodeMap* code_map)) {
- code_map->MoveCode(from, to);
- }
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -84,9 +88,7 @@
public:
Address start;
- INLINE(void UpdateCodeMap(CodeMap* code_map)) {
- code_map->DeleteCode(start);
- }
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -95,13 +97,11 @@
Address alias;
Address start;
- INLINE(void UpdateCodeMap(CodeMap* code_map)) {
- code_map->AddAlias(alias, start);
- }
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
};
-class TickSampleEventRecord {
+class TickSampleEventRecord BASE_EMBEDDED {
public:
// In memory, the first machine word of a TickSampleEventRecord will be the
// first entry of TickSample, that is -- a program counter field.
@@ -110,18 +110,12 @@
TickSample sample;
unsigned order;
-#if defined(__GNUC__) && (__GNUC__ < 4)
- // Added to avoid 'all member functions in class are private' warning.
- INLINE(unsigned get_order() const) { return order; }
- // Added to avoid 'class only defines private constructors and
- // has no friends' warning.
- friend class TickSampleEventRecordFriend;
-#endif
- private:
- // Disable instantiation.
- TickSampleEventRecord();
+ static TickSampleEventRecord* cast(void* value) {
+ return reinterpret_cast<TickSampleEventRecord*>(value);
+ }
- DISALLOW_COPY_AND_ASSIGN(TickSampleEventRecord);
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
};
@@ -138,6 +132,9 @@
INLINE(bool running()) { return running_; }
// Events adding methods. Called by VM threads.
+ void CallbackCreateEvent(Logger::LogEventsAndTags tag,
+ const char* prefix, String* name,
+ Address start);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name, int line_number,
@@ -179,7 +176,93 @@
unsigned enqueue_order_;
};
+} } // namespace v8::internal
+
+
+#define PROFILE(Call) \
+ LOG(Call); \
+ do { \
+ if (v8::internal::CpuProfiler::is_profiling()) { \
+ v8::internal::CpuProfiler::Call; \
+ } \
+ } while (false)
+#else
+#define PROFILE(Call) LOG(Call)
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
+
+namespace v8 {
+namespace internal {
+
+class CpuProfiler {
+ public:
+ static void Setup();
+ static void TearDown();
+
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ static void StartProfiling(const char* title);
+ static void StartProfiling(String* title);
+ static CpuProfile* StopProfiling(const char* title);
+ static CpuProfile* StopProfiling(String* title);
+ static int GetProfilesCount();
+ static CpuProfile* GetProfile(int index);
+ static CpuProfile* FindProfile(unsigned uid);
+
+ // Invoked from stack sampler (thread or signal handler.)
+ static TickSample* TickSampleEvent();
+
+ // Must be called via PROFILE macro, otherwise will crash when
+ // profiling is not enabled.
+ static void CallbackEvent(String* name, Address entry_point);
+ static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, const char* comment);
+ static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, String* name);
+ static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, String* name,
+ String* source, int line);
+ static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code, int args_count);
+ static void CodeMoveEvent(Address from, Address to);
+ static void CodeDeleteEvent(Address from);
+ static void FunctionCreateEvent(JSFunction* function);
+ static void FunctionMoveEvent(Address from, Address to);
+ static void FunctionDeleteEvent(Address from);
+ static void GetterCallbackEvent(String* name, Address entry_point);
+ static void RegExpCodeCreateEvent(Code* code, String* source);
+ static void SetterCallbackEvent(String* name, Address entry_point);
+
+ static INLINE(bool is_profiling()) {
+ ASSERT(singleton_ != NULL);
+ return singleton_->processor_ != NULL;
+ }
+
+ private:
+ CpuProfiler();
+ ~CpuProfiler();
+ void StartCollectingProfile(const char* title);
+ void StartCollectingProfile(String* title);
+ void StartProcessorIfNotStarted();
+ CpuProfile* StopCollectingProfile(const char* title);
+ CpuProfile* StopCollectingProfile(String* title);
+ void StopProcessorIfLastProfile();
+
+ CpuProfilesCollection* profiles_;
+ unsigned next_profile_uid_;
+ ProfileGenerator* generator_;
+ ProfilerEventsProcessor* processor_;
+
+ static CpuProfiler* singleton_;
+
+#else
+ static INLINE(bool is_profiling()) { return false; }
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
+};
} } // namespace v8::internal
+
#endif // V8_CPU_PROFILER_H_
diff --git a/src/d8.js b/src/d8.js
index be4a051..369ab65 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -164,7 +164,7 @@
Debug.State.currentFrame = 0;
details.text = result;
break;
-
+
case 'exception':
if (body.uncaught) {
result += 'Uncaught: ';
@@ -212,7 +212,7 @@
function SourceInfo(body) {
var result = '';
-
+
if (body.script) {
if (body.script.name) {
result += body.script.name;
@@ -224,7 +224,7 @@
result += body.sourceLine + 1;
result += ' column ';
result += body.sourceColumn + 1;
-
+
return result;
}
@@ -297,20 +297,20 @@
case 'bt':
this.request_ = this.backtraceCommandToJSONRequest_(args);
break;
-
+
case 'frame':
case 'f':
this.request_ = this.frameCommandToJSONRequest_(args);
break;
-
+
case 'scopes':
this.request_ = this.scopesCommandToJSONRequest_(args);
break;
-
+
case 'scope':
this.request_ = this.scopeCommandToJSONRequest_(args);
break;
-
+
case 'print':
case 'p':
this.request_ = this.printCommandToJSONRequest_(args);
@@ -331,16 +331,16 @@
case 'source':
this.request_ = this.sourceCommandToJSONRequest_(args);
break;
-
+
case 'scripts':
this.request_ = this.scriptsCommandToJSONRequest_(args);
break;
-
+
case 'break':
case 'b':
this.request_ = this.breakCommandToJSONRequest_(args);
break;
-
+
case 'clear':
this.request_ = this.clearCommandToJSONRequest_(args);
break;
@@ -365,7 +365,7 @@
default:
throw new Error('Unknown command "' + cmd + '"');
}
-
+
last_cmd = cmd;
}
@@ -490,22 +490,22 @@
case 'i':
request.arguments.stepaction = 'in';
break;
-
+
case 'min':
case 'm':
request.arguments.stepaction = 'min';
break;
-
+
case 'next':
case 'n':
request.arguments.stepaction = 'next';
break;
-
+
case 'out':
case 'o':
request.arguments.stepaction = 'out';
break;
-
+
default:
throw new Error('Invalid step argument "' + args[0] + '".');
}
@@ -523,7 +523,7 @@
DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
// Build a backtrace request from the text command.
var request = this.createRequest('backtrace');
-
+
// Default is to show top 10 frames.
request.arguments = {};
request.arguments.fromFrame = 0;
@@ -626,7 +626,7 @@
if (args.length == 0) {
throw new Error('Missing object id.');
}
-
+
return this.makeReferencesJSONRequest_(args, 'referencedBy');
};
@@ -637,7 +637,7 @@
if (args.length == 0) {
throw new Error('Missing object id.');
}
-
+
// Build a references request.
return this.makeReferencesJSONRequest_(args, 'constructedBy');
};
@@ -691,18 +691,18 @@
case 'natives':
request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
break;
-
+
case 'extensions':
request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
break;
-
+
case 'all':
request.arguments.types =
ScriptTypeFlag(Debug.ScriptType.Normal) |
ScriptTypeFlag(Debug.ScriptType.Native) |
ScriptTypeFlag(Debug.ScriptType.Extension);
break;
-
+
default:
throw new Error('Invalid argument "' + args[0] + '".');
}
@@ -740,7 +740,7 @@
type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
-
+
// Check for both line and column.
pos = tmp.indexOf(':');
if (pos > 0) {
@@ -755,7 +755,7 @@
} else {
type = 'function';
}
-
+
request.arguments = {};
request.arguments.type = type;
request.arguments.target = target;
@@ -936,13 +936,13 @@
result += body.breakpoint;
details.text = result;
break;
-
+
case 'clearbreakpoint':
result = 'cleared breakpoint #';
result += body.breakpoint;
details.text = result;
break;
-
+
case 'backtrace':
if (body.totalFrames == 0) {
result = '(empty stack)';
@@ -956,14 +956,14 @@
}
details.text = result;
break;
-
+
case 'frame':
details.text = SourceUnderline(body.sourceLineText,
body.column);
Debug.State.currentSourceLine = body.line;
Debug.State.currentFrame = body.index;
break;
-
+
case 'scopes':
if (body.totalScopes == 0) {
result = '(no scopes)';
@@ -987,7 +987,7 @@
result += formatObject_(scope_object_value, true);
details.text = result;
break;
-
+
case 'evaluate':
case 'lookup':
if (last_cmd == 'p' || last_cmd == 'print') {
@@ -1031,7 +1031,7 @@
}
details.text = result;
break;
-
+
case 'source':
// Get the source from the response.
var source = body.source;
@@ -1066,7 +1066,7 @@
}
details.text = result;
break;
-
+
case 'scripts':
var result = '';
for (i = 0; i < body.length; i++) {
@@ -1128,7 +1128,7 @@
case 'continue':
details.text = "(running)";
break;
-
+
default:
details.text =
'Response for unknown command \'' + response.command + '\'' +
@@ -1137,7 +1137,7 @@
} catch (e) {
details.text = 'Error: "' + e + '" formatting response';
}
-
+
return details;
};
@@ -1254,7 +1254,7 @@
/**
- * Get a metadata field from a protocol value.
+ * Get a metadata field from a protocol value.
* @return {Object} the metadata field value
*/
ProtocolValue.prototype.field = function(name) {
@@ -1435,12 +1435,12 @@
function BooleanToJSON_(value) {
- return String(value);
+ return String(value);
}
function NumberToJSON_(value) {
- return String(value);
+ return String(value);
}
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 1bc77c0..4e7620a 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -28,7 +28,6 @@
#include "v8.h"
#include "data-flow.h"
-#include "flow-graph.h"
#include "scopes.h"
namespace v8 {
@@ -621,21 +620,34 @@
void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
ASSERT(av_.IsEmpty());
- if (expr->target()->AsProperty() != NULL) {
- // Visit receiver and key of property store and rhs.
- Visit(expr->target()->AsProperty()->obj());
- ProcessExpression(expr->target()->AsProperty()->key());
- ProcessExpression(expr->value());
+ // There are three kinds of assignments: variable assignments, property
+ // assignments, and reference errors (invalid left-hand sides).
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
- // If we have a variable as a receiver in a property store, check if
- // we can mark it as trivial.
- MarkIfTrivial(expr->target()->AsProperty()->obj());
+ if (var != NULL) {
+ MarkIfTrivial(expr->value());
+ Visit(expr->value());
+ if (expr->is_compound()) {
+ // Left-hand side occurs also as an rvalue.
+ MarkIfTrivial(expr->target());
+ ProcessExpression(expr->target());
+ }
+ RecordAssignedVar(var);
+
+ } else if (prop != NULL) {
+ MarkIfTrivial(expr->value());
+ Visit(expr->value());
+ if (!prop->key()->IsPropertyName()) {
+ MarkIfTrivial(prop->key());
+ ProcessExpression(prop->key());
+ }
+ MarkIfTrivial(prop->obj());
+ ProcessExpression(prop->obj());
+
} else {
Visit(expr->target());
- ProcessExpression(expr->value());
-
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- if (var != NULL) RecordAssignedVar(var);
}
}
@@ -648,12 +660,12 @@
void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
ASSERT(av_.IsEmpty());
- Visit(expr->obj());
- ProcessExpression(expr->key());
-
- // In case we have a variable as a receiver, check if we can mark
- // it as trivial.
+ if (!expr->key()->IsPropertyName()) {
+ MarkIfTrivial(expr->key());
+ Visit(expr->key());
+ }
MarkIfTrivial(expr->obj());
+ ProcessExpression(expr->obj());
}
@@ -713,25 +725,19 @@
void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(av_.IsEmpty());
- Visit(expr->left());
-
- ProcessExpression(expr->right());
-
- // In case we have a variable on the left side, check if we can mark
- // it as trivial.
+ MarkIfTrivial(expr->right());
+ Visit(expr->right());
MarkIfTrivial(expr->left());
+ ProcessExpression(expr->left());
}
void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
ASSERT(av_.IsEmpty());
- Visit(expr->left());
-
- ProcessExpression(expr->right());
-
- // In case we have a variable on the left side, check if we can mark
- // it as trivial.
+ MarkIfTrivial(expr->right());
+ Visit(expr->right());
MarkIfTrivial(expr->left());
+ ProcessExpression(expr->left());
}
@@ -746,802 +752,4 @@
}
-int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
- // Parameters are numbered left-to-right from the beginning of the bit
- // set. Stack-allocated locals are allocated right-to-left from the end.
- ASSERT(var != NULL && var->IsStackAllocated());
- Slot* slot = var->slot();
- if (slot->type() == Slot::PARAMETER) {
- return slot->index();
- } else {
- return (variable_count - 1) - slot->index();
- }
-}
-
-
-void Node::InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark) {
- ASSERT(!IsMarkedWith(mark));
- rd_.Initialize(definition_count);
- MarkWith(mark);
- worklist->Insert(this);
-}
-
-
-void BlockNode::InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark) {
- ASSERT(!IsMarkedWith(mark));
- int instruction_count = instructions_.length();
- int variable_count = variables->length();
-
- rd_.Initialize(definition_count);
- // The RD_in set for the entry node has a definition for each parameter
- // and local.
- if (predecessor_ == NULL) {
- for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
- }
-
- for (int i = 0; i < instruction_count; i++) {
- Expression* expr = instructions_[i]->AsExpression();
- if (expr == NULL) continue;
- Variable* var = expr->AssignedVariable();
- if (var == NULL || !var->IsStackAllocated()) continue;
-
- // All definitions of this variable are killed.
- BitVector* def_set =
- variables->at(ReachingDefinitions::IndexFor(var, variable_count));
- rd_.kill()->Union(*def_set);
-
- // All previously generated definitions are not generated.
- rd_.gen()->Subtract(*def_set);
-
- // This one is generated.
- rd_.gen()->Add(expr->num());
- }
-
- // Add all blocks except the entry node to the worklist.
- if (predecessor_ != NULL) {
- MarkWith(mark);
- worklist->Insert(this);
- }
-}
-
-
-void ExitNode::ComputeRDOut(BitVector* result) {
- // Should not be the predecessor of any node.
- UNREACHABLE();
-}
-
-
-void BlockNode::ComputeRDOut(BitVector* result) {
- // All definitions reaching this block ...
- *result = *rd_.rd_in();
- // ... except those killed by the block ...
- result->Subtract(*rd_.kill());
- // ... but including those generated by the block.
- result->Union(*rd_.gen());
-}
-
-
-void BranchNode::ComputeRDOut(BitVector* result) {
- // Branch nodes don't kill or generate definitions.
- *result = *rd_.rd_in();
-}
-
-
-void JoinNode::ComputeRDOut(BitVector* result) {
- // Join nodes don't kill or generate definitions.
- *result = *rd_.rd_in();
-}
-
-
-void ExitNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- // The exit node has no successors so we can just update in place. New
- // RD_in is the union over all predecessors.
- int definition_count = rd_.rd_in()->length();
- rd_.rd_in()->Clear();
-
- BitVector temp(definition_count);
- for (int i = 0, len = predecessors_.length(); i < len; i++) {
- // Because ComputeRDOut always overwrites temp and its value is
- // always read out before calling ComputeRDOut again, we do not
- // have to clear it on each iteration of the loop.
- predecessors_[i]->ComputeRDOut(&temp);
- rd_.rd_in()->Union(temp);
- }
-}
-
-
-void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- // The entry block has no predecessor. Its RD_in does not change.
- if (predecessor_ == NULL) return;
-
- BitVector new_rd_in(rd_.rd_in()->length());
- predecessor_->ComputeRDOut(&new_rd_in);
-
- if (rd_.rd_in()->Equals(new_rd_in)) return;
-
- // Update RD_in.
- *rd_.rd_in() = new_rd_in;
- // Add the successor to the worklist if not already present.
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- worklist->Insert(successor_);
- }
-}
-
-
-void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- BitVector new_rd_in(rd_.rd_in()->length());
- predecessor_->ComputeRDOut(&new_rd_in);
-
- if (rd_.rd_in()->Equals(new_rd_in)) return;
-
- // Update RD_in.
- *rd_.rd_in() = new_rd_in;
- // Add the successors to the worklist if not already present.
- if (!successor0_->IsMarkedWith(mark)) {
- successor0_->MarkWith(mark);
- worklist->Insert(successor0_);
- }
- if (!successor1_->IsMarkedWith(mark)) {
- successor1_->MarkWith(mark);
- worklist->Insert(successor1_);
- }
-}
-
-
-void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- int definition_count = rd_.rd_in()->length();
- BitVector new_rd_in(definition_count);
-
- // New RD_in is the union over all predecessors.
- BitVector temp(definition_count);
- for (int i = 0, len = predecessors_.length(); i < len; i++) {
- predecessors_[i]->ComputeRDOut(&temp);
- new_rd_in.Union(temp);
- }
-
- if (rd_.rd_in()->Equals(new_rd_in)) return;
-
- // Update RD_in.
- *rd_.rd_in() = new_rd_in;
- // Add the successor to the worklist if not already present.
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- worklist->Insert(successor_);
- }
-}
-
-
-void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
- // Nothing to do.
-}
-
-
-void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
- // Propagate RD_in from the start of the block to all the variable
- // references.
- int variable_count = variables->length();
- BitVector rd = *rd_.rd_in();
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- Expression* expr = instructions_[i]->AsExpression();
- if (expr == NULL) continue;
-
- // Look for a variable reference to record its reaching definitions.
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy == NULL) {
- // Not a VariableProxy? Maybe it's a count operation.
- CountOperation* count_operation = expr->AsCountOperation();
- if (count_operation != NULL) {
- proxy = count_operation->expression()->AsVariableProxy();
- }
- }
- if (proxy == NULL) {
- // OK, Maybe it's a compound assignment.
- Assignment* assignment = expr->AsAssignment();
- if (assignment != NULL && assignment->is_compound()) {
- proxy = assignment->target()->AsVariableProxy();
- }
- }
-
- if (proxy != NULL &&
- proxy->var()->IsStackAllocated() &&
- !proxy->var()->is_this()) {
- // All definitions for this variable.
- BitVector* definitions =
- variables->at(ReachingDefinitions::IndexFor(proxy->var(),
- variable_count));
- BitVector* reaching_definitions = new BitVector(*definitions);
- // Intersected with all definitions (of any variable) reaching this
- // instruction.
- reaching_definitions->Intersect(rd);
- proxy->set_reaching_definitions(reaching_definitions);
- }
-
- // It may instead (or also) be a definition. If so update the running
- // value of reaching definitions for the block.
- Variable* var = expr->AssignedVariable();
- if (var == NULL || !var->IsStackAllocated()) continue;
-
- // All definitions of this variable are killed.
- BitVector* def_set =
- variables->at(ReachingDefinitions::IndexFor(var, variable_count));
- rd.Subtract(*def_set);
- // This definition is generated.
- rd.Add(expr->num());
- }
-}
-
-
-void ReachingDefinitions::Compute() {
- // The definitions in the body plus an implicit definition for each
- // variable at function entry.
- int definition_count = body_definitions_->length() + variable_count_;
- int node_count = postorder_->length();
-
- // Step 1: For each stack-allocated variable, identify the set of all its
- // definitions.
- List<BitVector*> variables;
- for (int i = 0; i < variable_count_; i++) {
- // Add the initial definition for each variable.
- BitVector* initial = new BitVector(definition_count);
- initial->Add(i);
- variables.Add(initial);
- }
- for (int i = 0, len = body_definitions_->length(); i < len; i++) {
- // Account for each definition in the body as a definition of the
- // defined variable.
- Variable* var = body_definitions_->at(i)->AssignedVariable();
- variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
- }
-
- // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
- // all nodes, and mark and add all nodes to the worklist in reverse
- // postorder. All nodes should currently have the same mark.
- bool mark = postorder_->at(0)->IsMarkedWith(false); // Negation of current.
- WorkList<Node> worklist(node_count);
- for (int i = node_count - 1; i >= 0; i--) {
- postorder_->at(i)->InitializeReachingDefinitions(definition_count,
- &variables,
- &worklist,
- mark);
- }
-
- // Step 3: Until the worklist is empty, remove an item compute and update
- // its rd_in based on its predecessor's rd_out. If rd_in has changed, add
- // all necessary successors to the worklist.
- while (!worklist.is_empty()) {
- Node* node = worklist.Remove();
- node->MarkWith(!mark);
- node->UpdateRDIn(&worklist, mark);
- }
-
- // Step 4: Based on RD_in for block nodes, propagate reaching definitions
- // to all variable uses in the block.
- for (int i = 0; i < node_count; i++) {
- postorder_->at(i)->PropagateReachingDefinitions(&variables);
- }
-}
-
-
-bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
- if (def_num < param_count_) return false;
- if (def_num < variable_count_) return true;
- return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
-}
-
-
-void TypeAnalyzer::Compute() {
- bool changed;
- int count = 0;
-
- do {
- changed = false;
-
- if (FLAG_print_graph_text) {
- PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
- }
-
- for (int i = postorder_->length() - 1; i >= 0; --i) {
- Node* node = postorder_->at(i);
- if (node->IsBlockNode()) {
- BlockNode* block = BlockNode::cast(node);
- for (int j = 0; j < block->instructions()->length(); j++) {
- Expression* expr = block->instructions()->at(j)->AsExpression();
- if (expr != NULL) {
- // For variable uses: Compute new type from reaching definitions.
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->reaching_definitions() != NULL) {
- BitVector* rd = proxy->reaching_definitions();
- bool prim_type = true;
- // TODO(fsc): A sparse set representation of reaching
- // definitions would speed up iterating here.
- for (int k = 0; k < rd->length(); k++) {
- if (rd->Contains(k) && !IsPrimitiveDef(k)) {
- prim_type = false;
- break;
- }
- }
- // Reset changed flag if new type information was computed.
- if (prim_type != proxy->IsPrimitive()) {
- changed = true;
- proxy->SetIsPrimitive(prim_type);
- }
- }
- }
- }
- }
- }
- } while (changed);
-}
-
-
-void Node::MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
-}
-
-
-void BlockNode::MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- for (int i = instructions_.length() - 1; i >= 0; i--) {
- // Only expressions can appear in the flow graph for now.
- Expression* expr = instructions_[i]->AsExpression();
- if (expr != NULL && !expr->is_live() &&
- (expr->is_loop_condition() || expr->IsCritical())) {
- expr->mark_as_live();
- expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
- }
- }
-}
-
-
-void MarkLiveCode(ZoneList<Node*>* nodes,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- List<AstNode*> stack(20);
-
- // Mark the critical AST nodes as live; mark their dependencies and
- // add them to the marking stack.
- for (int i = nodes->length() - 1; i >= 0; i--) {
- nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
- variable_count);
- }
-
- // Continue marking dependencies until no more.
- while (!stack.is_empty()) {
- // Only expressions can appear in the flow graph for now.
- Expression* expr = stack.RemoveLast()->AsExpression();
- if (expr != NULL) {
- expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
- }
- }
-}
-
-
-#ifdef DEBUG
-
-// Print a textual representation of an instruction in a flow graph. Using
-// the AstVisitor is overkill because there is no recursion here. It is
-// only used for printing in debug mode.
-class TextInstructionPrinter: public AstVisitor {
- public:
- TextInstructionPrinter() : number_(0) {}
-
- int NextNumber() { return number_; }
- void AssignNumber(AstNode* node) { node->set_num(number_++); }
-
- private:
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- int number_;
-
- DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter);
-};
-
-
-void TextInstructionPrinter::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitBlock(Block* stmt) {
- PrintF("Block");
-}
-
-
-void TextInstructionPrinter::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- PrintF("ExpressionStatement");
-}
-
-
-void TextInstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
- PrintF("EmptyStatement");
-}
-
-
-void TextInstructionPrinter::VisitIfStatement(IfStatement* stmt) {
- PrintF("IfStatement");
-}
-
-
-void TextInstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
- PrintF("return @%d", stmt->expression()->num());
-}
-
-
-void TextInstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
- PrintF("WithEnterStatement");
-}
-
-
-void TextInstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
- PrintF("WithExitStatement");
-}
-
-
-void TextInstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
- PrintF("DoWhileStatement");
-}
-
-
-void TextInstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
- PrintF("WhileStatement");
-}
-
-
-void TextInstructionPrinter::VisitForStatement(ForStatement* stmt) {
- PrintF("ForStatement");
-}
-
-
-void TextInstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
- PrintF("ForInStatement");
-}
-
-
-void TextInstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
- PrintF("DebuggerStatement");
-}
-
-
-void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
- PrintF("FunctionLiteral");
-}
-
-
-void TextInstructionPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- PrintF("SharedFunctionInfoLiteral");
-}
-
-
-void TextInstructionPrinter::VisitConditional(Conditional* expr) {
- PrintF("Conditional");
-}
-
-
-void TextInstructionPrinter::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
- Variable* var = expr->AsVariable();
- if (var != NULL) {
- PrintF("%s", *var->name()->ToCString());
- if (var->IsStackAllocated() && expr->reaching_definitions() != NULL) {
- expr->reaching_definitions()->Print();
- }
- } else {
- ASSERT(expr->AsProperty() != NULL);
- VisitProperty(expr->AsProperty());
- }
-}
-
-
-void TextInstructionPrinter::VisitLiteral(Literal* expr) {
- expr->handle()->ShortPrint();
-}
-
-
-void TextInstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
- PrintF("RegExpLiteral");
-}
-
-
-void TextInstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
- PrintF("ObjectLiteral");
-}
-
-
-void TextInstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
- PrintF("ArrayLiteral");
-}
-
-
-void TextInstructionPrinter::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- PrintF("CatchExtensionObject");
-}
-
-
-void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
-
- if (var == NULL && prop == NULL) {
- // Throw reference error.
- Visit(expr->target());
- return;
- }
-
- // Print the left-hand side.
- if (var != NULL) {
- PrintF("%s", *var->name()->ToCString());
- } else if (prop != NULL) {
- PrintF("@%d", prop->obj()->num());
- if (prop->key()->IsPropertyName()) {
- PrintF(".");
- ASSERT(prop->key()->AsLiteral() != NULL);
- prop->key()->AsLiteral()->handle()->Print();
- } else {
- PrintF("[@%d]", prop->key()->num());
- }
- }
-
- // Print the operation.
- if (expr->is_compound()) {
- PrintF(" = ");
- // Print the left-hand side again when compound.
- if (var != NULL) {
- PrintF("@%d", expr->target()->num());
- } else {
- PrintF("@%d", prop->obj()->num());
- if (prop->key()->IsPropertyName()) {
- PrintF(".");
- ASSERT(prop->key()->AsLiteral() != NULL);
- prop->key()->AsLiteral()->handle()->Print();
- } else {
- PrintF("[@%d]", prop->key()->num());
- }
- }
- // Print the corresponding binary operator.
- PrintF(" %s ", Token::String(expr->binary_op()));
- } else {
- PrintF(" %s ", Token::String(expr->op()));
- }
-
- // Print the right-hand side.
- PrintF("@%d", expr->value()->num());
-
- if (expr->num() != AstNode::kNoNumber) {
- PrintF(" ;; D%d", expr->num());
- }
-}
-
-
-void TextInstructionPrinter::VisitThrow(Throw* expr) {
- PrintF("throw @%d", expr->exception()->num());
-}
-
-
-void TextInstructionPrinter::VisitProperty(Property* expr) {
- if (expr->key()->IsPropertyName()) {
- PrintF("@%d.", expr->obj()->num());
- ASSERT(expr->key()->AsLiteral() != NULL);
- expr->key()->AsLiteral()->handle()->Print();
- } else {
- PrintF("@%d[@%d]", expr->obj()->num(), expr->key()->num());
- }
-}
-
-
-void TextInstructionPrinter::VisitCall(Call* expr) {
- PrintF("@%d(", expr->expression()->num());
- ZoneList<Expression*>* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- if (i != 0) PrintF(", ");
- PrintF("@%d", arguments->at(i)->num());
- }
- PrintF(")");
-}
-
-
-void TextInstructionPrinter::VisitCallNew(CallNew* expr) {
- PrintF("new @%d(", expr->expression()->num());
- ZoneList<Expression*>* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- if (i != 0) PrintF(", ");
- PrintF("@%d", arguments->at(i)->num());
- }
- PrintF(")");
-}
-
-
-void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
- PrintF("%s(", *expr->name()->ToCString());
- ZoneList<Expression*>* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- if (i != 0) PrintF(", ");
- PrintF("@%d", arguments->at(i)->num());
- }
- PrintF(")");
-}
-
-
-void TextInstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
- PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
-}
-
-
-void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) {
- if (expr->is_prefix()) {
- PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
- } else {
- PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
- }
-
- if (expr->num() != AstNode::kNoNumber) {
- PrintF(" ;; D%d", expr->num());
- }
-}
-
-
-void TextInstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
- ASSERT(expr->op() != Token::COMMA);
- ASSERT(expr->op() != Token::OR);
- ASSERT(expr->op() != Token::AND);
- PrintF("@%d %s @%d",
- expr->left()->num(),
- Token::String(expr->op()),
- expr->right()->num());
-}
-
-
-void TextInstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
- PrintF("@%d %s @%d",
- expr->left()->num(),
- Token::String(expr->op()),
- expr->right()->num());
-}
-
-
-void TextInstructionPrinter::VisitThisFunction(ThisFunction* expr) {
- PrintF("ThisFunction");
-}
-
-
-static int node_count = 0;
-static int instruction_count = 0;
-
-
-void Node::AssignNodeNumber() {
- set_number(node_count++);
-}
-
-
-void Node::PrintReachingDefinitions() {
- if (rd_.rd_in() != NULL) {
- ASSERT(rd_.kill() != NULL && rd_.gen() != NULL);
-
- PrintF("RD_in = ");
- rd_.rd_in()->Print();
- PrintF("\n");
-
- PrintF("RD_kill = ");
- rd_.kill()->Print();
- PrintF("\n");
-
- PrintF("RD_gen = ");
- rd_.gen()->Print();
- PrintF("\n");
- }
-}
-
-
-void ExitNode::PrintText() {
- PrintReachingDefinitions();
- PrintF("L%d: Exit\n\n", number());
-}
-
-
-void BlockNode::PrintText() {
- PrintReachingDefinitions();
- // Print the instructions in the block.
- PrintF("L%d: Block\n", number());
- TextInstructionPrinter printer;
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- AstNode* instr = instructions_[i];
- // Print a star next to dead instructions.
- if (instr->AsExpression() != NULL && instr->AsExpression()->is_live()) {
- PrintF(" ");
- } else {
- PrintF("* ");
- }
- PrintF("%d ", printer.NextNumber());
- printer.Visit(instr);
- printer.AssignNumber(instr);
- PrintF("\n");
- }
- PrintF("goto L%d\n\n", successor_->number());
-}
-
-
-void BranchNode::PrintText() {
- PrintReachingDefinitions();
- PrintF("L%d: Branch\n", number());
- PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number());
-}
-
-
-void JoinNode::PrintText() {
- PrintReachingDefinitions();
- PrintF("L%d: Join(", number());
- for (int i = 0, len = predecessors_.length(); i < len; i++) {
- if (i != 0) PrintF(", ");
- PrintF("L%d", predecessors_[i]->number());
- }
- PrintF(")\ngoto L%d\n\n", successor_->number());
-}
-
-
-void FlowGraph::PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder) {
- PrintF("\n========\n");
- PrintF("name = %s\n", *fun->name()->ToCString());
-
- // Number nodes and instructions in reverse postorder.
- node_count = 0;
- instruction_count = 0;
- for (int i = postorder->length() - 1; i >= 0; i--) {
- postorder->at(i)->AssignNodeNumber();
- }
-
- // Print basic blocks in reverse postorder.
- for (int i = postorder->length() - 1; i >= 0; i--) {
- postorder->at(i)->PrintText();
- }
-}
-
-#endif // DEBUG
-
-
} } // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
index 66df635..079da65 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -272,65 +272,6 @@
};
-class ReachingDefinitions BASE_EMBEDDED {
- public:
- ReachingDefinitions(ZoneList<Node*>* postorder,
- ZoneList<Expression*>* body_definitions,
- int variable_count)
- : postorder_(postorder),
- body_definitions_(body_definitions),
- variable_count_(variable_count) {
- }
-
- static int IndexFor(Variable* var, int variable_count);
-
- void Compute();
-
- private:
- // A (postorder) list of flow-graph nodes in the body.
- ZoneList<Node*>* postorder_;
-
- // A list of all the definitions in the body.
- ZoneList<Expression*>* body_definitions_;
-
- int variable_count_;
-
- DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions);
-};
-
-
-class TypeAnalyzer BASE_EMBEDDED {
- public:
- TypeAnalyzer(ZoneList<Node*>* postorder,
- ZoneList<Expression*>* body_definitions,
- int variable_count,
- int param_count)
- : postorder_(postorder),
- body_definitions_(body_definitions),
- variable_count_(variable_count),
- param_count_(param_count) {}
-
- void Compute();
-
- private:
- // Get the primitity of definition number i. Definitions are numbered
- // by the flow graph builder.
- bool IsPrimitiveDef(int def_num);
-
- ZoneList<Node*>* postorder_;
- ZoneList<Expression*>* body_definitions_;
- int variable_count_;
- int param_count_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeAnalyzer);
-};
-
-
-void MarkLiveCode(ZoneList<Node*>* nodes,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
-
-
} } // namespace v8::internal
diff --git a/src/date.js b/src/date.js
index 46769c6..216d5df 100644
--- a/src/date.js
+++ b/src/date.js
@@ -588,6 +588,20 @@
function LocalTimezoneString(time) {
+ var old_timezone = timezone_cache_timezone;
+ var timezone = LocalTimezone(time);
+ if (old_timezone && timezone != old_timezone) {
+ // If the timezone string has changed from the one that we cached,
+ // the local time offset may now be wrong. So we need to update it
+ // and try again.
+ local_time_offset = %DateLocalTimeOffset();
+ // We also need to invalidate the DST cache as the new timezone may have
+ // different DST times.
+ var dst_cache = DST_offset_cache;
+ dst_cache.start = 0;
+ dst_cache.end = -1;
+ }
+
var timezoneOffset =
(DaylightSavingsOffset(time) + local_time_offset) / msPerMinute;
var sign = (timezoneOffset >= 0) ? 1 : -1;
@@ -595,7 +609,7 @@
var min = FLOOR((sign * timezoneOffset)%60);
var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
TwoDigitString(hours) + TwoDigitString(min);
- return gmt + ' (' + LocalTimezone(time) + ')';
+ return gmt + ' (' + timezone + ')';
}
@@ -654,7 +668,8 @@
function DateToString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
+ var time_zone_string = LocalTimezoneString(t); // May update local offset.
+ return DatePrintString(LocalTimeNoCheck(t)) + time_zone_string;
}
@@ -670,8 +685,8 @@
function DateToTimeString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var lt = LocalTimeNoCheck(t);
- return TimeString(lt) + LocalTimezoneString(lt);
+ var time_zone_string = LocalTimezoneString(t); // May update local offset.
+ return TimeString(LocalTimeNoCheck(t)) + time_zone_string;
}
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index a81530e..cf949fc 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -327,7 +327,7 @@
if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
return this.script_id_ == script.id;
} else { // this.type_ == Debug.ScriptBreakPointType.ScriptName
- return this.script_name_ == script.name &&
+ return this.script_name_ == script.nameOrSourceURL() &&
script.line_offset <= this.line_ &&
this.line_ < script.line_offset + script.lineCount();
}
@@ -1274,7 +1274,7 @@
// Response controls running state.
this.running_ = response.running;
}
- response.running = this.running_;
+ response.running = this.running_;
return response.toJSONProtocol();
} catch (e) {
// Failed to generate response - return generic error.
@@ -1870,12 +1870,12 @@
return response.failed('Invalid types "' + request.arguments.types + '"');
}
}
-
+
if (!IS_UNDEFINED(request.arguments.includeSource)) {
includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
-
+
if (IS_ARRAY(request.arguments.ids)) {
idsToInclude = {};
var ids = request.arguments.ids;
@@ -1966,13 +1966,6 @@
return response.failed('Missing arguments');
}
var script_id = request.arguments.script_id;
- var change_pos = parseInt(request.arguments.change_pos);
- var change_len = parseInt(request.arguments.change_len);
- var new_string = request.arguments.new_string;
- if (!IS_STRING(new_string)) {
- response.failed('Argument "new_string" is not a string value');
- return;
- }
var scripts = %DebugGetLoadedScripts();
@@ -1986,16 +1979,38 @@
response.failed('Script not found');
return;
}
+
+  // A function that calls the appropriate LiveEdit API entry point.
+ var invocation;
var change_log = new Array();
+
+ if (IS_STRING(request.arguments.new_source)) {
+ var new_source = request.arguments.new_source;
+ invocation = function() {
+ return Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
+ }
+ } else {
+ var change_pos = parseInt(request.arguments.change_pos);
+ var change_len = parseInt(request.arguments.change_len);
+ var new_string = request.arguments.new_string;
+ if (!IS_STRING(new_string)) {
+ response.failed('Argument "new_string" is not a string value');
+ return;
+ }
+ invocation = function() {
+ return Debug.LiveEditChangeScript(the_script, change_pos, change_len,
+ new_string, change_log);
+ }
+ }
+
try {
- Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string,
- change_log);
+ invocation();
} catch (e) {
if (e instanceof Debug.LiveEditChangeScript.Failure) {
// Let's treat it as a "success" so that body with change_log will be
// sent back. "change_log" will have "failure" field set.
- change_log.push( { failure: true } );
+ change_log.push( { failure: true } );
} else {
throw e;
}
@@ -2076,7 +2091,7 @@
}
}
}
-
+
return content;
}
@@ -2099,7 +2114,7 @@
/**
- * Convert a value to its debugger protocol representation.
+ * Convert a value to its debugger protocol representation.
* @param {*} value The value to format as protocol value.
* @param {MirrorSerializer} mirror_serializer The serializer to use if any
* mirror objects are encountered.
diff --git a/src/flow-graph.cc b/src/flow-graph.cc
index bd9602f..02a2cd9 100644
--- a/src/flow-graph.cc
+++ b/src/flow-graph.cc
@@ -26,232 +26,87 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "flow-graph.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
-void FlowGraph::AppendInstruction(AstNode* instruction) {
- // Add a (non-null) AstNode to the end of the graph fragment.
- ASSERT(instruction != NULL);
- if (exit()->IsExitNode()) return;
- if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
- BlockNode::cast(exit())->AddInstruction(instruction);
-}
-
-
-void FlowGraph::AppendNode(Node* node) {
- // Add a node to the end of the graph. An empty block is added to
- // maintain edge-split form (that no join nodes or exit nodes as
- // successors to branch nodes).
- ASSERT(node != NULL);
- if (exit()->IsExitNode()) return;
- if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
- AppendNode(new BlockNode());
- }
- exit()->AddSuccessor(node);
- node->AddPredecessor(exit());
- exit_ = node;
-}
-
-
-void FlowGraph::AppendGraph(FlowGraph* graph) {
- // Add a flow graph fragment to the end of this one. An empty block is
- // added to maintain edge-split form (that no join nodes or exit nodes as
- // successors to branch nodes).
- ASSERT(graph != NULL);
- if (exit()->IsExitNode()) return;
- Node* node = graph->entry();
- if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
- AppendNode(new BlockNode());
- }
- exit()->AddSuccessor(node);
- node->AddPredecessor(exit());
- exit_ = graph->exit();
-}
-
-
-void FlowGraph::Split(BranchNode* branch,
- FlowGraph* left,
- FlowGraph* right,
- JoinNode* join) {
- // Add the branch node, left flowgraph, join node.
- AppendNode(branch);
- AppendGraph(left);
- AppendNode(join);
-
- // Splice in the right flowgraph.
- right->AppendNode(join);
- branch->AddSuccessor(right->entry());
- right->entry()->AddPredecessor(branch);
-}
-
-
-void FlowGraph::Loop(JoinNode* join,
- FlowGraph* condition,
- BranchNode* branch,
- FlowGraph* body) {
- // Add the join, condition and branch. Add join's predecessors in
- // left-to-right order.
- AppendNode(join);
- body->AppendNode(join);
- AppendGraph(condition);
- AppendNode(branch);
-
- // Splice in the body flowgraph.
- branch->AddSuccessor(body->entry());
- body->entry()->AddPredecessor(branch);
-}
-
-
-void ExitNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
+void BasicBlock::BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
+ ZoneList<BasicBlock*>* postorder,
+ bool mark) {
+ if (mark_ == mark) return;
+ mark_ = mark;
preorder->Add(this);
- postorder->Add(this);
-}
-
-
-void BlockNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- ASSERT(successor_ != NULL);
- preorder->Add(this);
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- successor_->Traverse(mark, preorder, postorder);
+ if (right_successor_ != NULL) {
+ right_successor_->BuildTraversalOrder(preorder, postorder, mark);
+ }
+ if (left_successor_ != NULL) {
+ left_successor_->BuildTraversalOrder(preorder, postorder, mark);
}
postorder->Add(this);
}
-void BranchNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- ASSERT(successor0_ != NULL && successor1_ != NULL);
- preorder->Add(this);
- if (!successor1_->IsMarkedWith(mark)) {
- successor1_->MarkWith(mark);
- successor1_->Traverse(mark, preorder, postorder);
- }
- if (!successor0_->IsMarkedWith(mark)) {
- successor0_->MarkWith(mark);
- successor0_->Traverse(mark, preorder, postorder);
- }
- postorder->Add(this);
-}
+FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) {
+ // Create new entry and exit nodes. These will not change during
+ // construction.
+ entry_ = new BasicBlock(NULL);
+ exit_ = new BasicBlock(NULL);
+ // Begin accumulating instructions in the entry block.
+ current_ = entry_;
-
-void JoinNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- ASSERT(successor_ != NULL);
- preorder->Add(this);
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- successor_->Traverse(mark, preorder, postorder);
- }
- postorder->Add(this);
-}
-
-
-void FlowGraphBuilder::Build(FunctionLiteral* lit) {
- global_exit_ = new ExitNode();
+ VisitDeclarations(lit->scope()->declarations());
VisitStatements(lit->body());
+ // In the event of stack overflow or failure to handle a syntactic
+ // construct, return an invalid flow graph.
+ if (HasStackOverflow()) return new FlowGraph(NULL, NULL);
- if (HasStackOverflow()) return;
-
- // The graph can end with a branch node (if the function ended with a
- // loop). Maintain edge-split form (no join nodes or exit nodes as
- // successors to branch nodes).
- if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
- graph_.AppendNode(global_exit_);
-
- // Build preorder and postorder traversal orders. All the nodes in
- // the graph have the same mark flag. For the traversal, use that
- // flag's negation. Traversal will flip all the flags.
- bool mark = graph_.entry()->IsMarkedWith(false);
- graph_.entry()->MarkWith(mark);
- graph_.entry()->Traverse(mark, &preorder_, &postorder_);
-}
-
-
-// This function peels off one iteration of a for-loop. The return value
-// is either a block statement containing the peeled loop or NULL in case
-// there is a stack overflow.
-static Statement* PeelForLoop(ForStatement* stmt) {
- // Mark this for-statement as processed.
- stmt->set_peel_this_loop(false);
-
- // Create new block containing the init statement of the for-loop and
- // an if-statement containing the peeled iteration and the original
- // loop without the init-statement.
- Block* block = new Block(NULL, 2, false);
- if (stmt->init() != NULL) {
- Statement* init = stmt->init();
- // The init statement gets the statement position of the for-loop
- // to make debugging of peeled loops possible.
- init->set_statement_pos(stmt->statement_pos());
- block->AddStatement(init);
- }
-
- // Copy the condition.
- CopyAstVisitor copy_visitor;
- Expression* cond_copy = stmt->cond() != NULL
- ? copy_visitor.DeepCopyExpr(stmt->cond())
- : new Literal(Factory::true_value());
- if (copy_visitor.HasStackOverflow()) return NULL;
-
- // Construct a block with the peeled body and the rest of the for-loop.
- Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
- if (copy_visitor.HasStackOverflow()) return NULL;
-
- Statement* next_copy = stmt->next() != NULL
- ? copy_visitor.DeepCopyStmt(stmt->next())
- : new EmptyStatement();
- if (copy_visitor.HasStackOverflow()) return NULL;
-
- Block* peeled_body = new Block(NULL, 3, false);
- peeled_body->AddStatement(body_copy);
- peeled_body->AddStatement(next_copy);
- peeled_body->AddStatement(stmt);
-
- // Remove the duplicated init statement from the for-statement.
- stmt->set_init(NULL);
-
- // Create new test at the top and add it to the newly created block.
- IfStatement* test = new IfStatement(cond_copy,
- peeled_body,
- new EmptyStatement());
- block->AddStatement(test);
- return block;
-}
-
-
-void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- stmts->at(i) = ProcessStatement(stmts->at(i));
- }
-}
-
-
-Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
- if (FLAG_loop_peeling &&
- stmt->AsForStatement() != NULL &&
- stmt->AsForStatement()->peel_this_loop()) {
- Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
- if (tmp_stmt == NULL) {
- SetStackOverflow();
- } else {
- stmt = tmp_stmt;
+ // If current is not the exit, add a link to the exit.
+ if (current_ != exit_) {
+ // If current already has a successor (i.e., will be a branch node) and
+ // if the exit already has a predecessor, insert an empty block to
+ // maintain edge split form.
+ if (current_->HasSuccessor() && exit_->HasPredecessor()) {
+ current_ = new BasicBlock(current_);
}
+ Literal* undefined = new Literal(Factory::undefined_value());
+ current_->AddInstruction(new ReturnStatement(undefined));
+ exit_->AddPredecessor(current_);
}
- Visit(stmt);
- return stmt;
+
+ FlowGraph* graph = new FlowGraph(entry_, exit_);
+ bool mark = !entry_->GetMark();
+ entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark);
+
+#ifdef DEBUG
+ // Number the nodes in reverse postorder.
+ int n = 0;
+ for (int i = graph->postorder()->length() - 1; i >= 0; --i) {
+ graph->postorder()->at(i)->set_number(n++);
+ }
+#endif
+
+ return graph;
}
void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
+ Variable* var = decl->proxy()->AsVariable();
+ Slot* slot = var->slot();
+ // We allow only declarations that do not require code generation.
+  // The following all require code generation: global variables and
+  // functions, variables with slot type LOOKUP, declarations with
+  // mode CONST, and declarations with a function value.
+
+ if (var->is_global() ||
+ (slot != NULL && slot->type() == Slot::LOOKUP) ||
+ decl->mode() == Variable::CONST ||
+ decl->fun() != NULL) {
+ // Here and in the rest of the flow graph builder we indicate an
+ // unsupported syntactic construct by setting the stack overflow
+ // flag on the visitor. This causes bailout of the visitor.
+ SetStackOverflow();
+ }
}
@@ -271,21 +126,24 @@
void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+ // Build a diamond in the flow graph. First accumulate the instructions
+ // of the test in the current basic block.
Visit(stmt->condition());
- BranchNode* branch = new BranchNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- stmt->set_then_statement(ProcessStatement(stmt->then_statement()));
+ // Remember the branch node and accumulate the true branch as its left
+ // successor. This relies on the successors being added left to right.
+ BasicBlock* branch = current_;
+ current_ = new BasicBlock(branch);
+ Visit(stmt->then_statement());
- FlowGraph left = graph_;
- graph_ = FlowGraph::Empty();
- stmt->set_else_statement(ProcessStatement(stmt->else_statement()));
+ // Construct a join node and then accumulate the false branch in a fresh
+ // successor of the branch node.
+ BasicBlock* join = new BasicBlock(current_);
+ current_ = new BasicBlock(branch);
+ Visit(stmt->else_statement());
+ join->AddPredecessor(current_);
- if (HasStackOverflow()) return;
- JoinNode* join = new JoinNode();
- original.Split(branch, &left, &graph_, join);
- graph_ = original;
+ current_ = join;
}
@@ -330,23 +188,26 @@
void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));
+ // Build a loop in the flow graph. First accumulate the instructions of
+ // the initializer in the current basic block.
+ if (stmt->init() != NULL) Visit(stmt->init());
- JoinNode* join = new JoinNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
+ // Create a new basic block for the test. This will be the join node.
+ BasicBlock* join = new BasicBlock(current_);
+ current_ = join;
if (stmt->cond() != NULL) Visit(stmt->cond());
- BranchNode* branch = new BranchNode();
- FlowGraph condition = graph_;
- graph_ = FlowGraph::Empty();
- stmt->set_body(ProcessStatement(stmt->body()));
+ // The current node is the branch node. Create a new basic block to begin
+ // the body.
+ BasicBlock* branch = current_;
+ current_ = new BasicBlock(branch);
+ Visit(stmt->body());
+ if (stmt->next() != NULL) Visit(stmt->next());
- if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));
-
- if (HasStackOverflow()) return;
- original.Loop(join, &condition, branch, &graph_);
- graph_ = original;
+ // Add the backward edge from the end of the body and continue with the
+ // false arm of the branch.
+ join->AddPredecessor(current_);
+ current_ = new BasicBlock(branch);
}
@@ -387,17 +248,18 @@
void FlowGraphBuilder::VisitSlot(Slot* expr) {
+ // Slots do not appear in the AST.
UNREACHABLE();
}
void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- graph_.AppendInstruction(expr);
+ current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitLiteral(Literal* expr) {
- graph_.AppendInstruction(expr);
+ current_->AddInstruction(expr);
}
@@ -422,29 +284,30 @@
void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
+ // There are three basic kinds of assignment: variable assignments,
+ // property assignments, and invalid left-hand sides (which are translated
+ // to "throw ReferenceError" by the parser).
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
- // Left-hand side can be a variable or property (or reference error) but
- // not both.
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
- if (expr->is_compound()) Visit(expr->target());
- Visit(expr->value());
- if (var->IsStackAllocated()) {
- // The first definition in the body is numbered n, where n is the
- // number of parameters and stack-allocated locals.
- expr->set_num(body_definitions_.length() + variable_count_);
- body_definitions_.Add(expr);
+ if (expr->is_compound() && !expr->target()->IsTrivial()) {
+ Visit(expr->target());
}
+ if (!expr->value()->IsTrivial()) Visit(expr->value());
+ current_->AddInstruction(expr);
} else if (prop != NULL) {
- Visit(prop->obj());
- if (!prop->key()->IsPropertyName()) Visit(prop->key());
- Visit(expr->value());
- }
+ if (!prop->obj()->IsTrivial()) Visit(prop->obj());
+ if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) {
+ Visit(prop->key());
+ }
+ if (!expr->value()->IsTrivial()) Visit(expr->value());
+ current_->AddInstruction(expr);
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ } else {
+ Visit(expr->target());
+ }
}
@@ -454,23 +317,18 @@
void FlowGraphBuilder::VisitProperty(Property* expr) {
- Visit(expr->obj());
- if (!expr->key()->IsPropertyName()) Visit(expr->key());
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ if (!expr->obj()->IsTrivial()) Visit(expr->obj());
+ if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) {
+ Visit(expr->key());
+ }
+ current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitCall(Call* expr) {
Visit(expr->expression());
- ZoneList<Expression*>* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- Visit(arguments->at(i));
- }
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ VisitExpressions(expr->arguments());
+ current_->AddInstruction(expr);
}
@@ -497,8 +355,7 @@
case Token::ADD:
case Token::SUB:
Visit(expr->expression());
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ current_->AddInstruction(expr);
break;
default:
@@ -509,16 +366,7 @@
void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
Visit(expr->expression());
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (var != NULL && var->IsStackAllocated()) {
- // The first definition in the body is numbered n, where n is the number
- // of parameters and stack-allocated locals.
- expr->set_num(body_definitions_.length() + variable_count_);
- body_definitions_.Add(expr);
- }
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ current_->AddInstruction(expr);
}
@@ -534,17 +382,16 @@
case Token::BIT_XOR:
case Token::BIT_AND:
case Token::SHL:
+ case Token::SAR:
case Token::SHR:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
- case Token::SAR:
- Visit(expr->left());
- Visit(expr->right());
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ if (!expr->left()->IsTrivial()) Visit(expr->left());
+ if (!expr->right()->IsTrivial()) Visit(expr->right());
+ current_->AddInstruction(expr);
break;
default:
@@ -568,10 +415,9 @@
case Token::GT:
case Token::LTE:
case Token::GTE:
- Visit(expr->left());
- Visit(expr->right());
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
+ if (!expr->left()->IsTrivial()) Visit(expr->left());
+ if (!expr->right()->IsTrivial()) Visit(expr->right());
+ current_->AddInstruction(expr);
break;
default:
@@ -585,4 +431,333 @@
}
+#ifdef DEBUG
+
+// Print a textual representation of an instruction in a flow graph.
+class InstructionPrinter: public AstVisitor {
+ public:
+ InstructionPrinter() {}
+
+ private:
+ // Overridden from the base class.
+ virtual void VisitExpressions(ZoneList<Expression*>* exprs);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionPrinter);
+};
+
+
+static void PrintSubexpression(Expression* expr) {
+ if (!expr->IsTrivial()) {
+ PrintF("@%d", expr->num());
+ } else if (expr->AsLiteral() != NULL) {
+ expr->AsLiteral()->handle()->Print();
+ } else if (expr->AsVariableProxy() != NULL) {
+ PrintF("%s", *expr->AsVariableProxy()->name()->ToCString());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void InstructionPrinter::VisitExpressions(ZoneList<Expression*>* exprs) {
+ for (int i = 0; i < exprs->length(); ++i) {
+ if (i != 0) PrintF(", ");
+ PrintF("@%d", exprs->at(i)->num());
+ }
+}
+
+
+// We only define printing functions for the node types that can occur as
+// instructions in a flow graph. The rest are unreachable.
+void InstructionPrinter::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitBlock(Block* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitIfStatement(IfStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
+ PrintF("return ");
+ PrintSubexpression(stmt->expression());
+}
+
+
+void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitForStatement(ForStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitSharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitConditional(Conditional* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
+ Variable* var = expr->AsVariable();
+ if (var != NULL) {
+ PrintF("%s", *var->name()->ToCString());
+ } else {
+ ASSERT(expr->AsProperty() != NULL);
+ Visit(expr->AsProperty());
+ }
+}
+
+
+void InstructionPrinter::VisitLiteral(Literal* expr) {
+ expr->handle()->Print();
+}
+
+
+void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitAssignment(Assignment* expr) {
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+
+ // Print the left-hand side.
+ Visit(expr->target());
+ if (var == NULL && prop == NULL) return; // Throw reference error.
+ PrintF(" = ");
+ // For compound assignments, print the left-hand side again and the
+ // corresponding binary operator.
+ if (expr->is_compound()) {
+ PrintSubexpression(expr->target());
+ PrintF(" %s ", Token::String(expr->binary_op()));
+ }
+
+ // Print the right-hand side.
+ PrintSubexpression(expr->value());
+}
+
+
+void InstructionPrinter::VisitThrow(Throw* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitProperty(Property* expr) {
+ PrintSubexpression(expr->obj());
+ if (expr->key()->IsPropertyName()) {
+ PrintF(".");
+ ASSERT(expr->key()->AsLiteral() != NULL);
+ expr->key()->AsLiteral()->handle()->Print();
+ } else {
+ PrintF("[");
+ PrintSubexpression(expr->key());
+ PrintF("]");
+ }
+}
+
+
+void InstructionPrinter::VisitCall(Call* expr) {
+ PrintF("@%d(", expr->expression()->num());
+ VisitExpressions(expr->arguments());
+ PrintF(")");
+}
+
+
+void InstructionPrinter::VisitCallNew(CallNew* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
+ UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
+ PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
+}
+
+
+void InstructionPrinter::VisitCountOperation(CountOperation* expr) {
+ if (expr->is_prefix()) {
+ PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
+ } else {
+ PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
+ }
+}
+
+
+void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
+ PrintSubexpression(expr->left());
+ PrintF(" %s ", Token::String(expr->op()));
+ PrintSubexpression(expr->right());
+}
+
+
+void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
+ PrintSubexpression(expr->left());
+ PrintF(" %s ", Token::String(expr->op()));
+ PrintSubexpression(expr->right());
+}
+
+
+void InstructionPrinter::VisitThisFunction(ThisFunction* expr) {
+ UNREACHABLE();
+}
+
+
+int BasicBlock::PrintAsText(int instruction_number) {
+ // Print a label for all blocks except the entry.
+ if (HasPredecessor()) {
+ PrintF("L%d:", number());
+ }
+
+ // Number and print the instructions. Since AST child nodes are visited
+ // before their parents, the parent nodes can refer to them by number.
+ InstructionPrinter printer;
+ for (int i = 0; i < instructions_.length(); ++i) {
+ PrintF("\n%d ", instruction_number);
+ instructions_[i]->set_num(instruction_number++);
+ instructions_[i]->Accept(&printer);
+ }
+
+ // If this is the exit, print "exit". If there is a single successor,
+ // print "goto" successor on a separate line. If there are two
+ // successors, print "goto" successor on the same line as the last
+ // instruction in the block. There is a blank line between blocks (and
+ // after the last one).
+ if (left_successor_ == NULL) {
+ PrintF("\nexit\n\n");
+ } else if (right_successor_ == NULL) {
+ PrintF("\ngoto L%d\n\n", left_successor_->number());
+ } else {
+ PrintF(", goto (L%d, L%d)\n\n",
+ left_successor_->number(),
+ right_successor_->number());
+ }
+
+ return instruction_number;
+}
+
+
+void FlowGraph::PrintAsText(Handle<String> name) {
+ PrintF("\n==== name = \"%s\" ====\n", *name->ToCString());
+ // Print nodes in reverse postorder. Note that AST node numbers are used
+ // during printing of instructions and thus their current values are
+ // destroyed.
+ int number = 0;
+ for (int i = postorder_.length() - 1; i >= 0; --i) {
+ number = postorder_[i]->PrintAsText(number);
+ }
+}
+
+#endif // DEBUG
+
+
} } // namespace v8::internal
diff --git a/src/flow-graph.h b/src/flow-graph.h
index 183b71d..f6af841 100644
--- a/src/flow-graph.h
+++ b/src/flow-graph.h
@@ -36,339 +36,140 @@
namespace v8 {
namespace internal {
-// Flow-graph nodes.
-class Node: public ZoneObject {
+// The nodes of a flow graph are basic blocks. Basic blocks consist of
+// instructions represented as pointers to AST nodes in the order that they
+// would be visited by the code generator. A block can have arbitrarily many
+// (even zero) predecessors and up to two successors. Blocks with multiple
+// predecessors are "join nodes" and blocks with multiple successors are
+// "branch nodes". A block can be both a branch and a join node.
+//
+// Flow graphs are in edge split form: a branch node is never the
+// predecessor of a merge node. Empty basic blocks are inserted to maintain
+// edge split form.
+class BasicBlock: public ZoneObject {
public:
- Node() : number_(-1), mark_(false) {}
+ // Construct a basic block with a given predecessor. NULL indicates no
+ // predecessor or that the predecessor will be set later.
+ explicit BasicBlock(BasicBlock* predecessor)
+ : predecessors_(2),
+ instructions_(8),
+ left_successor_(NULL),
+ right_successor_(NULL),
+ mark_(false) {
+ if (predecessor != NULL) AddPredecessor(predecessor);
+ }
- virtual ~Node() {}
+ bool HasPredecessor() { return !predecessors_.is_empty(); }
+ bool HasSuccessor() { return left_successor_ != NULL; }
- virtual bool IsExitNode() { return false; }
- virtual bool IsBlockNode() { return false; }
- virtual bool IsBranchNode() { return false; }
- virtual bool IsJoinNode() { return false; }
-
- virtual void AddPredecessor(Node* predecessor) = 0;
- virtual void AddSuccessor(Node* successor) = 0;
-
- bool IsMarkedWith(bool mark) { return mark_ == mark; }
- void MarkWith(bool mark) { mark_ = mark; }
-
- // Perform a depth first search and record preorder and postorder
- // traversal orders.
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) = 0;
-
- int number() { return number_; }
- void set_number(int number) { number_ = number; }
-
- // Functions used by data-flow analyses.
- virtual void InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark);
- virtual void ComputeRDOut(BitVector* result) = 0;
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
- virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
-
- // Functions used by dead-code elimination.
- virtual void MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
-
-#ifdef DEBUG
- void AssignNodeNumber();
- void PrintReachingDefinitions();
- virtual void PrintText() = 0;
-#endif
-
- protected:
- ReachingDefinitionsData rd_;
-
- private:
- int number_;
- bool mark_;
-
- DISALLOW_COPY_AND_ASSIGN(Node);
-};
-
-
-// An exit node has a arbitrarily many predecessors and no successors.
-class ExitNode: public Node {
- public:
- ExitNode() : predecessors_(4) {}
-
- virtual bool IsExitNode() { return true; }
-
- virtual void AddPredecessor(Node* predecessor) {
+ // Add a given basic block as a predecessor of this block. This function
+ // also adds this block as a successor of the given block.
+ void AddPredecessor(BasicBlock* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
+ predecessor->AddSuccessor(this);
}
- virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }
-
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
-
-#ifdef DEBUG
- virtual void PrintText();
-#endif
-
- private:
- ZoneList<Node*> predecessors_;
-
- DISALLOW_COPY_AND_ASSIGN(ExitNode);
-};
-
-
-// Block nodes have a single successor and predecessor and a list of
-// instructions.
-class BlockNode: public Node {
- public:
- BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
-
- static BlockNode* cast(Node* node) {
- ASSERT(node->IsBlockNode());
- return reinterpret_cast<BlockNode*>(node);
- }
-
- virtual bool IsBlockNode() { return true; }
-
- bool is_empty() { return instructions_.is_empty(); }
-
- ZoneList<AstNode*>* instructions() { return &instructions_; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor_ == NULL && predecessor != NULL);
- predecessor_ = predecessor;
- }
-
- virtual void AddSuccessor(Node* successor) {
- ASSERT(successor_ == NULL && successor != NULL);
- successor_ = successor;
- }
-
+ // Add an instruction to the end of this block. The block must be "open"
+ // by not having a successor yet.
void AddInstruction(AstNode* instruction) {
+ ASSERT(!HasSuccessor() && instruction != NULL);
instructions_.Add(instruction);
}
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark);
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
- virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
-
- virtual void MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
+ // Perform a depth-first traversal of graph rooted at this node,
+ // accumulating pre- and postorder traversal orders. Visited nodes are
+ // marked with mark.
+ void BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
+ ZoneList<BasicBlock*>* postorder,
+ bool mark);
+ bool GetMark() { return mark_; }
#ifdef DEBUG
- virtual void PrintText();
+ // In debug mode, blocks are numbered in reverse postorder to help with
+ // printing.
+ int number() { return number_; }
+ void set_number(int n) { number_ = n; }
+
+ // Print a basic block, given the number of the first instruction.
+ // Returns the next number after the number of the last instruction.
+ int PrintAsText(int instruction_number);
#endif
private:
- Node* predecessor_;
- Node* successor_;
- ZoneList<AstNode*> instructions_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockNode);
-};
-
-
-// Branch nodes have a single predecessor and a pair of successors.
-class BranchNode: public Node {
- public:
- BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
-
- virtual bool IsBranchNode() { return true; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor_ == NULL && predecessor != NULL);
- predecessor_ = predecessor;
- }
-
- virtual void AddSuccessor(Node* successor) {
- ASSERT(successor1_ == NULL && successor != NULL);
- if (successor0_ == NULL) {
- successor0_ = successor;
+ // Add a given basic block as successor to this block. This function does
+ // not add this block as a predecessor of the given block so as to avoid
+ // circularity.
+ void AddSuccessor(BasicBlock* successor) {
+ ASSERT(right_successor_ == NULL && successor != NULL);
+ if (HasSuccessor()) {
+ right_successor_ = successor;
} else {
- successor1_ = successor;
+ left_successor_ = successor;
}
}
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
+ ZoneList<BasicBlock*> predecessors_;
+ ZoneList<AstNode*> instructions_;
+ BasicBlock* left_successor_;
+ BasicBlock* right_successor_;
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+ // Support for graph traversal. Before traversal, all nodes in the graph
+ // have the same mark (true or false). Traversal marks already-visited
+ // nodes with the opposite mark. After traversal, all nodes again have
+ // the same mark. Traversal of the same graph is not reentrant.
+ bool mark_;
#ifdef DEBUG
- virtual void PrintText();
+ int number_;
#endif
- private:
- Node* predecessor_;
- Node* successor0_;
- Node* successor1_;
-
- DISALLOW_COPY_AND_ASSIGN(BranchNode);
+ DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
-// Join nodes have arbitrarily many predecessors and a single successor.
-class JoinNode: public Node {
+// A flow graph has distinguished entry and exit blocks. The entry block is
+// the only one with no predecessors and the exit block is the only one with
+// no successors.
+class FlowGraph: public ZoneObject {
public:
- JoinNode() : predecessors_(2), successor_(NULL) {}
-
- static JoinNode* cast(Node* node) {
- ASSERT(node->IsJoinNode());
- return reinterpret_cast<JoinNode*>(node);
+ FlowGraph(BasicBlock* entry, BasicBlock* exit)
+ : entry_(entry), exit_(exit), preorder_(8), postorder_(8) {
}
- virtual bool IsJoinNode() { return true; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor != NULL);
- predecessors_.Add(predecessor);
- }
-
- virtual void AddSuccessor(Node* successor) {
- ASSERT(successor_ == NULL && successor != NULL);
- successor_ = successor;
- }
-
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+ ZoneList<BasicBlock*>* preorder() { return &preorder_; }
+ ZoneList<BasicBlock*>* postorder() { return &postorder_; }
#ifdef DEBUG
- virtual void PrintText();
+ void PrintAsText(Handle<String> name);
#endif
private:
- ZoneList<Node*> predecessors_;
- Node* successor_;
-
- DISALLOW_COPY_AND_ASSIGN(JoinNode);
+ BasicBlock* entry_;
+ BasicBlock* exit_;
+ ZoneList<BasicBlock*> preorder_;
+ ZoneList<BasicBlock*> postorder_;
};
-// Flow graphs have a single entry and single exit. The empty flowgraph is
-// represented by both entry and exit being NULL.
-class FlowGraph BASE_EMBEDDED {
- public:
- static FlowGraph Empty() {
- FlowGraph graph;
- graph.entry_ = new BlockNode();
- graph.exit_ = graph.entry_;
- return graph;
- }
-
- bool is_empty() const {
- return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
- }
- Node* entry() const { return entry_; }
- Node* exit() const { return exit_; }
-
- // Add a single instruction to the end of this flowgraph.
- void AppendInstruction(AstNode* instruction);
-
- // Add a single node to the end of this flow graph.
- void AppendNode(Node* node);
-
- // Add a flow graph fragment to the end of this one.
- void AppendGraph(FlowGraph* graph);
-
- // Concatenate an if-then-else flow-graph to this one. Control is split
- // and merged, so the graph remains single-entry, single-exit.
- void Split(BranchNode* branch,
- FlowGraph* left,
- FlowGraph* right,
- JoinNode* merge);
-
- // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
- // one. Control is split by the condition and merged back from the back
- // edge at end of the body to the beginning of the condition. The single
- // (free) exit of the result graph is the right (false) arm of the branch
- // node.
- void Loop(JoinNode* merge,
- FlowGraph* condition,
- BranchNode* branch,
- FlowGraph* body);
-
-#ifdef DEBUG
- void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
-#endif
-
- private:
- FlowGraph() : entry_(NULL), exit_(NULL) {}
-
- Node* entry_;
- Node* exit_;
-};
-
-
-// Construct a flow graph from a function literal. Build pre- and postorder
-// traversal orders as a byproduct.
+// The flow graph builder walks the AST adding reachable AST nodes to the
+// flow graph as instructions. It remembers the entry and exit nodes of the
+// graph, and keeps a pointer to the current block being constructed.
class FlowGraphBuilder: public AstVisitor {
public:
- explicit FlowGraphBuilder(int variable_count)
- : graph_(FlowGraph::Empty()),
- global_exit_(NULL),
- preorder_(4),
- postorder_(4),
- variable_count_(variable_count),
- body_definitions_(4) {
- }
+ FlowGraphBuilder() {}
- void Build(FunctionLiteral* lit);
-
- FlowGraph* graph() { return &graph_; }
- ZoneList<Node*>* preorder() { return &preorder_; }
- ZoneList<Node*>* postorder() { return &postorder_; }
- ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
+ FlowGraph* Build(FunctionLiteral* lit);
private:
- ExitNode* global_exit() { return global_exit_; }
-
- // Helpers to allow tranforming the ast during flow graph construction.
- void VisitStatements(ZoneList<Statement*>* stmts);
- Statement* ProcessStatement(Statement* stmt);
-
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- FlowGraph graph_;
- ExitNode* global_exit_;
- ZoneList<Node*> preorder_;
- ZoneList<Node*> postorder_;
-
- // The flow graph builder collects a list of explicit definitions
- // (assignments and count operations) to stack-allocated variables to use
- // for reaching definitions analysis. It does not count the implicit
- // definition at function entry. AST node numbers in the AST are used to
- // refer into this list.
- int variable_count_;
- ZoneList<Expression*> body_definitions_;
+ BasicBlock* entry_;
+ BasicBlock* exit_;
+ BasicBlock* current_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
diff --git a/src/globals.h b/src/globals.h
index b85e19d..ea74b5d 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -112,8 +112,9 @@
#define V8PRIxPTR "lx"
#endif
-#if defined(__APPLE__) && defined(__MACH__)
-#define USING_MAC_ABI
+#if (defined(__APPLE__) && defined(__MACH__)) || \
+ defined(__FreeBSD__) || defined(__OpenBSD__)
+#define USING_BSD_ABI
#endif
// Code-point values in Unicode 4.0 are 21 bits wide.
diff --git a/src/handles.cc b/src/handles.cc
index d4c593f..05cb3f2 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -737,7 +737,7 @@
ClearExceptionFlag flag) {
CompilationInfo info(function, 0, receiver);
bool result = CompileLazyHelper(&info, flag);
- LOG(FunctionCreateEvent(*function));
+ PROFILE(FunctionCreateEvent(*function));
return result;
}
@@ -747,7 +747,7 @@
ClearExceptionFlag flag) {
CompilationInfo info(function, 1, receiver);
bool result = CompileLazyHelper(&info, flag);
- LOG(FunctionCreateEvent(*function));
+ PROFILE(FunctionCreateEvent(*function));
return result;
}
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 892c289..bf9c535 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -236,19 +236,27 @@
void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
+ CopyWords(dst, src, byte_size / kPointerSize);
+}
- // Use block copying memcpy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16 * kPointerSize;
- if (byte_size >= kBlockCopyLimit) {
- memcpy(dst, src, byte_size);
- } else {
- int remaining = byte_size / kPointerSize;
- do {
- remaining--;
+void Heap::MoveBlock(Object** dst, Object** src, size_t byte_size) {
+ ASSERT(IsAligned<size_t>(byte_size, kPointerSize));
+
+ int size_in_words = byte_size / kPointerSize;
+
+ if ((dst < src) || (dst >= (src + size_in_words))) {
+ ASSERT((dst >= (src + size_in_words)) ||
+ ((OffsetFrom(reinterpret_cast<Address>(src)) -
+ OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
+
+ Object** end = src + size_in_words;
+
+ while (src != end) {
*dst++ = *src++;
- } while (remaining > 0);
+ }
+ } else {
+ memmove(dst, src, byte_size);
}
}
diff --git a/src/heap.cc b/src/heap.cc
index 5421dcc..f75e458 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -562,23 +562,18 @@
EnsureFromSpaceIsCommitted();
- // Perform mark-sweep with optional compaction.
if (collector == MARK_COMPACTOR) {
+ // Perform mark-sweep with optional compaction.
MarkCompact(tracer);
- }
- // Always perform a scavenge to make room in new space.
- Scavenge();
-
- // Update the old space promotion limits after the scavenge due to
- // promotions during scavenge.
- if (collector == MARK_COMPACTOR) {
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
+ } else {
+ Scavenge();
}
Counters::objs_since_last_young.Set(0);
@@ -764,6 +759,17 @@
#endif
+void Heap::CheckNewSpaceExpansionCriteria() {
+ if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
+ survived_since_last_expansion_ > new_space_.Capacity()) {
+ // Grow the size of new space if there is room to grow and enough
+ // data has survived scavenge since the last expansion.
+ new_space_.Grow();
+ survived_since_last_expansion_ = 0;
+ }
+}
+
+
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -780,13 +786,7 @@
// Used for updating survived_since_last_expansion_ at function end.
int survived_watermark = PromotedSpaceSize();
- if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.Capacity()) {
- // Grow the size of new space if there is room to grow and enough
- // data has survived scavenge since the last expansion.
- new_space_.Grow();
- survived_since_last_expansion_ = 0;
- }
+ CheckNewSpaceExpansionCriteria();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@@ -837,15 +837,17 @@
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
- ScavengeExternalStringTable();
+ UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateNewSpaceReferenceInExternalStringTableEntry);
+
ASSERT(new_space_front == new_space_.top());
// Set age mark.
new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
- survived_since_last_expansion_ +=
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+ IncrementYoungSurvivorsCounter(
+ (PromotedSpaceSize() - survived_watermark) + new_space_.Size());
LOG(ResourceEvent("scavenge", "end"));
@@ -853,7 +855,22 @@
}
-void Heap::ScavengeExternalStringTable() {
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
+ MapWord first_word = HeapObject::cast(*p)->map_word();
+
+ if (!first_word.IsForwardingAddress()) {
+ // Unreachable external string can be finalized.
+ FinalizeExternalString(String::cast(*p));
+ return NULL;
+ }
+
+ // String is still reachable.
+ return String::cast(first_word.ToForwardingAddress());
+}
+
+
+void Heap::UpdateNewSpaceReferencesInExternalStringTable(
+ ExternalStringTableUpdaterCallback updater_func) {
ExternalStringTable::Verify();
if (ExternalStringTable::new_space_strings_.is_empty()) return;
@@ -864,16 +881,10 @@
for (Object** p = start; p < end; ++p) {
ASSERT(Heap::InFromSpace(*p));
- MapWord first_word = HeapObject::cast(*p)->map_word();
+ String* target = updater_func(p);
- if (!first_word.IsForwardingAddress()) {
- // Unreachable external string can be finalized.
- FinalizeExternalString(String::cast(*p));
- continue;
- }
+ if (target == NULL) continue;
- // String is still reachable.
- String* target = String::cast(first_word.ToForwardingAddress());
ASSERT(target->IsExternalString());
if (Heap::InNewSpace(target)) {
diff --git a/src/heap.h b/src/heap.h
index 2a0de23..03d3502 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -149,6 +149,11 @@
V(number_symbol, "number") \
V(Number_symbol, "Number") \
V(RegExp_symbol, "RegExp") \
+ V(source_symbol, "source") \
+ V(global_symbol, "global") \
+ V(ignore_case_symbol, "ignoreCase") \
+ V(multiline_symbol, "multiline") \
+ V(last_index_symbol, "lastIndex") \
V(object_symbol, "object") \
V(prototype_symbol, "prototype") \
V(string_symbol, "string") \
@@ -195,6 +200,9 @@
class HeapStats;
+typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
+
+
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@@ -938,6 +946,30 @@
static void RecordStats(HeapStats* stats);
+ // Copy block of memory from src to dst. Size of block should be aligned
+ // by pointer size.
+ static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+
+ // Optimized version of memmove for blocks with pointer size aligned sizes and
+ // pointer size aligned addresses.
+ static inline void MoveBlock(Object** dst, Object** src, size_t byte_size);
+
+ // Check new space expansion criteria and expand semispaces if it was hit.
+ static void CheckNewSpaceExpansionCriteria();
+
+ static inline void IncrementYoungSurvivorsCounter(int survived) {
+ survived_since_last_expansion_ += survived;
+ }
+
+ static void UpdateNewSpaceReferencesInExternalStringTable(
+ ExternalStringTableUpdaterCallback updater_func);
+
+ // Helper function that governs the promotion policy from new space to
+ // old. If the object's old address lies below the new space's age
+ // mark or if we've already filled the bottom 1/16th of the to space,
+ // we try to promote this object.
+ static inline bool ShouldBePromoted(Address old_address, int object_size);
+
static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
private:
@@ -1134,7 +1166,10 @@
// Performs a minor collection in new generation.
static void Scavenge();
- static void ScavengeExternalStringTable();
+
+ static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Object** pointer);
+
static Address DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front);
@@ -1152,11 +1187,6 @@
HeapObject* target,
int size);
- // Helper function that governs the promotion policy from new space to
- // old. If the object's old address lies below the new space's age
- // mark or if we've already filled the bottom 1/16th of the to space,
- // we try to promote this object.
- static inline bool ShouldBePromoted(Address old_address, int object_size);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record the copy of an object in the NewSpace's statistics.
static void RecordCopiedObject(HeapObject* obj);
@@ -1175,9 +1205,6 @@
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
- // Copy memory from src to dst.
- static inline void CopyBlock(Object** dst, Object** src, int byte_size);
-
// Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 7f0d5d4..26e40b1 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -123,8 +123,8 @@
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!code->IsCode()) return;
- LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), "CpuFeatures::Probe"));
+ PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 200e3ef..ee8238a 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1132,9 +1132,9 @@
static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
+ Token::Value op,
+ const Result& right,
+ const Result& left) {
// Set TypeInfo of result according to the operation performed.
// Rely on the fact that smis have a 31 bit payload on ia32.
ASSERT(kSmiValueSize == 31);
@@ -1193,11 +1193,12 @@
if (operands_type.IsSmi()) {
// The Integer32 range is big enough to take the sum of any two Smis.
return TypeInfo::Integer32();
+ } else if (operands_type.IsNumber()) {
+ return TypeInfo::Number();
+ } else if (left.type_info().IsString() || right.type_info().IsString()) {
+ return TypeInfo::String();
} else {
- // Result could be a string or a number. Check types of inputs.
- return operands_type.IsNumber()
- ? TypeInfo::Number()
- : TypeInfo::Unknown();
+ return TypeInfo::Unknown();
}
case Token::SHL:
return TypeInfo::Integer32();
@@ -1220,11 +1221,10 @@
}
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- StaticType* type,
- OverwriteMode overwrite_mode,
- bool no_negative_zero) {
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
+ OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
Comment cmnt_token(masm_, Token::String(op));
if (op == Token::COMMA) {
@@ -1237,8 +1237,13 @@
Result left = frame_->Pop();
if (op == Token::ADD) {
- bool left_is_string = left.is_constant() && left.handle()->IsString();
- bool right_is_string = right.is_constant() && right.handle()->IsString();
+ const bool left_is_string = left.type_info().IsString();
+ const bool right_is_string = right.type_info().IsString();
+ // Make sure constant strings have string type info.
+ ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+ left_is_string);
+ ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+ right_is_string);
if (left_is_string || right_is_string) {
frame_->Push(&left);
frame_->Push(&right);
@@ -1247,7 +1252,8 @@
if (right_is_string) {
// TODO(lrn): if both are constant strings
// -- do a compile time cons, if allocation during codegen is allowed.
- answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
} else {
answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
@@ -1256,6 +1262,7 @@
answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
}
+ answer.set_type_info(TypeInfo::String());
frame_->Push(&answer);
return;
}
@@ -1290,13 +1297,11 @@
operands_type);
answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode,
- no_negative_zero);
+ answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+ false, overwrite_mode);
} else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode,
- no_negative_zero);
+ answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+ true, overwrite_mode);
} else {
// Set the flags based on the operation, type and loop nesting level.
// Bit operations always assume they likely operate on Smis. Still only
@@ -1306,9 +1311,8 @@
if (loop_nesting() > 0 &&
(Token::IsBitOp(op) ||
operands_type.IsInteger32() ||
- type->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(op, &left, &right,
- overwrite_mode, no_negative_zero);
+ expr->type()->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
} else {
GenericBinaryOpStub stub(op,
overwrite_mode,
@@ -1412,11 +1416,11 @@
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
- OverwriteMode overwrite_mode,
- bool no_negative_zero) {
+ OverwriteMode overwrite_mode) {
+ Token::Value op = expr->op();
Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
@@ -1522,7 +1526,7 @@
// virtual frame is unchanged in this block, so local control flow
// can use a Label rather than a JumpTarget. If the context of this
// expression will treat -0 like 0, do not do this test.
- if (!no_negative_zero) {
+ if (!expr->no_negative_zero()) {
Label non_zero_result;
__ test(left->reg(), Operand(left->reg()));
__ j(not_zero, &non_zero_result);
@@ -1551,7 +1555,7 @@
// the dividend is negative, return a floating point negative
// zero. The frame is unchanged in this block, so local control
// flow can use a Label rather than a JumpTarget.
- if (!no_negative_zero) {
+ if (!expr->no_negative_zero()) {
Label non_zero_result;
__ test(edx, Operand(edx));
__ j(not_zero, &non_zero_result, taken);
@@ -1735,7 +1739,7 @@
// argument is negative, go to slow case. The frame is unchanged
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
- if (!no_negative_zero) {
+ if (!expr->no_negative_zero()) {
Label non_zero_result;
__ test(answer.reg(), Operand(answer.reg()));
__ j(not_zero, &non_zero_result, taken);
@@ -1978,13 +1982,12 @@
}
-Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode,
- bool no_negative_zero) {
+Result CodeGenerator::ConstantSmiBinaryOperation(
+ BinaryOperation* expr,
+ Result* operand,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
@@ -1994,11 +1997,11 @@
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
- overwrite_mode, no_negative_zero);
+ return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
+ overwrite_mode);
} else {
- return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
- overwrite_mode, no_negative_zero);
+ return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
+ overwrite_mode);
}
}
@@ -2006,6 +2009,7 @@
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Token::Value op = expr->op();
Result answer;
switch (op) {
case Token::ADD: {
@@ -2081,8 +2085,8 @@
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode, no_negative_zero);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -2118,8 +2122,8 @@
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode, no_negative_zero);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -2319,11 +2323,11 @@
// default case here.
Result constant_operand(value);
if (reversed) {
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode, no_negative_zero);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
} else {
- answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode, no_negative_zero);
+ answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
+ overwrite_mode);
}
}
break;
@@ -2359,11 +2363,11 @@
default: {
Result constant_operand(value);
if (reversed) {
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode, no_negative_zero);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
} else {
- answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode, no_negative_zero);
+ answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
@@ -5363,10 +5367,11 @@
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- GenericBinaryOperation(node->binary_op(),
- node->type(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- node->no_negative_zero());
+ // Construct the implicit binary operation.
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
Load(node->value());
}
@@ -5441,10 +5446,11 @@
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- GenericBinaryOperation(node->binary_op(),
- node->type(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- node->no_negative_zero());
+ // Construct the implicit binary operation.
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
Load(node->value());
}
@@ -5521,10 +5527,10 @@
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- GenericBinaryOperation(node->binary_op(),
- node->type(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- node->no_negative_zero());
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
Load(node->value());
}
@@ -6222,12 +6228,30 @@
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- // ArgumentsAccessStub takes the parameter count as an input argument
- // in register eax. Create a constant result for it.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to the arguments.length.
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
- Result result = frame_->CallStub(&stub, &count);
+
+ Result fp = allocator_->Allocate();
+ Result result = allocator_->Allocate();
+ ASSERT(fp.is_valid() && result.is_valid());
+
+ Label exit;
+
+ // Get the number of formal parameters.
+ __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ mov(result.reg(),
+ Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ result.set_type_info(TypeInfo::Smi());
+ if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
frame_->Push(&result);
}
@@ -7003,8 +7027,10 @@
// specialized add or subtract stub. The result is left in dst.
class DeferredPrefixCountOperation: public DeferredCode {
public:
- DeferredPrefixCountOperation(Register dst, bool is_increment)
- : dst_(dst), is_increment_(is_increment) {
+ DeferredPrefixCountOperation(Register dst,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
set_comment("[ DeferredCountOperation");
}
@@ -7013,6 +7039,7 @@
private:
Register dst_;
bool is_increment_;
+ TypeInfo input_type_;
};
@@ -7023,15 +7050,21 @@
} else {
__ add(Operand(dst_), Immediate(Smi::FromInt(1)));
}
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(1)));
- if (is_increment_) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ Register left;
+ if (input_type_.IsNumber()) {
+ left = dst_;
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ left = eax;
}
+
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -7043,8 +7076,14 @@
// The result is left in dst.
class DeferredPostfixCountOperation: public DeferredCode {
public:
- DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
- : dst_(dst), old_(old), is_increment_(is_increment) {
+ DeferredPostfixCountOperation(Register dst,
+ Register old,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst),
+ old_(old),
+ is_increment_(is_increment),
+ input_type_(input_type) {
set_comment("[ DeferredCountOperation");
}
@@ -7054,6 +7093,7 @@
Register dst_;
Register old_;
bool is_increment_;
+ TypeInfo input_type_;
};
@@ -7064,20 +7104,23 @@
} else {
__ add(Operand(dst_), Immediate(Smi::FromInt(1)));
}
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
- // Save the result of ToNumber to use as the old value.
- __ push(eax);
-
- // Call the runtime for the addition or subtraction.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(1)));
- if (is_increment_) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ Register left;
+ if (input_type_.IsNumber()) {
+ __ push(dst_); // Save the input to use as the old value.
+ left = dst_;
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(eax); // Save the result of ToNumber to use as the old value.
+ left = eax;
}
+
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
if (!dst_.is(eax)) __ mov(dst_, eax);
__ pop(old_);
}
@@ -7120,9 +7163,13 @@
ASSERT(old_value.is_valid());
__ mov(old_value.reg(), new_value.reg());
- // The return value for postfix operations is the
- // same as the input, and has the same number info.
- old_value.set_type_info(new_value.type_info());
+ // The return value for postfix operations is ToNumber(input).
+ // Keep more precise type info if the input is some kind of
+ // number already. If the input is not a number we have to wait
+ // for the deferred code to convert it.
+ if (new_value.type_info().IsNumber()) {
+ old_value.set_type_info(new_value.type_info());
+ }
}
// Ensure the new value is writable.
@@ -7156,10 +7203,12 @@
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
old_value.reg(),
- is_increment);
+ is_increment,
+ new_value.type_info());
} else {
deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment);
+ is_increment,
+ new_value.type_info());
}
if (new_value.is_smi()) {
@@ -7186,6 +7235,13 @@
}
deferred->BindExit();
+ // Postfix count operations return their input converted to
+ // number. The case when the input is already a number is covered
+ // above in the allocation code for old_value.
+ if (is_postfix && !new_value.type_info().IsNumber()) {
+ old_value.set_type_info(TypeInfo::Number());
+ }
+
// The result of ++ or -- is an Integer32 if the
// input is a smi. Otherwise it is a number.
if (new_value.is_smi()) {
@@ -7596,8 +7652,7 @@
Load(node->left());
Load(node->right());
}
- GenericBinaryOperation(node->op(), node->type(),
- overwrite_mode, node->no_negative_zero());
+ GenericBinaryOperation(node, overwrite_mode);
}
}
@@ -10374,30 +10429,6 @@
}
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame and return it.
- // Otherwise nothing to do: The number of formal parameters has already been
- // passed in register eax by calling function. Just return it.
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(equal, eax,
- Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- } else {
- Label exit;
- __ j(not_equal, &exit);
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ bind(&exit);
- }
- __ ret(0);
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -11345,7 +11376,7 @@
// If true, a Handle<T> passed by value is passed and returned by
// using the location_ field directly. If false, it is passed and
// returned as a pointer to a handle.
-#ifdef USING_MAC_ABI
+#ifdef USING_BSD_ABI
static const bool kPassHandlesDirectly = true;
#else
static const bool kPassHandlesDirectly = false;
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 9fcc466..a92386f 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -492,11 +492,8 @@
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
- void GenericBinaryOperation(
- Token::Value op,
- StaticType* type,
- OverwriteMode overwrite_mode,
- bool no_negative_zero);
+ void GenericBinaryOperation(BinaryOperation* expr,
+ OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
@@ -505,22 +502,19 @@
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result operand.
- Result ConstantSmiBinaryOperation(Token::Value op,
+ Result ConstantSmiBinaryOperation(BinaryOperation* expr,
Result* operand,
Handle<Object> constant_operand,
- StaticType* type,
bool reversed,
- OverwriteMode overwrite_mode,
- bool no_negative_zero);
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results left and right.
- Result LikelySmiBinaryOperation(Token::Value op,
+ Result LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
- OverwriteMode overwrite_mode,
- bool no_negative_zero);
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two untagged int32 values.
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index c6accbd..7e82528 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -832,7 +832,7 @@
NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- LOG(RegExpCodeCreateEvent(*code, *source));
+ PROFILE(RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index e336db7..2cbf0d5 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -32,14 +32,14 @@
// Changes script text and recompiles all relevant functions if possible.
// The change is always a substring (change_pos, change_pos + change_len)
// being replaced with a completely different string new_str.
-//
+//
// Only one function will have its Code changed in result of this function.
// All nested functions (should they have any instances at the moment) are left
// unchanged and re-linked to a newly created script instance representing old
// version of the source. (Generally speaking,
// during the change all nested functions are erased and completely different
// set of nested functions are introduced.) All other functions just have
-// their positions updated.
+// their positions updated.
//
// @param {Script} script that is being changed
// @param {Array} change_log a list that collects engineer-readable description
@@ -56,9 +56,9 @@
// Elements of array are ordered by start positions of functions (from top
// to bottom) in the source. Fields outer_index and next_sibling_index help
// to navigate the nesting structure of functions.
- //
- // The script is used for compilation, because it produces code that
- // needs to be linked with some particular script (for nested functions).
+ //
+ // The script is used for compilation, because it produces code that
+ // needs to be linked with some particular script (for nested functions).
function DebugGatherCompileInfo(source) {
// Get function info, elements are partially sorted (it is a tree
// of nested functions serialized as parent followed by serialized children.
@@ -71,7 +71,7 @@
compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i]));
old_index_map.push(i);
}
-
+
for (var i = 0; i < compile_info.length; i++) {
var k = i;
for (var j = i + 1; j < compile_info.length; j++) {
@@ -112,12 +112,12 @@
compile_info[previous_sibling].next_sibling_index = -1;
}
}
-
+
ResetIndexes(-1, -1);
Assert(current_index == compile_info.length);
-
+
return compile_info;
- }
+ }
// Given a positions, finds a function that fully includes the entire change.
function FindChangedFunction(compile_info, offset, len) {
@@ -148,7 +148,7 @@
var old_info = old_compile_info[index];
for (var i = 0; i < shared_infos.length; i++) {
var info = shared_infos[i];
- if (info.start_position == old_info.start_position &&
+ if (info.start_position == old_info.start_position &&
info.end_position == old_info.end_position) {
return info;
}
@@ -161,7 +161,7 @@
change_log.push( {function_patched: new_info.function_name} );
}
-
+
var change_len_old;
var change_len_new;
// Translate position in old version of script into position in new
@@ -175,7 +175,7 @@
}
return -1;
}
-
+
var position_change_array;
var position_patch_report;
function PatchPositions(new_info, shared_info) {
@@ -187,7 +187,7 @@
position_change_array);
position_patch_report.push( { name: new_info.function_name } );
}
-
+
var link_to_old_script_report;
var old_script;
// Makes a function associated with another instance of a script (the
@@ -195,16 +195,16 @@
// may access its own text.
function LinkToOldScript(shared_info) {
%LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
-
+
link_to_old_script_report.push( { name: shared_info.function_name } );
}
-
+
var old_source = script.source;
var change_len_old = change_len;
var change_len_new = new_str.length;
-
+
// Prepare new source string.
var new_source = old_source.substring(0, change_pos) +
new_str + old_source.substring(change_pos + change_len);
@@ -217,10 +217,10 @@
for (var i = 0; i < shared_raw_list.length; i++) {
shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i]));
}
-
+
// Gather compile information about old version of script.
var old_compile_info = DebugGatherCompileInfo(old_source);
-
+
// Gather compile information about new version of script.
var new_compile_info;
try {
@@ -247,20 +247,20 @@
old_compile_info[function_being_patched],
new_compile_info[function_being_patched])) {
- Assert(old_compile_info[function_being_patched].outer_index ==
+ Assert(old_compile_info[function_being_patched].outer_index ==
new_compile_info[function_being_patched].outer_index);
function_being_patched =
old_compile_info[function_being_patched].outer_index;
Assert(function_being_patched != -1);
}
-
+
// Check that function being patched is not currently on stack.
liveedit.CheckStackActivations(
[ FindFunctionInfo(function_being_patched) ], change_log );
-
+
// Committing all changes.
- var old_script_name = liveedit.CreateNameForOldScript(script);
+ var old_script_name = liveedit.CreateNameForOldScript(script);
// Update the script text and create a new script representing an old
// version of the script.
@@ -271,11 +271,11 @@
var position_patch_report = new Array();
change_log.push( {position_patched: position_patch_report} );
-
+
var position_change_array = [ change_pos,
change_pos + change_len_old,
change_pos + change_len_new ];
-
+
// Update positions of all outer functions (i.e. all functions, that
// are partially below the function being patched).
for (var i = new_compile_info[function_being_patched].outer_index;
@@ -308,7 +308,7 @@
var link_to_old_script_report = new Array();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
- // We need to link to old script all former nested functions.
+ // We need to link to old script all former nested functions.
for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
LinkToOldScript(FindFunctionInfo(i), old_script);
}
@@ -323,7 +323,7 @@
}
}
}
-
+
// An object describing function compilation details. Its index fields
// apply to indexes inside array that stores these objects.
Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
@@ -337,7 +337,7 @@
this.next_sibling_index = null;
this.raw_array = raw_array;
}
-
+
// A structure describing SharedFunctionInfo.
Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) {
this.function_name = raw_array[0];
@@ -364,18 +364,18 @@
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
-
+
if (!scope_info1) {
return !scope_info2;
}
-
+
if (scope_info1.length != scope_info2.length) {
return false;
}
// Check that outer scope structure is not changed. Otherwise the function
// will not properly work with existing scopes.
- return scope_info1.toString() == scope_info2.toString();
+ return scope_info1.toString() == scope_info2.toString();
}
// For array of wrapped shared function infos checks that none of them
@@ -384,7 +384,7 @@
Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
change_log) {
var liveedit = Debug.LiveEditChangeScript;
-
+
var shared_list = new Array();
for (var i = 0; i < shared_wrapper_list.length; i++) {
shared_list[i] = shared_wrapper_list[i].info;
@@ -396,7 +396,7 @@
var shared = shared_list[i];
var description = {
name: shared.function_name,
- start_pos: shared.start_position,
+ start_pos: shared.start_position,
end_pos: shared.end_position
};
problems.push(description);
@@ -422,10 +422,93 @@
}
Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
- return "LiveEdit Failure: " + this.message;
+ return "LiveEdit Failure: " + this.message;
}
// A testing entry.
Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}
+
+// A LiveEdit namespace is declared inside a single function constructor.
+Debug.LiveEdit = new function() {
+ var LiveEdit = this;
+
+
+ // LiveEdit main entry point: changes a script text to a new string.
+ LiveEdit.SetScriptSource = function(script, new_source, change_log) {
+ var old_source = script.source;
+ var diff = FindSimpleDiff(old_source, new_source);
+ if (!diff) {
+ return;
+ }
+ Debug.LiveEditChangeScript(script, diff.change_pos, diff.old_len,
+ new_source.substring(diff.change_pos, diff.change_pos + diff.new_len),
+ change_log);
+ }
+
+
+ // Finds a difference between 2 strings in form of a single chunk.
+  // This is a temporary solution. We should calculate a real diff instead.
+ function FindSimpleDiff(old_source, new_source) {
+ var change_pos;
+ var old_len;
+ var new_len;
+
+ // A find range block. Whenever control leaves it, it should set 3 local
+ // variables declared above.
+ find_range:
+ {
+ // First look from the beginning of strings.
+ var pos1;
+ {
+ var next_pos;
+ for (pos1 = 0; true; pos1 = next_pos) {
+ if (pos1 >= old_source.length) {
+ change_pos = pos1;
+ old_len = 0;
+ new_len = new_source.length - pos1;
+ break find_range;
+ }
+ if (pos1 >= new_source.length) {
+ change_pos = pos1;
+ old_len = old_source.length - pos1;
+ new_len = 0;
+ break find_range;
+ }
+ if (old_source[pos1] != new_source[pos1]) {
+ break;
+ }
+ next_pos = pos1 + 1;
+ }
+ }
+ // Now compare strings from the ends.
+ change_pos = pos1;
+ var pos_old;
+ var pos_new;
+ {
+ for (pos_old = old_source.length - 1, pos_new = new_source.length - 1;
+ true;
+ pos_old--, pos_new--) {
+ if (pos_old - change_pos + 1 < 0 || pos_new - change_pos + 1 < 0) {
+ old_len = pos_old - change_pos + 2;
+ new_len = pos_new - change_pos + 2;
+ break find_range;
+ }
+ if (old_source[pos_old] != new_source[pos_new]) {
+ old_len = pos_old - change_pos + 1;
+ new_len = pos_new - change_pos + 1;
+ break find_range;
+ }
+ }
+ }
+ }
+
+ if (old_len == 0 && new_len == 0) {
+ // no change
+ return;
+ }
+
+ return { "change_pos": change_pos, "old_len": old_len, "new_len": new_len };
+ }
+}
diff --git a/src/log.cc b/src/log.cc
index 4441875..daf078a 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -143,15 +143,14 @@
// StackTracer implementation
//
void StackTracer::Trace(TickSample* sample) {
- if (sample->state == GC) {
- sample->frames_count = 0;
- return;
- }
+ sample->function = NULL;
+ sample->frames_count = 0;
+
+ if (sample->state == GC) return;
const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
if (js_entry_sp == 0) {
// Not executing JS now.
- sample->frames_count = 0;
return;
}
@@ -183,6 +182,8 @@
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
+#ifndef ENABLE_CPP_PROFILES_PROCESSOR
+
class Ticker: public Sampler {
public:
explicit Ticker(int interval):
@@ -224,6 +225,8 @@
Profiler* profiler_;
};
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
//
// SlidingStateWindow implementation.
@@ -1300,7 +1303,7 @@
tag = Logger::CALL_IC_TAG;
break;
}
- LOG(CodeCreateEvent(tag, code_object, description));
+ PROFILE(CodeCreateEvent(tag, code_object, description));
}
}
@@ -1334,16 +1337,16 @@
Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
- LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
- shared->code(), *func_name,
- *script_name, line_num + 1));
+ PROFILE(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+ shared->code(), *func_name,
+ *script_name, line_num + 1));
} else {
// Can't distinguish enum and script here, so always use Script.
- LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
- shared->code(), *script_name));
+ PROFILE(CodeCreateEvent(Logger::SCRIPT_TAG,
+ shared->code(), *script_name));
}
} else {
- LOG(CodeCreateEvent(
+ PROFILE(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
} else if (shared->IsApiFunction()) {
@@ -1354,10 +1357,10 @@
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
- LOG(CallbackEvent(*func_name, entry_point));
+ PROFILE(CallbackEvent(*func_name, entry_point));
}
} else {
- LOG(CodeCreateEvent(
+ PROFILE(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
}
@@ -1373,7 +1376,7 @@
if (!obj->IsJSFunction()) continue;
JSFunction* jsf = JSFunction::cast(obj);
if (!jsf->is_compiled()) continue;
- LOG(FunctionCreateEvent(jsf));
+ PROFILE(FunctionCreateEvent(jsf));
}
}
@@ -1388,11 +1391,11 @@
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
if (getter_entry != 0) {
- LOG(GetterCallbackEvent(name, getter_entry));
+ PROFILE(GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
- LOG(SetterCallbackEvent(name, setter_entry));
+ PROFILE(SetterCallbackEvent(name, setter_entry));
}
}
}
@@ -1505,6 +1508,11 @@
}
}
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ // Disable old logging, as we are using the same '--prof' flag.
+ logging_nesting_ = 0;
+#endif
+
LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
return true;
@@ -1558,5 +1566,4 @@
#endif
}
-
} } // namespace v8::internal
diff --git a/src/log.h b/src/log.h
index 613a1e2..961b100 100644
--- a/src/log.h
+++ b/src/log.h
@@ -87,7 +87,6 @@
#define LOG(Call) ((void) 0)
#endif
-
class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
@@ -378,6 +377,8 @@
static int logging_nesting_;
static int cpu_profiler_nesting_;
static int heap_profiler_nesting_;
+
+ friend class CpuProfiler;
#else
static bool is_logging() { return false; }
#endif
@@ -391,6 +392,26 @@
};
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
+class Ticker: public Sampler {
+ public:
+ explicit Ticker(int interval):
+ Sampler(interval, FLAG_prof) {}
+
+ void SampleStack(TickSample* sample) {
+ StackTracer::Trace(sample);
+ }
+ void Tick(TickSample* sample) { }
+ void SetWindow(SlidingStateWindow* window) { }
+ void ClearWindow() { }
+ void SetProfiler(Profiler* profiler) { }
+ void ClearProfiler() { }
+};
+
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
} } // namespace v8::internal
+
#endif // V8_LOG_H_
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1f2c37d..80ad389 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -53,13 +53,13 @@
// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
-int MarkCompactCollector::live_young_objects_ = 0;
-int MarkCompactCollector::live_old_data_objects_ = 0;
-int MarkCompactCollector::live_old_pointer_objects_ = 0;
-int MarkCompactCollector::live_code_objects_ = 0;
-int MarkCompactCollector::live_map_objects_ = 0;
-int MarkCompactCollector::live_cell_objects_ = 0;
-int MarkCompactCollector::live_lo_objects_ = 0;
+int MarkCompactCollector::live_young_objects_size_ = 0;
+int MarkCompactCollector::live_old_data_objects_size_ = 0;
+int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
+int MarkCompactCollector::live_code_objects_size_ = 0;
+int MarkCompactCollector::live_map_objects_size_ = 0;
+int MarkCompactCollector::live_cell_objects_size_ = 0;
+int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif
void MarkCompactCollector::CollectGarbage() {
@@ -136,13 +136,13 @@
#ifdef DEBUG
live_bytes_ = 0;
- live_young_objects_ = 0;
- live_old_pointer_objects_ = 0;
- live_old_data_objects_ = 0;
- live_code_objects_ = 0;
- live_map_objects_ = 0;
- live_cell_objects_ = 0;
- live_lo_objects_ = 0;
+ live_young_objects_size_ = 0;
+ live_old_pointer_objects_size_ = 0;
+ live_old_data_objects_size_ = 0;
+ live_code_objects_size_ = 0;
+ live_map_objects_size_ = 0;
+ live_cell_objects_size_ = 0;
+ live_lo_objects_size_ = 0;
#endif
}
@@ -742,21 +742,21 @@
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
if (Heap::new_space()->Contains(obj)) {
- live_young_objects_++;
+ live_young_objects_size_ += obj->Size();
} else if (Heap::map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
- live_map_objects_++;
+ live_map_objects_size_ += obj->Size();
} else if (Heap::cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
- live_cell_objects_++;
+ live_cell_objects_size_ += obj->Size();
} else if (Heap::old_pointer_space()->Contains(obj)) {
- live_old_pointer_objects_++;
+ live_old_pointer_objects_size_ += obj->Size();
} else if (Heap::old_data_space()->Contains(obj)) {
- live_old_data_objects_++;
+ live_old_data_objects_size_ += obj->Size();
} else if (Heap::code_space()->Contains(obj)) {
- live_code_objects_++;
+ live_code_objects_size_ += obj->Size();
} else if (Heap::lo_space()->Contains(obj)) {
- live_lo_objects_++;
+ live_lo_objects_size_ += obj->Size();
} else {
UNREACHABLE();
}
@@ -1068,31 +1068,210 @@
}
-static void SweepSpace(NewSpace* space) {
+// We scavenge new space simultaneously with sweeping. This is done in two
+// passes.
+// The first pass migrates all alive objects from one semispace to another or
+// promotes them to old space. Forwarding address is written directly into
+// first word of object without any encoding. If object is dead we are writing
+// NULL as a forwarding address.
+// The second pass updates pointers to new space in all spaces. It is possible
+// to encounter pointers to dead objects during traversal of remembered set for
+// map space because remembered set bits corresponding to dead maps are cleared
+// later during map space sweeping.
+static void MigrateObject(Address dst, Address src, int size) {
+ Heap::CopyBlock(reinterpret_cast<Object**>(dst),
+ reinterpret_cast<Object**>(src),
+ size);
+
+ Memory::Address_at(src) = dst;
+}
+
+
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
+ public:
+ void VisitPointer(Object** p) {
+ UpdatePointer(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) UpdatePointer(p);
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VisitPointer(&target);
+ rinfo->set_target_address(Code::cast(target)->instruction_start());
+ }
+
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence());
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ VisitPointer(&target);
+ rinfo->set_call_address(Code::cast(target)->instruction_start());
+ }
+
+ private:
+ void UpdatePointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+ Address old_addr = obj->address();
+
+ if (Heap::new_space()->Contains(obj)) {
+ ASSERT(Heap::InFromSpace(*p));
+ *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
+ }
+ }
+};
+
+// Callback for updating pointers from live objects in old spaces to new space.
+// It can encounter pointers to dead objects in new space when traversing map
+// space (see comment for MigrateObject).
+static void UpdatePointerToNewGen(HeapObject** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ Address old_addr = (*p)->address();
+ ASSERT(Heap::InFromSpace(*p));
+
+ Address new_addr = Memory::Address_at(old_addr);
+
+ // Object pointed by *p is dead. Update is not required.
+ if (new_addr == NULL) return;
+
+ *p = HeapObject::FromAddress(new_addr);
+}
+
+
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
+ Address old_addr = HeapObject::cast(*p)->address();
+ Address new_addr = Memory::Address_at(old_addr);
+ return String::cast(HeapObject::FromAddress(new_addr));
+}
+
+
+static bool TryPromoteObject(HeapObject* object, int object_size) {
+ Object* result;
+
+ if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ MigrateObject(target->address(), object->address(), object_size);
+ Heap::UpdateRSet(target);
+ return true;
+ }
+ } else {
+ OldSpace* target_space = Heap::TargetSpace(object);
+
+ ASSERT(target_space == Heap::old_pointer_space() ||
+ target_space == Heap::old_data_space());
+ result = target_space->AllocateRaw(object_size);
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ MigrateObject(target->address(), object->address(), object_size);
+ if (target_space == Heap::old_pointer_space()) {
+ Heap::UpdateRSet(target);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+static void SweepNewSpace(NewSpace* space) {
+ Heap::CheckNewSpaceExpansionCriteria();
+
+ Address from_bottom = space->bottom();
+ Address from_top = space->top();
+
+ // Flip the semispaces. After flipping, to space is empty, from space has
+ // live objects.
+ space->Flip();
+ space->ResetAllocationInfo();
+
+ int size = 0;
+ int survivors_size = 0;
+
+ // First pass: traverse all objects in inactive semispace, remove marks,
+ // migrate live objects and write forwarding addresses.
+ for (Address current = from_bottom; current < from_top; current += size) {
+ HeapObject* object = HeapObject::FromAddress(current);
+
+ if (object->IsMarked()) {
+ object->ClearMark();
+ MarkCompactCollector::tracer()->decrement_marked_count();
+
+ size = object->Size();
+ survivors_size += size;
+
+ if (Heap::ShouldBePromoted(current, size) &&
+ TryPromoteObject(object, size)) {
+ continue;
+ }
+
+ // Promotion either failed or not required.
+ // Copy the content of the object.
+ Object* target = space->AllocateRaw(size);
+
+ // Allocation cannot fail at this point: semispaces are of equal size.
+ ASSERT(!target->IsFailure());
+
+ MigrateObject(HeapObject::cast(target)->address(), current, size);
+ } else {
+ size = object->Size();
+ Memory::Address_at(current) = NULL;
+ }
+ }
+
+ // Second pass: find pointers to new space and update them.
+ PointersToNewGenUpdatingVisitor updating_visitor;
+
+ // Update pointers in to space.
HeapObject* object;
for (Address current = space->bottom();
current < space->top();
current += object->Size()) {
object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
- } else {
- // We give non-live objects a map that will correctly give their size,
- // since their existing map might not be live after the collection.
- int size = object->Size();
- if (size >= ByteArray::kHeaderSize) {
- object->set_map(Heap::raw_unchecked_byte_array_map());
- ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
- } else {
- ASSERT(size == kPointerSize);
- object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
- }
- ASSERT(object->Size() == size);
- }
- // The object is now unmarked for the call to Size() at the top of the
- // loop.
+
+ object->IterateBody(object->map()->instance_type(),
+ object->Size(),
+ &updating_visitor);
}
+
+ // Update roots.
+ Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+
+ // Update pointers in old spaces.
+ Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
+ Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
+ Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
+
+ // Update pointers from cells.
+ HeapObjectIterator cell_iterator(Heap::cell_space());
+ for (HeapObject* cell = cell_iterator.next();
+ cell != NULL;
+ cell = cell_iterator.next()) {
+ if (cell->IsJSGlobalPropertyCell()) {
+ Address value_address =
+ reinterpret_cast<Address>(cell) +
+ (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+ updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ }
+ }
+
+ // Update pointers from external string table.
+ Heap::UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateNewSpaceReferenceInExternalStringTableEntry);
+
+ // All pointers were updated. Update auxiliary allocation info.
+ Heap::IncrementYoungSurvivorsCounter(survivors_size);
+ space->set_age_mark(space->top());
}
@@ -1382,10 +1561,12 @@
ASSERT(FreeListNode::IsFreeListNode(vacant_map));
ASSERT(map_to_evacuate->IsMap());
- memcpy(
- reinterpret_cast<void*>(vacant_map->address()),
- reinterpret_cast<void*>(map_to_evacuate->address()),
- Map::kSize);
+ ASSERT(Map::kSize % 4 == 0);
+
+ Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
+ reinterpret_cast<Object**>(map_to_evacuate->address()),
+ Map::kSize);
+
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
@@ -1465,10 +1646,11 @@
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
- SweepSpace(Heap::new_space());
+ SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
- int live_maps = Heap::map_space()->Size() / Map::kSize;
- ASSERT(live_map_objects_ == live_maps);
+ int live_maps_size = Heap::map_space()->Size();
+ int live_maps = live_maps_size / Map::kSize;
+ ASSERT(live_map_objects_size_ == live_maps_size);
if (Heap::map_space()->NeedsCompaction(live_maps)) {
MapCompact map_compact(live_maps);
@@ -1500,7 +1682,7 @@
Address start,
Address end,
HeapObjectCallback size_func) {
- int live_objects = 0;
+ int live_objects_size = 0;
Address current = start;
while (current < end) {
uint32_t encoded_map = Memory::uint32_at(current);
@@ -1509,11 +1691,12 @@
} else if (encoded_map == kMultiFreeEncoding) {
current += Memory::int_at(current + kIntSize);
} else {
- live_objects++;
- current += size_func(HeapObject::FromAddress(current));
+ int size = size_func(HeapObject::FromAddress(current));
+ current += size;
+ live_objects_size += size;
}
}
- return live_objects;
+ return live_objects_size;
}
@@ -1639,36 +1822,36 @@
Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&updating_visitor);
- int live_maps = IterateLiveObjects(Heap::map_space(),
- &UpdatePointersInOldObject);
- int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
- &UpdatePointersInOldObject);
- int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+ int live_maps_size = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
- int live_codes = IterateLiveObjects(Heap::code_space(),
- &UpdatePointersInOldObject);
- int live_cells = IterateLiveObjects(Heap::cell_space(),
- &UpdatePointersInOldObject);
- int live_news = IterateLiveObjects(Heap::new_space(),
- &UpdatePointersInNewObject);
+ int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
+ &UpdatePointersInOldObject);
+ int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
+ &UpdatePointersInOldObject);
+ int live_codes_size = IterateLiveObjects(Heap::code_space(),
+ &UpdatePointersInOldObject);
+ int live_cells_size = IterateLiveObjects(Heap::cell_space(),
+ &UpdatePointersInOldObject);
+ int live_news_size = IterateLiveObjects(Heap::new_space(),
+ &UpdatePointersInNewObject);
// Large objects do not move, the map word can be updated directly.
LargeObjectIterator it(Heap::lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
UpdatePointersInNewObject(obj);
- USE(live_maps);
- USE(live_pointer_olds);
- USE(live_data_olds);
- USE(live_codes);
- USE(live_cells);
- USE(live_news);
- ASSERT(live_maps == live_map_objects_);
- ASSERT(live_data_olds == live_old_data_objects_);
- ASSERT(live_pointer_olds == live_old_pointer_objects_);
- ASSERT(live_codes == live_code_objects_);
- ASSERT(live_cells == live_cell_objects_);
- ASSERT(live_news == live_young_objects_);
+ USE(live_maps_size);
+ USE(live_pointer_olds_size);
+ USE(live_data_olds_size);
+ USE(live_codes_size);
+ USE(live_cells_size);
+ USE(live_news_size);
+ ASSERT(live_maps_size == live_map_objects_size_);
+ ASSERT(live_data_olds_size == live_old_data_objects_size_);
+ ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+ ASSERT(live_codes_size == live_code_objects_size_);
+ ASSERT(live_cells_size == live_cell_objects_size_);
+ ASSERT(live_news_size == live_young_objects_size_);
}
@@ -1783,27 +1966,31 @@
#endif
// Relocates objects, always relocate map objects first. Relocating
// objects in other space relies on map objects to get object size.
- int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
- int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
- &RelocateOldPointerObject);
- int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
- &RelocateOldDataObject);
- int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
- int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
- int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
+ int live_maps_size = IterateLiveObjects(Heap::map_space(),
+ &RelocateMapObject);
+ int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
+ &RelocateOldPointerObject);
+ int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
+ &RelocateOldDataObject);
+ int live_codes_size = IterateLiveObjects(Heap::code_space(),
+ &RelocateCodeObject);
+ int live_cells_size = IterateLiveObjects(Heap::cell_space(),
+ &RelocateCellObject);
+ int live_news_size = IterateLiveObjects(Heap::new_space(),
+ &RelocateNewObject);
- USE(live_maps);
- USE(live_data_olds);
- USE(live_pointer_olds);
- USE(live_codes);
- USE(live_cells);
- USE(live_news);
- ASSERT(live_maps == live_map_objects_);
- ASSERT(live_data_olds == live_old_data_objects_);
- ASSERT(live_pointer_olds == live_old_pointer_objects_);
- ASSERT(live_codes == live_code_objects_);
- ASSERT(live_cells == live_cell_objects_);
- ASSERT(live_news == live_young_objects_);
+ USE(live_maps_size);
+ USE(live_pointer_olds_size);
+ USE(live_data_olds_size);
+ USE(live_codes_size);
+ USE(live_cells_size);
+ USE(live_news_size);
+ ASSERT(live_maps_size == live_map_objects_size_);
+ ASSERT(live_data_olds_size == live_old_data_objects_size_);
+ ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+ ASSERT(live_codes_size == live_code_objects_size_);
+ ASSERT(live_cells_size == live_cell_objects_size_);
+ ASSERT(live_news_size == live_young_objects_size_);
// Flip from and to spaces
Heap::new_space()->Flip();
@@ -1821,6 +2008,9 @@
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
+
+ Heap::CheckNewSpaceExpansionCriteria();
+ Heap::IncrementYoungSurvivorsCounter(live_news_size);
}
@@ -1840,7 +2030,10 @@
Address old_addr = obj->address();
if (new_addr != old_addr) {
- memmove(new_addr, old_addr, Map::kSize); // copy contents
+ // Move contents.
+ Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+ reinterpret_cast<Object**>(old_addr),
+ Map::kSize);
}
#ifdef DEBUG
@@ -1896,14 +2089,17 @@
Address old_addr = obj->address();
if (new_addr != old_addr) {
- memmove(new_addr, old_addr, obj_size); // Copy contents
+ // Move contents.
+ Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+ reinterpret_cast<Object**>(old_addr),
+ obj_size);
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
- LOG(FunctionMoveEvent(old_addr, new_addr));
+ PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
return obj_size;
@@ -1940,7 +2136,10 @@
Address old_addr = obj->address();
if (new_addr != old_addr) {
- memmove(new_addr, old_addr, obj_size); // Copy contents.
+ // Move contents.
+ Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+ reinterpret_cast<Object**>(old_addr),
+ obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -1948,7 +2147,7 @@
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
- LOG(CodeMoveEvent(old_addr, new_addr));
+ PROFILE(CodeMoveEvent(old_addr, new_addr));
}
return obj_size;
@@ -1976,9 +2175,9 @@
#endif
// New and old addresses cannot overlap.
- memcpy(reinterpret_cast<void*>(new_addr),
- reinterpret_cast<void*>(old_addr),
- obj_size);
+ Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
+ reinterpret_cast<Object**>(old_addr),
+ obj_size);
#ifdef DEBUG
if (FLAG_gc_verbose) {
@@ -1988,7 +2187,7 @@
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
- LOG(FunctionMoveEvent(old_addr, new_addr));
+ PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
return obj_size;
@@ -2010,9 +2209,9 @@
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
- LOG(CodeDeleteEvent(obj->address()));
+ PROFILE(CodeDeleteEvent(obj->address()));
} else if (obj->IsJSFunction()) {
- LOG(FunctionDeleteEvent(obj->address()));
+ PROFILE(FunctionDeleteEvent(obj->address()));
}
#endif
}
diff --git a/src/mark-compact.h b/src/mark-compact.h
index ab572f6..27335f2 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -407,26 +407,26 @@
// Counters used for debugging the marking phase of mark-compact or
// mark-sweep collection.
- // Number of live objects in Heap::to_space_.
- static int live_young_objects_;
+ // Size of live objects in Heap::to_space_.
+ static int live_young_objects_size_;
- // Number of live objects in Heap::old_pointer_space_.
- static int live_old_pointer_objects_;
+ // Size of live objects in Heap::old_pointer_space_.
+ static int live_old_pointer_objects_size_;
- // Number of live objects in Heap::old_data_space_.
- static int live_old_data_objects_;
+ // Size of live objects in Heap::old_data_space_.
+ static int live_old_data_objects_size_;
- // Number of live objects in Heap::code_space_.
- static int live_code_objects_;
+ // Size of live objects in Heap::code_space_.
+ static int live_code_objects_size_;
- // Number of live objects in Heap::map_space_.
- static int live_map_objects_;
+ // Size of live objects in Heap::map_space_.
+ static int live_map_objects_size_;
- // Number of live objects in Heap::cell_space_.
- static int live_cell_objects_;
+ // Size of live objects in Heap::cell_space_.
+ static int live_cell_objects_size_;
- // Number of live objects in Heap::lo_space_.
- static int live_lo_objects_;
+ // Size of live objects in Heap::lo_space_.
+ static int live_lo_objects_size_;
// Number of live bytes in this collection.
static int live_bytes_;
diff --git a/src/messages.js b/src/messages.js
index b8a1070..de6a362 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -433,6 +433,30 @@
/**
+ * Returns the name of script if available, contents of sourceURL comment
+ * otherwise. See
+ * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
+ * for details on using //@ sourceURL comment to identify scritps that don't
+ * have name.
+ *
+ * @return {?string} script name if present, value for //@ sourceURL comment
+ * otherwise.
+ */
+Script.prototype.nameOrSourceURL = function() {
+ if (this.name)
+ return this.name;
+ // TODO(608): the spaces in a regexp below had to be escaped as \040
+ // because this file is being processed by js2c whose handling of spaces
+ // in regexps is broken. Also, ['"] are excluded from allowed URLs to
+ // avoid matches against sources that invoke evals with sourceURL.
+ var sourceUrlPattern =
+ /\/\/@[\040\t]sourceURL=[\040\t]*([^\s'"]*)[\040\t]*$/m;
+ var match = sourceUrlPattern.exec(this.source);
+ return match ? match[1] : this.name;
+}
+
+
+/**
* Class for source location. A source location is a position within some
* source with the following properties:
* script : script object for the source
@@ -743,7 +767,7 @@
} else {
eval_origin += "<anonymous>";
}
-
+
var eval_from_script = script.eval_from_script;
if (eval_from_script) {
if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
@@ -764,7 +788,7 @@
}
}
}
-
+
return eval_origin;
};
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index dfe297b..29d0069 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -67,7 +67,7 @@
}
}
}
-
+
if (IS_UNDEFINED(value)) {
mirror = new UndefinedMirror();
} else if (IS_NULL(value)) {
@@ -110,7 +110,7 @@
return mirror_cache_[handle];
}
-
+
/**
* Returns the mirror for the undefined value.
*
@@ -622,7 +622,7 @@
var propertyNames;
var elementNames;
var total = 0;
-
+
// Find all the named properties.
if (kind & PropertyKind.Named) {
// Get the local property names.
@@ -1223,7 +1223,7 @@
/**
* Returns whether this property is natively implemented by the host or a set
* through JavaScript code.
- * @return {boolean} True if the property is
+ * @return {boolean} True if the property is
* UndefinedMirror if there is no setter for this property
*/
PropertyMirror.prototype.isNative = function() {
@@ -1390,7 +1390,7 @@
FrameMirror.prototype.func = function() {
// Get the function for this frame from the VM.
var f = this.details_.func();
-
+
// Create a function mirror. NOTE: MakeMirror cannot be used here as the
// value returned from the VM might be a string if the function for the
// frame is unresolved.
@@ -1728,8 +1728,7 @@
ScriptMirror.prototype.name = function() {
- // If we have name, we trust it more than sourceURL from comments
- return this.script_.name || this.sourceUrlFromComment_();
+ return this.script_.name || this.script_.nameOrSourceURL();
};
@@ -1825,29 +1824,6 @@
/**
- * Returns a suggested script URL from comments in script code (if found),
- * undefined otherwise. Used primarily by debuggers for identifying eval()'ed
- * scripts. See
- * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * for details.
- *
- * @return {?string} value for //@ sourceURL comment
- */
-ScriptMirror.prototype.sourceUrlFromComment_ = function() {
- if (!('sourceUrl_' in this) && this.source()) {
- // TODO(608): the spaces in a regexp below had to be escaped as \040
- // because this file is being processed by js2c whose handling of spaces
- // in regexps is broken.
- // We're not using \s here to prevent \n from matching.
- var sourceUrlPattern = /\/\/@[\040\t]sourceURL=[\040\t]*(\S+)[\040\t]*$/m;
- var match = sourceUrlPattern.exec(this.source());
- this.sourceUrl_ = match ? match[1] : undefined;
- }
- return this.sourceUrl_;
-};
-
-
-/**
* Mirror object for context.
* @param {Object} data The context data
* @constructor
@@ -1928,10 +1904,10 @@
JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
// Collect the protocol representation of the referenced objects in an array.
var content = [];
-
+
// Get the number of referenced objects.
var count = this.mirrors_.length;
-
+
for (var i = 0; i < count; i++) {
content.push(this.serialize_(this.mirrors_[i], false, false));
}
@@ -1966,7 +1942,7 @@
return;
}
}
-
+
// Add the mirror to the list of mirrors to be serialized.
this.mirrors_.push(mirror);
}
@@ -1978,7 +1954,7 @@
* @param {Mirror} mirror Mirror to serialize.
* @return {Object} Protocol reference object.
*/
-JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
+JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
function(mirror) {
var o = {};
o.ref = mirror.handle();
@@ -2025,7 +2001,7 @@
return {'ref' : mirror.handle()};
}
}
-
+
// Collect the JSON property/value pairs.
var content = {};
@@ -2137,7 +2113,7 @@
// Always add the text representation.
content.text = mirror.toText();
-
+
// Create and return the JSON string.
return content;
}
@@ -2170,7 +2146,7 @@
if (mirror.hasIndexedInterceptor()) {
content.indexedInterceptor = true;
}
-
+
// Add function specific properties.
if (mirror.isFunction()) {
// Add function specific properties.
@@ -2185,7 +2161,7 @@
if (mirror.script()) {
content.script = this.serializeReference(mirror.script());
content.scriptId = mirror.script().id();
-
+
serializeLocationFields(mirror.sourceLocation(), content);
}
}
@@ -2224,13 +2200,13 @@
* "position":"<position>",
* "line":"<line>",
* "column":"<column>",
- *
+ *
* @param {SourceLocation} location The location to serialize, may be undefined.
*/
function serializeLocationFields (location, content) {
if (!location) {
return;
- }
+ }
content.position = location.position;
var line = location.line;
if (!IS_UNDEFINED(line)) {
@@ -2264,7 +2240,7 @@
*/
JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
var result = {};
-
+
result.name = propertyMirror.name();
var propertyValue = propertyMirror.value();
if (this.inlineRefs_() && propertyValue.isValue()) {
@@ -2316,7 +2292,7 @@
if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text;
}
-
+
content.scopes = [];
for (var i = 0; i < mirror.scopeCount(); i++) {
var scope = mirror.scope(i);
@@ -2358,5 +2334,5 @@
return '-Infinity';
}
}
- return value;
+ return value;
}
diff --git a/src/objects.h b/src/objects.h
index 5a0db01..9197466 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3667,6 +3667,13 @@
FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
+
+ // In-object fields.
+ static const int kSourceFieldIndex = 0;
+ static const int kGlobalFieldIndex = 1;
+ static const int kIgnoreCaseFieldIndex = 2;
+ static const int kMultilineFieldIndex = 3;
+ static const int kLastIndexFieldIndex = 4;
};
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index cd7bcb1..b628295 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -727,44 +727,62 @@
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
- TickSample sample;
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (Logger::state() == GC || !IsVmThread()) return;
+
+ TickSample* sample = NULL;
+#else
+ TickSample sample_obj;
+ TickSample* sample = &sample_obj;
// We always sample the VM state.
- sample.state = Logger::state();
+ sample->state = Logger::state();
+#endif
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ sample = CpuProfiler::TickSampleEvent();
+#endif
+ if (sample != NULL) {
#if V8_HOST_ARCH_IA32
- sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
- sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
- sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
+ sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
// Implement this on MIPS.
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
#endif
- if (IsVmThread())
- active_sampler_->SampleStack(&sample);
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ active_sampler_->SampleStack(sample);
+#else
+ if (IsVmThread()) {
+ active_sampler_->SampleStack(sample);
+ }
+#endif
+ }
}
-
- active_sampler_->Tick(&sample);
+#ifndef ENABLE_CPP_PROFILES_PROCESSOR
+ active_sampler_->Tick(sample);
+#endif
#endif
}
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 4502987..7da3cca 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -544,12 +544,19 @@
// Sampler thread handler.
void Runner() {
- // Loop until the sampler is disengaged.
- while (sampler_->IsActive()) {
- TickSample sample;
+ // Loop until the sampler is disengaged, keeping the specified samling freq.
+ for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (Logger::state() == GC) continue;
+
+ TickSample* sample = NULL;
+#else
+ TickSample sample_obj;
+ TickSample* sample = &sample_obj;
// We always sample the VM state.
- sample.state = Logger::state();
+ sample->state = Logger::state();
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
@@ -580,19 +587,23 @@
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
- sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler_->SampleStack(&sample);
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ sample = CpuProfiler::TickSampleEvent();
+#endif
+ if (sample != NULL) {
+ sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+ sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+ sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+ sampler_->SampleStack(sample);
+ }
}
thread_resume(profiled_thread_);
}
+#ifndef ENABLE_CPP_PROFILES_PROCESSOR
// Invoke tick handler with program counter and stack pointer.
- sampler_->Tick(&sample);
-
- // Wait until next sampling.
- usleep(sampler_->interval_ * 1000);
+ sampler_->Tick(sample);
+#endif
}
}
};
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 48f306d..9b09aa3 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1803,37 +1803,48 @@
// Context used for sampling the register state of the profiled thread.
CONTEXT context;
memset(&context, 0, sizeof(context));
- // Loop until the sampler is disengaged.
- while (sampler_->IsActive()) {
- TickSample sample;
+ // Loop until the sampler is disengaged, keeping the specified samling freq.
+ for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (Logger::state() == GC) continue;
+
+ TickSample* sample = NULL;
+#else
+ TickSample sample_obj;
+ TickSample* sample = &sample_obj;
// We always sample the VM state.
- sample.state = Logger::state();
+ sample->state = Logger::state();
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
&& SuspendThread(profiled_thread_) != (DWORD)-1) {
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread_, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample.pc = reinterpret_cast<Address>(context.Rip);
- sample.sp = reinterpret_cast<Address>(context.Rsp);
- sample.fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample.pc = reinterpret_cast<Address>(context.Eip);
- sample.sp = reinterpret_cast<Address>(context.Esp);
- sample.fp = reinterpret_cast<Address>(context.Ebp);
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ sample = CpuProfiler::TickSampleEvent();
#endif
- sampler_->SampleStack(&sample);
+ if (sample != NULL) {
+#if V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(context.Rip);
+ sample->sp = reinterpret_cast<Address>(context.Rsp);
+ sample->fp = reinterpret_cast<Address>(context.Rbp);
+#else
+ sample->pc = reinterpret_cast<Address>(context.Eip);
+ sample->sp = reinterpret_cast<Address>(context.Esp);
+ sample->fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+ sampler_->SampleStack(sample);
+ }
}
ResumeThread(profiled_thread_);
}
+#ifndef ENABLE_CPP_PROFILES_PROCESSOR
// Invoke tick handler with program counter and stack pointer.
- sampler_->Tick(&sample);
-
- // Wait until next sampling.
- Sleep(sampler_->interval_);
+ sampler_->Tick(sample);
+#endif
}
}
};
diff --git a/src/platform.h b/src/platform.h
index 76028e6..85e6fd3 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -527,7 +527,7 @@
Address fp; // Frame pointer.
Address function; // The last called JS function.
StateTag state; // The state of the VM.
- static const int kMaxFramesCount = 100;
+ static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count; // Number of captured frames.
};
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 5e92be4..0ff98a7 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -28,24 +28,27 @@
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
#include "profile-generator.h"
namespace v8 {
namespace internal {
-
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
+ const char* name_prefix,
const char* name,
const char* resource_name,
int line_number)
: tag_(tag),
+ name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
line_number_(line_number) {
}
-bool CodeEntry::is_js_function() {
+bool CodeEntry::is_js_function() const {
return tag_ == Logger::FUNCTION_TAG
|| tag_ == Logger::LAZY_COMPILE_TAG
|| tag_ == Logger::SCRIPT_TAG;
@@ -76,6 +79,14 @@
}
+bool CpuProfilesCollection::is_last_profile() {
+ // Called from VM thread, and only it can mutate the list,
+ // so no locking is needed here.
+ return current_profiles_.length() == 1;
+}
+
} } // namespace v8::internal
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 7f020c5..e660760 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -25,15 +25,20 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
#include "v8.h"
#include "profile-generator-inl.h"
-
namespace v8 {
namespace internal {
+const char* CodeEntry::kEmptyNamePrefix = "";
+const int CodeEntry::kNoLineNumberInfo = -1;
+
+
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -47,25 +52,19 @@
children_.Lookup(entry, CodeEntryHash(entry), true);
if (map_entry->value == NULL) {
// New node added.
- map_entry->value = new ProfileNode(entry);
+ ProfileNode* new_node = new ProfileNode(entry);
+ map_entry->value = new_node;
+ children_list_.Add(new_node);
}
return reinterpret_cast<ProfileNode*>(map_entry->value);
}
-void ProfileNode::GetChildren(List<ProfileNode*>* children) {
- for (HashMap::Entry* p = children_.Start();
- p != NULL;
- p = children_.Next(p)) {
- children->Add(reinterpret_cast<ProfileNode*>(p->value));
- }
-}
-
-
void ProfileNode::Print(int indent) {
- OS::Print("%4u %4u %*c %s\n",
+ OS::Print("%5u %5u %*c %s%s\n",
total_ticks_, self_ticks_,
indent, ' ',
+ entry_ != NULL ? entry_->name_prefix() : "",
entry_ != NULL ? entry_->name() : "");
for (HashMap::Entry* p = children_.Start();
p != NULL;
@@ -123,39 +122,46 @@
namespace {
-struct Position {
- Position(ProfileNode* a_node, HashMap::Entry* a_p)
- : node(a_node), p(a_p) { }
+class Position {
+ public:
+ explicit Position(ProfileNode* node)
+ : node(node), child_idx_(0) { }
INLINE(ProfileNode* current_child()) {
- return reinterpret_cast<ProfileNode*>(p->value);
+ return node->children()->at(child_idx_);
}
+ INLINE(bool has_current_child()) {
+ return child_idx_ < node->children()->length();
+ }
+ INLINE(void next_child()) { ++child_idx_; }
+
ProfileNode* node;
- HashMap::Entry* p;
+ private:
+ int child_idx_;
};
} // namespace
+// Non-recursive implementation of breadth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseBreadthFirstPostOrder(Callback* callback) {
List<Position> stack(10);
- stack.Add(Position(root_, root_->children_.Start()));
+ stack.Add(Position(root_));
do {
Position& current = stack.last();
- if (current.p != NULL) {
- stack.Add(Position(current.current_child(),
- current.current_child()->children_.Start()));
+ if (current.has_current_child()) {
+ stack.Add(Position(current.current_child()));
} else {
callback->AfterAllChildrenTraversed(current.node);
if (stack.length() > 1) {
Position& parent = stack[stack.length() - 2];
callback->AfterChildTraversed(parent.node, current.node);
- parent.p = parent.node->children_.Next(parent.p);
+ parent.next_child();
// Remove child from the stack.
stack.RemoveLast();
}
}
- } while (stack.length() > 1 || stack.last().p != NULL);
+ } while (stack.length() > 1 || stack.last().has_current_child());
}
@@ -175,7 +181,6 @@
} // namespace
-// Non-recursive implementation of breadth-first tree traversal.
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
TraverseBreadthFirstPostOrder(&cb);
@@ -242,8 +247,22 @@
}
+void CodeMap::CodeTreePrinter::Call(
+ const Address& key, const CodeMap::CodeEntryInfo& value) {
+ OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
+}
+
+
+void CodeMap::Print() {
+ CodeTreePrinter printer;
+ tree_.ForEach(&printer);
+}
+
+
CpuProfilesCollection::CpuProfilesCollection()
- : function_and_resource_names_(StringsMatch) {
+ : function_and_resource_names_(StringsMatch),
+ profiles_uids_(CpuProfilesMatch),
+ current_profiles_semaphore_(OS::CreateSemaphore(1)) {
}
@@ -262,6 +281,8 @@
CpuProfilesCollection::~CpuProfilesCollection() {
+ delete current_profiles_semaphore_;
+ current_profiles_.Iterate(DeleteCpuProfile);
profiles_.Iterate(DeleteCpuProfile);
code_entries_.Iterate(DeleteCodeEntry);
args_count_names_.Iterate(DeleteArgsCountName);
@@ -273,8 +294,63 @@
}
-void CpuProfilesCollection::AddProfile(unsigned uid) {
- profiles_.Add(new CpuProfile());
+bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
+ ASSERT(uid > 0);
+ current_profiles_semaphore_->Wait();
+ for (int i = 0; i < current_profiles_.length(); ++i) {
+ if (strcmp(current_profiles_[i]->title(), title) == 0) {
+ // Ignore attempts to start profile with the same title.
+ current_profiles_semaphore_->Signal();
+ return false;
+ }
+ }
+ current_profiles_.Add(new CpuProfile(title, uid));
+ current_profiles_semaphore_->Signal();
+ return true;
+}
+
+
+bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
+ return StartProfiling(GetName(title), uid);
+}
+
+
+CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
+ const int title_len = strlen(title);
+ CpuProfile* profile = NULL;
+ current_profiles_semaphore_->Wait();
+ for (int i = current_profiles_.length() - 1; i >= 0; --i) {
+ if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
+ profile = current_profiles_.Remove(i);
+ break;
+ }
+ }
+ current_profiles_semaphore_->Signal();
+
+ if (profile != NULL) {
+ profile->CalculateTotalTicks();
+ profiles_.Add(profile);
+ HashMap::Entry* entry =
+ profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
+ static_cast<uint32_t>(profile->uid()),
+ true);
+ ASSERT(entry->value == NULL);
+ entry->value = profile;
+ }
+ return profile;
+}
+
+
+CpuProfile* CpuProfilesCollection::StopProfiling(String* title) {
+ return StopProfiling(GetName(title));
+}
+
+
+CpuProfile* CpuProfilesCollection::GetProfile(unsigned uid) {
+ HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid),
+ false);
+ return entry != NULL ? reinterpret_cast<CpuProfile*>(entry->value) : NULL;
}
@@ -283,6 +359,7 @@
String* resource_name,
int line_number) {
CodeEntry* entry = new CodeEntry(tag,
+ CodeEntry::kEmptyNamePrefix,
GetName(name),
GetName(resource_name),
line_number);
@@ -293,7 +370,24 @@
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name) {
- CodeEntry* entry = new CodeEntry(tag, name, "", 0);
+ CodeEntry* entry = new CodeEntry(tag,
+ CodeEntry::kEmptyNamePrefix,
+ name,
+ "",
+ CodeEntry::kNoLineNumberInfo);
+ code_entries_.Add(entry);
+ return entry;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
+ const char* name_prefix,
+ String* name) {
+ CodeEntry* entry = new CodeEntry(tag,
+ name_prefix,
+ GetName(name),
+ "",
+ CodeEntry::kNoLineNumberInfo);
code_entries_.Add(entry);
return entry;
}
@@ -301,7 +395,11 @@
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
int args_count) {
- CodeEntry* entry = new CodeEntry(tag, GetName(args_count), "", 0);
+ CodeEntry* entry = new CodeEntry(tag,
+ "args_count: ",
+ GetName(args_count),
+ "",
+ CodeEntry::kNoLineNumberInfo);
code_entries_.Add(entry);
return entry;
}
@@ -337,14 +435,26 @@
if (args_count_names_[args_count] == NULL) {
const int kMaximumNameLength = 32;
char* name = NewArray<char>(kMaximumNameLength);
- OS::SNPrintF(Vector<char>(name, kMaximumNameLength),
- "args_count: %d", args_count);
+ OS::SNPrintF(Vector<char>(name, kMaximumNameLength), "%d", args_count);
args_count_names_[args_count] = name;
}
return args_count_names_[args_count];
}
+void CpuProfilesCollection::AddPathToCurrentProfiles(
+ const Vector<CodeEntry*>& path) {
+ // As starting / stopping profiles is rare relatively to this
+ // method, we don't bother minimizing the duration of lock holding,
+ // e.g. copying contents of the list to a local vector.
+ current_profiles_semaphore_->Wait();
+ for (int i = 0; i < current_profiles_.length(); ++i) {
+ current_profiles_[i]->AddPath(path);
+ }
+ current_profiles_semaphore_->Signal();
+}
+
+
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
: profiles_(profiles) {
}
@@ -377,8 +487,9 @@
*entry++ = code_map_.FindEntry(*stack_pos);
}
- profile()->AddPath(entries);
+ profiles_->AddPathToCurrentProfiles(entries);
}
-
} } // namespace v8::internal
+
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
diff --git a/src/profile-generator.h b/src/profile-generator.h
index c0454cb..83d7a25 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -28,25 +28,35 @@
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
#include "hashmap.h"
namespace v8 {
namespace internal {
-
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
- INLINE(CodeEntry(Logger::LogEventsAndTags tag_,
- const char* name_,
- const char* resource_name_,
- int line_number_));
+ INLINE(CodeEntry(Logger::LogEventsAndTags tag,
+ const char* name_prefix,
+ const char* name,
+ const char* resource_name,
+ int line_number));
- INLINE(bool is_js_function());
- INLINE(const char* name()) { return name_; }
+ INLINE(bool is_js_function() const);
+ INLINE(const char* name_prefix() const) { return name_prefix_; }
+ INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
+ INLINE(const char* name() const) { return name_; }
+ INLINE(const char* resource_name() const) { return resource_name_; }
+ INLINE(int line_number() const) { return line_number_; }
+
+ static const char* kEmptyNamePrefix;
+ static const int kNoLineNumberInfo;
private:
Logger::LogEventsAndTags tag_;
+ const char* name_prefix_;
const char* name_;
const char* resource_name_;
int line_number_;
@@ -67,7 +77,7 @@
INLINE(CodeEntry* entry() const) { return entry_; }
INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(unsigned self_ticks() const) { return self_ticks_; }
- void GetChildren(List<ProfileNode*>* children);
+ INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
void Print(int indent);
@@ -85,14 +95,13 @@
unsigned self_ticks_;
// CodeEntry* -> ProfileNode*
HashMap children_;
-
- friend class ProfileTree;
+ List<ProfileNode*> children_list_;
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
-class ProfileTree BASE_EMBEDDED {
+class ProfileTree {
public:
ProfileTree() : root_(new ProfileNode(NULL)) { }
~ProfileTree();
@@ -101,7 +110,7 @@
void AddPathFromStart(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
- ProfileNode* root() { return root_; }
+ ProfileNode* root() const { return root_; }
void ShortPrint();
void Print() {
@@ -120,18 +129,24 @@
class CpuProfile {
public:
- CpuProfile() { }
+ CpuProfile(const char* title, unsigned uid)
+ : title_(title), uid_(uid) { }
+
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
- INLINE(ProfileTree* top_down()) { return &top_down_; }
- INLINE(ProfileTree* bottom_up()) { return &bottom_up_; }
+ INLINE(const char* title() const) { return title_; }
+ INLINE(unsigned uid() const) { return uid_; }
+ INLINE(const ProfileTree* top_down() const) { return &top_down_; }
+ INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
void ShortPrint();
void Print();
private:
+ const char* title_;
+ unsigned uid_;
ProfileTree top_down_;
ProfileTree bottom_up_;
@@ -139,7 +154,7 @@
};
-class CodeMap BASE_EMBEDDED {
+class CodeMap {
public:
CodeMap() { }
INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
@@ -148,6 +163,8 @@
void AddAlias(Address alias, Address addr);
CodeEntry* FindEntry(Address addr);
+ void Print();
+
private:
struct CodeEntryInfo {
CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
@@ -167,6 +184,11 @@
};
typedef SplayTree<CodeTreeConfig> CodeTree;
+ class CodeTreePrinter {
+ public:
+ void Call(const Address& key, const CodeEntryInfo& value);
+ };
+
CodeTree tree_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
@@ -178,14 +200,23 @@
CpuProfilesCollection();
~CpuProfilesCollection();
- void AddProfile(unsigned uid);
+ bool StartProfiling(const char* title, unsigned uid);
+ bool StartProfiling(String* title, unsigned uid);
+ CpuProfile* StopProfiling(const char* title);
+ CpuProfile* StopProfiling(String* title);
+ INLINE(List<CpuProfile*>* profiles()) { return &profiles_; }
+ CpuProfile* GetProfile(unsigned uid);
+ inline bool is_last_profile();
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
+ CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+ const char* name_prefix, String* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
- INLINE(CpuProfile* profile()) { return profiles_.last(); }
+ // Called from profile generator thread.
+ void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
private:
const char* GetName(String* name);
@@ -196,12 +227,22 @@
reinterpret_cast<char*>(key2)) == 0;
}
+ INLINE(static bool CpuProfilesMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+
// String::Hash -> const char*
HashMap function_and_resource_names_;
// args_count -> char*
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
List<CpuProfile*> profiles_;
+ // uid -> CpuProfile*
+ HashMap profiles_uids_;
+
+ // Accessed by VM thread and profile generator thread.
+ List<CpuProfile*> current_profiles_;
+ Semaphore* current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
@@ -224,6 +265,12 @@
}
INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+ const char* name_prefix,
+ String* name)) {
+ return profiles_->NewCodeEntry(tag, name_prefix, name);
+ }
+
+ INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
int args_count)) {
return profiles_->NewCodeEntry(tag, args_count);
}
@@ -233,15 +280,14 @@
INLINE(CodeMap* code_map()) { return &code_map_; }
private:
- INLINE(CpuProfile* profile()) { return profiles_->profile(); }
-
CpuProfilesCollection* profiles_;
CodeMap code_map_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
-
} } // namespace v8::internal
+#endif // ENABLE_CPP_PROFILES_PROCESSOR
+
#endif // V8_PROFILE_GENERATOR_H_
diff --git a/src/regexp.js b/src/regexp.js
index e2492f7..9adf978 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -71,32 +71,10 @@
}
}
- if (isConstructorCall) {
- // ECMA-262, section 15.10.7.1.
- %SetProperty(object, 'source', pattern,
- DONT_DELETE | READ_ONLY | DONT_ENUM);
-
- // ECMA-262, section 15.10.7.2.
- %SetProperty(object, 'global', global, DONT_DELETE | READ_ONLY | DONT_ENUM);
-
- // ECMA-262, section 15.10.7.3.
- %SetProperty(object, 'ignoreCase', ignoreCase,
- DONT_DELETE | READ_ONLY | DONT_ENUM);
-
- // ECMA-262, section 15.10.7.4.
- %SetProperty(object, 'multiline', multiline,
- DONT_DELETE | READ_ONLY | DONT_ENUM);
-
- // ECMA-262, section 15.10.7.5.
- %SetProperty(object, 'lastIndex', 0, DONT_DELETE | DONT_ENUM);
- } else { // RegExp is being recompiled via RegExp.prototype.compile.
- %IgnoreAttributesAndSetProperty(object, 'source', pattern);
- %IgnoreAttributesAndSetProperty(object, 'global', global);
- %IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
- %IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
- %IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
+ if (!isConstructorCall) {
regExpCache.type = 'none';
}
+ %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline);
// Call internal function to compile the pattern.
%RegExpCompile(object, pattern, flags);
@@ -344,6 +322,7 @@
// on the captures array of the last successful match and the subject string
// of the last successful match.
function RegExpGetLastMatch() {
+ if (lastMatchInfoOverride) { return lastMatchInfoOverride[0]; }
var regExpSubject = LAST_SUBJECT(lastMatchInfo);
return SubString(regExpSubject,
lastMatchInfo[CAPTURE0],
@@ -352,6 +331,11 @@
function RegExpGetLastParen() {
+ if (lastMatchInfoOverride) {
+ var override = lastMatchInfoOverride;
+ if (override.length <= 3) return '';
+ return override[override.length - 3];
+ }
var length = NUMBER_OF_CAPTURES(lastMatchInfo);
if (length <= 2) return ''; // There were no captures.
// We match the SpiderMonkey behavior: return the substring defined by the
@@ -368,17 +352,32 @@
function RegExpGetLeftContext() {
- return SubString(LAST_SUBJECT(lastMatchInfo),
- 0,
- lastMatchInfo[CAPTURE0]);
+ var start_index;
+ var subject;
+ if (!lastMatchInfoOverride) {
+ start_index = lastMatchInfo[CAPTURE0];
+ subject = LAST_SUBJECT(lastMatchInfo);
+ } else {
+ var override = lastMatchInfoOverride;
+ start_index = override[override.length - 2];
+ subject = override[override.length - 1];
+ }
+ return SubString(subject, 0, start_index);
}
function RegExpGetRightContext() {
- var subject = LAST_SUBJECT(lastMatchInfo);
- return SubString(subject,
- lastMatchInfo[CAPTURE1],
- subject.length);
+ var start_index;
+ var subject;
+ if (!lastMatchInfoOverride) {
+ start_index = lastMatchInfo[CAPTURE1];
+ subject = LAST_SUBJECT(lastMatchInfo);
+ } else {
+ var override = lastMatchInfoOverride;
+ subject = override[override.length - 1];
+ start_index = override[override.length - 2] + subject.length;
+ }
+ return SubString(subject, start_index, subject.length);
}
@@ -387,6 +386,10 @@
// called with indices from 1 to 9.
function RegExpMakeCaptureGetter(n) {
return function() {
+ if (lastMatchInfoOverride) {
+ if (n < lastMatchInfoOverride.length - 2) return lastMatchInfoOverride[n];
+ return '';
+ }
var index = n * 2;
if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
var matchStart = lastMatchInfo[CAPTURE(index)];
@@ -411,6 +414,12 @@
0, // REGEXP_FIRST_CAPTURE + 1
];
+// Override last match info with an array of actual substrings.
+// Used internally by replace regexp with function.
+// The array has the format of an "apply" argument for a replacement
+// function.
+var lastMatchInfoOverride = null;
+
// -------------------------------------------------------------------
function SetupRegExp() {
@@ -430,7 +439,7 @@
%FunctionSetLength($RegExp.prototype.compile, 1);
// The properties input, $input, and $_ are aliases for each other. When this
- // value is set the value it is set to is coerced to a string.
+ // value is set the value it is set to is coerced to a string.
// Getter and setter for the input.
function RegExpGetInput() {
var regExpInput = LAST_INPUT(lastMatchInfo);
diff --git a/src/runtime.cc b/src/runtime.cc
index b349815..40b37d9 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1228,6 +1228,62 @@
}
+static Object* Runtime_RegExpInitializeObject(Arguments args) {
+ AssertNoAllocation no_alloc;
+ ASSERT(args.length() == 5);
+ CONVERT_CHECKED(JSRegExp, regexp, args[0]);
+ CONVERT_CHECKED(String, source, args[1]);
+
+ Object* global = args[2];
+ if (!global->IsTrue()) global = Heap::false_value();
+
+ Object* ignoreCase = args[3];
+ if (!ignoreCase->IsTrue()) ignoreCase = Heap::false_value();
+
+ Object* multiline = args[4];
+ if (!multiline->IsTrue()) multiline = Heap::false_value();
+
+ Map* map = regexp->map();
+ Object* constructor = map->constructor();
+ if (constructor->IsJSFunction() &&
+ JSFunction::cast(constructor)->initial_map() == map) {
+ // If we still have the original map, set in-object properties directly.
+ regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
+ // TODO(lrn): Consider skipping write barrier on booleans as well.
+ // Both true and false should be in oldspace at all times.
+ regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
+ regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
+ regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0),
+ SKIP_WRITE_BARRIER);
+ return regexp;
+ }
+
+ // Map has changed, so use generic, but slower, method.
+ PropertyAttributes final =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ regexp->IgnoreAttributesAndSetLocalProperty(Heap::source_symbol(),
+ source,
+ final);
+ regexp->IgnoreAttributesAndSetLocalProperty(Heap::global_symbol(),
+ global,
+ final);
+ regexp->IgnoreAttributesAndSetLocalProperty(Heap::ignore_case_symbol(),
+ ignoreCase,
+ final);
+ regexp->IgnoreAttributesAndSetLocalProperty(Heap::multiline_symbol(),
+ multiline,
+ final);
+ regexp->IgnoreAttributesAndSetLocalProperty(Heap::last_index_symbol(),
+ Smi::FromInt(0),
+ writable);
+ return regexp;
+}
+
+
static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -1567,9 +1623,91 @@
return CharFromCode(args[0]);
}
+
+class FixedArrayBuilder {
+ public:
+ explicit FixedArrayBuilder(int initial_capacity)
+ : array_(Factory::NewFixedArrayWithHoles(initial_capacity)),
+ length_(0) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ ASSERT(initial_capacity > 0);
+ }
+
+ explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
+ : array_(backing_store),
+ length_(0) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ ASSERT(backing_store->length() > 0);
+ }
+
+ bool HasCapacity(int elements) {
+ int length = array_->length();
+ int required_length = length_ + elements;
+ return (length >= required_length);
+ }
+
+ void EnsureCapacity(int elements) {
+ int length = array_->length();
+ int required_length = length_ + elements;
+ if (length < required_length) {
+ int new_length = length;
+ do {
+ new_length *= 2;
+ } while (new_length < required_length);
+ Handle<FixedArray> extended_array =
+ Factory::NewFixedArrayWithHoles(new_length);
+ array_->CopyTo(0, *extended_array, 0, length_);
+ array_ = extended_array;
+ }
+ }
+
+ void Add(Object* value) {
+ ASSERT(length_ < capacity());
+ array_->set(length_, value);
+ length_++;
+ }
+
+ void Add(Smi* value) {
+ ASSERT(length_ < capacity());
+ array_->set(length_, value);
+ length_++;
+ }
+
+ Handle<FixedArray> array() {
+ return array_;
+ }
+
+ int length() {
+ return length_;
+ }
+
+ int capacity() {
+ return array_->length();
+ }
+
+ Handle<JSArray> ToJSArray() {
+ Handle<JSArray> result_array = Factory::NewJSArrayWithElements(array_);
+ result_array->set_length(Smi::FromInt(length_));
+ return result_array;
+ }
+
+ Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
+ target_array->set_elements(*array_);
+ target_array->set_length(Smi::FromInt(length_));
+ return target_array;
+ }
+
+ private:
+ Handle<FixedArray> array_;
+ int length_;
+};
+
+
// Forward declarations.
-static const int kStringBuilderConcatHelperLengthBits = 11;
-static const int kStringBuilderConcatHelperPositionBits = 19;
+const int kStringBuilderConcatHelperLengthBits = 11;
+const int kStringBuilderConcatHelperPositionBits = 19;
template <typename schar>
static inline void StringBuilderConcatHelper(String*,
@@ -1577,15 +1715,19 @@
FixedArray*,
int);
-typedef BitField<int, 0, 11> StringBuilderSubstringLength;
-typedef BitField<int, 11, 19> StringBuilderSubstringPosition;
+typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
+ StringBuilderSubstringLength;
+typedef BitField<int,
+ kStringBuilderConcatHelperLengthBits,
+ kStringBuilderConcatHelperPositionBits>
+ StringBuilderSubstringPosition;
+
class ReplacementStringBuilder {
public:
ReplacementStringBuilder(Handle<String> subject, int estimated_part_count)
- : subject_(subject),
- parts_(Factory::NewFixedArray(estimated_part_count)),
- part_count_(0),
+ : array_builder_(estimated_part_count),
+ subject_(subject),
character_count_(0),
is_ascii_(subject->IsAsciiRepresentation()) {
// Require a non-zero initial size. Ensures that doubling the size to
@@ -1593,38 +1735,35 @@
ASSERT(estimated_part_count > 0);
}
- void EnsureCapacity(int elements) {
- int length = parts_->length();
- int required_length = part_count_ + elements;
- if (length < required_length) {
- int new_length = length;
- do {
- new_length *= 2;
- } while (new_length < required_length);
- Handle<FixedArray> extended_array =
- Factory::NewFixedArray(new_length);
- parts_->CopyTo(0, *extended_array, 0, part_count_);
- parts_ = extended_array;
- }
- }
-
- void AddSubjectSlice(int from, int to) {
+ static inline void AddSubjectSlice(FixedArrayBuilder* builder,
+ int from,
+ int to) {
ASSERT(from >= 0);
int length = to - from;
ASSERT(length > 0);
- // Can we encode the slice in 11 bits for length and 19 bits for
- // start position - as used by StringBuilderConcatHelper?
if (StringBuilderSubstringLength::is_valid(length) &&
StringBuilderSubstringPosition::is_valid(from)) {
int encoded_slice = StringBuilderSubstringLength::encode(length) |
StringBuilderSubstringPosition::encode(from);
- AddElement(Smi::FromInt(encoded_slice));
+ builder->Add(Smi::FromInt(encoded_slice));
} else {
// Otherwise encode as two smis.
- AddElement(Smi::FromInt(-length));
- AddElement(Smi::FromInt(from));
+ builder->Add(Smi::FromInt(-length));
+ builder->Add(Smi::FromInt(from));
}
- IncrementCharacterCount(length);
+ }
+
+
+ void EnsureCapacity(int elements) {
+ array_builder_.EnsureCapacity(elements);
+ }
+
+
+ void AddSubjectSlice(int from, int to) {
+ AddSubjectSlice(&array_builder_, from, to);
+ // Can we encode the slice in 11 bits for length and 19 bits for
+ // start position - as used by StringBuilderConcatHelper?
+ IncrementCharacterCount(to - from);
}
@@ -1640,7 +1779,7 @@
Handle<String> ToString() {
- if (part_count_ == 0) {
+ if (array_builder_.length() == 0) {
return Factory::empty_string();
}
@@ -1652,8 +1791,8 @@
char* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
- *parts_,
- part_count_);
+ *array_builder_.array(),
+ array_builder_.length());
} else {
// Non-ASCII.
joined_string = NewRawTwoByteString(character_count_);
@@ -1662,8 +1801,8 @@
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
- *parts_,
- part_count_);
+ *array_builder_.array(),
+ array_builder_.length());
}
return joined_string;
}
@@ -1676,8 +1815,14 @@
character_count_ += by;
}
- private:
+ Handle<JSArray> GetParts() {
+ Handle<JSArray> result =
+ Factory::NewJSArrayWithElements(array_builder_.array());
+ result->set_length(Smi::FromInt(array_builder_.length()));
+ return result;
+ }
+ private:
Handle<String> NewRawAsciiString(int size) {
CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(size), String);
}
@@ -1690,14 +1835,12 @@
void AddElement(Object* element) {
ASSERT(element->IsSmi() || element->IsString());
- ASSERT(parts_->length() > part_count_);
- parts_->set(part_count_, element);
- part_count_++;
+ ASSERT(array_builder_.capacity() > array_builder_.length());
+ array_builder_.Add(element);
}
+ FixedArrayBuilder array_builder_;
Handle<String> subject_;
- Handle<FixedArray> parts_;
- int part_count_;
int character_count_;
bool is_ascii_;
};
@@ -2105,7 +2248,6 @@
}
-
// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
// limit, we can fix the size of tables.
static const int kBMMaxShift = 0xff;
@@ -2869,6 +3011,475 @@
}
+// Two smis before and after the match, for very long strings.
+const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+
+static void SetLastMatchInfoNoCaptures(Handle<String> subject,
+ Handle<JSArray> last_match_info,
+ int match_start,
+ int match_end) {
+ // Fill last_match_info with a single capture.
+ last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
+ AssertNoAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(last_match_info->elements());
+ RegExpImpl::SetLastCaptureCount(elements, 2);
+ RegExpImpl::SetLastInput(elements, *subject);
+ RegExpImpl::SetLastSubject(elements, *subject);
+ RegExpImpl::SetCapture(elements, 0, match_start);
+ RegExpImpl::SetCapture(elements, 1, match_end);
+}
+
+
+template <typename schar>
+static bool SearchCharMultiple(Vector<schar> subject,
+ String* pattern,
+ schar pattern_char,
+ FixedArrayBuilder* builder,
+ int* match_pos) {
+ // Position of last match.
+ int pos = *match_pos;
+ int subject_length = subject.length();
+ while (pos < subject_length) {
+ int match_end = pos + 1;
+ if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+ *match_pos = pos;
+ return false;
+ }
+ int new_pos = SingleCharIndexOf(subject, pattern_char, match_end);
+ if (new_pos >= 0) {
+ // Match has been found.
+ if (new_pos > match_end) {
+ ReplacementStringBuilder::AddSubjectSlice(builder, match_end, new_pos);
+ }
+ pos = new_pos;
+ builder->Add(pattern);
+ } else {
+ break;
+ }
+ }
+ if (pos + 1 < subject_length) {
+ ReplacementStringBuilder::AddSubjectSlice(builder, pos + 1, subject_length);
+ }
+ *match_pos = pos;
+ return true;
+}
+
+
+static bool SearchCharMultiple(Handle<String> subject,
+ Handle<String> pattern,
+ Handle<JSArray> last_match_info,
+ FixedArrayBuilder* builder) {
+ ASSERT(subject->IsFlat());
+ ASSERT_EQ(1, pattern->length());
+ uc16 pattern_char = pattern->Get(0);
+ // Treating position before first as initial "previous match position".
+ int match_pos = -1;
+
+ for (;;) { // Break when search complete.
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ AssertNoAllocation no_gc;
+ if (subject->IsAsciiRepresentation()) {
+ if (pattern_char > String::kMaxAsciiCharCode) {
+ break;
+ }
+ Vector<const char> subject_vector = subject->ToAsciiVector();
+ char pattern_ascii_char = static_cast<char>(pattern_char);
+ bool complete = SearchCharMultiple<const char>(subject_vector,
+ *pattern,
+ pattern_ascii_char,
+ builder,
+ &match_pos);
+ if (complete) break;
+ } else {
+ Vector<const uc16> subject_vector = subject->ToUC16Vector();
+ bool complete = SearchCharMultiple<const uc16>(subject_vector,
+ *pattern,
+ pattern_char,
+ builder,
+ &match_pos);
+ if (complete) break;
+ }
+ }
+
+ if (match_pos >= 0) {
+ SetLastMatchInfoNoCaptures(subject,
+ last_match_info,
+ match_pos,
+ match_pos + 1);
+ return true;
+ }
+ return false; // No matches at all.
+}
+
+
+template <typename schar, typename pchar>
+static bool SearchStringMultiple(Vector<schar> subject,
+ String* pattern,
+ Vector<pchar> pattern_string,
+ FixedArrayBuilder* builder,
+ int* match_pos) {
+ int pos = *match_pos;
+ int subject_length = subject.length();
+ int pattern_length = pattern_string.length();
+ int max_search_start = subject_length - pattern_length;
+ bool is_ascii = (sizeof(schar) == 1);
+ StringSearchStrategy strategy =
+ InitializeStringSearch(pattern_string, is_ascii);
+ switch (strategy) {
+ case SEARCH_FAIL: return false;
+ case SEARCH_SHORT:
+ while (pos <= max_search_start) {
+ if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+ *match_pos = pos;
+ return false;
+ }
+ // Position of end of previous match.
+ int match_end = pos + pattern_length;
+ int new_pos = SimpleIndexOf(subject, pattern_string, match_end);
+ if (new_pos >= 0) {
+ // A match.
+ if (new_pos > match_end) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ new_pos);
+ }
+ pos = new_pos;
+ builder->Add(pattern);
+ } else {
+ break;
+ }
+ }
+ break;
+ case SEARCH_LONG:
+ while (pos <= max_search_start) {
+ if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+ *match_pos = pos;
+ return false;
+ }
+ int new_pos = ComplexIndexOf(subject,
+ pattern_string,
+ pos + pattern_length);
+ if (new_pos >= 0) {
+ // A match has been found.
+ if (new_pos > pos) {
+ ReplacementStringBuilder::AddSubjectSlice(builder, pos, new_pos);
+ }
+ pos = new_pos;
+ builder->Add(pattern);
+ } else {
+ break;
+ }
+ }
+ break;
+ }
+ if (pos < max_search_start) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ pos + pattern_length,
+ subject_length);
+ }
+ *match_pos = pos;
+ return true;
+}
+
+
+static bool SearchStringMultiple(Handle<String> subject,
+ Handle<String> pattern,
+ Handle<JSArray> last_match_info,
+ FixedArrayBuilder* builder) {
+ ASSERT(subject->IsFlat());
+ ASSERT(pattern->IsFlat());
+ ASSERT(pattern->length() > 1);
+
+ // Treating as if a previous match was before first character.
+ int match_pos = -pattern->length();
+
+ for (;;) { // Break when search complete.
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ AssertNoAllocation no_gc;
+ if (subject->IsAsciiRepresentation()) {
+ Vector<const char> subject_vector = subject->ToAsciiVector();
+ if (pattern->IsAsciiRepresentation()) {
+ if (SearchStringMultiple(subject_vector,
+ *pattern,
+ pattern->ToAsciiVector(),
+ builder,
+ &match_pos)) break;
+ } else {
+ if (SearchStringMultiple(subject_vector,
+ *pattern,
+ pattern->ToUC16Vector(),
+ builder,
+ &match_pos)) break;
+ }
+ } else {
+ Vector<const uc16> subject_vector = subject->ToUC16Vector();
+ if (pattern->IsAsciiRepresentation()) {
+ if (SearchStringMultiple(subject_vector,
+ *pattern,
+ pattern->ToAsciiVector(),
+ builder,
+ &match_pos)) break;
+ } else {
+ if (SearchStringMultiple(subject_vector,
+ *pattern,
+ pattern->ToUC16Vector(),
+ builder,
+ &match_pos)) break;
+ }
+ }
+ }
+
+ if (match_pos >= 0) {
+ SetLastMatchInfoNoCaptures(subject,
+ last_match_info,
+ match_pos,
+ match_pos + pattern->length());
+ return true;
+ }
+ return false; // No matches at all.
+}
+
+
+static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
+ Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<JSArray> last_match_array,
+ FixedArrayBuilder* builder) {
+ ASSERT(subject->IsFlat());
+ int match_start = -1;
+ int match_end = 0;
+ int pos = 0;
+ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+ if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+
+ OffsetsVector registers(required_registers);
+ Vector<int> register_vector(registers.vector(), registers.length());
+ int subject_length = subject->length();
+
+ for (;;) { // Break on failure, return on exception.
+ RegExpImpl::IrregexpResult result =
+ RegExpImpl::IrregexpExecOnce(regexp,
+ subject,
+ pos,
+ register_vector);
+ if (result == RegExpImpl::RE_SUCCESS) {
+ match_start = register_vector[0];
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ match_start);
+ }
+ match_end = register_vector[1];
+ HandleScope loop_scope;
+ builder->Add(*Factory::NewSubString(subject, match_start, match_end));
+ if (match_start != match_end) {
+ pos = match_end;
+ } else {
+ pos = match_end + 1;
+ if (pos > subject_length) break;
+ }
+ } else if (result == RegExpImpl::RE_FAILURE) {
+ break;
+ } else {
+ ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
+ return result;
+ }
+ }
+
+ if (match_start >= 0) {
+ if (match_end < subject_length) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ subject_length);
+ }
+ SetLastMatchInfoNoCaptures(subject,
+ last_match_array,
+ match_start,
+ match_end);
+ return RegExpImpl::RE_SUCCESS;
+ } else {
+ return RegExpImpl::RE_FAILURE; // No matches at all.
+ }
+}
+
+
+static RegExpImpl::IrregexpResult SearchRegExpMultiple(
+ Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<JSArray> last_match_array,
+ FixedArrayBuilder* builder) {
+
+ ASSERT(subject->IsFlat());
+ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+ if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+
+ OffsetsVector registers(required_registers);
+ Vector<int> register_vector(registers.vector(), registers.length());
+
+ RegExpImpl::IrregexpResult result =
+ RegExpImpl::IrregexpExecOnce(regexp,
+ subject,
+ 0,
+ register_vector);
+
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
+
+ // Position to search from.
+ int pos = 0;
+ // End of previous match. Differs from pos if match was empty.
+ int match_end = 0;
+ if (result == RegExpImpl::RE_SUCCESS) {
+ // Need to keep a copy of the previous match for creating last_match_info
+ // at the end, so we have two vectors that we swap between.
+ OffsetsVector registers2(required_registers);
+ Vector<int> prev_register_vector(registers2.vector(), registers2.length());
+
+ do {
+ int match_start = register_vector[0];
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ match_start);
+ }
+ match_end = register_vector[1];
+
+ {
+ // Avoid accumulating new handles inside loop.
+ HandleScope temp_scope;
+ // Arguments array to replace function is match, captures, index and
+ // subject, i.e., 3 + capture count in total.
+ Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
+ elements->set(0, *Factory::NewSubString(subject,
+ match_start,
+ match_end));
+ for (int i = 1; i <= capture_count; i++) {
+ int start = register_vector[i * 2];
+ if (start >= 0) {
+ int end = register_vector[i * 2 + 1];
+ ASSERT(start <= end);
+ Handle<String> substring = Factory::NewSubString(subject,
+ start,
+ end);
+ elements->set(i, *substring);
+ } else {
+ ASSERT(register_vector[i * 2 + 1] < 0);
+ elements->set(i, Heap::undefined_value());
+ }
+ }
+ elements->set(capture_count + 1, Smi::FromInt(match_start));
+ elements->set(capture_count + 2, *subject);
+ builder->Add(*Factory::NewJSArrayWithElements(elements));
+ }
+ // Swap register vectors, so the last successful match is in
+ // prev_register_vector.
+ Vector<int> tmp = prev_register_vector;
+ prev_register_vector = register_vector;
+ register_vector = tmp;
+
+ if (match_end > match_start) {
+ pos = match_end;
+ } else {
+ pos = match_end + 1;
+ if (pos > subject_length) {
+ break;
+ }
+ }
+
+ result = RegExpImpl::IrregexpExecOnce(regexp,
+ subject,
+ pos,
+ register_vector);
+ } while (result == RegExpImpl::RE_SUCCESS);
+
+ if (result != RegExpImpl::RE_EXCEPTION) {
+ // Finished matching, with at least one match.
+ if (match_end < subject_length) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ subject_length);
+ }
+
+ int last_match_capture_count = (capture_count + 1) * 2;
+ int last_match_array_size =
+ last_match_capture_count + RegExpImpl::kLastMatchOverhead;
+ last_match_array->EnsureSize(last_match_array_size);
+ AssertNoAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(last_match_array->elements());
+ RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
+ RegExpImpl::SetLastSubject(elements, *subject);
+ RegExpImpl::SetLastInput(elements, *subject);
+ for (int i = 0; i < last_match_capture_count; i++) {
+ RegExpImpl::SetCapture(elements, i, prev_register_vector[i]);
+ }
+ return RegExpImpl::RE_SUCCESS;
+ }
+ }
+ // No matches at all, return failure or exception result directly.
+ return result;
+}
+
+
+static Object* Runtime_RegExpExecMultiple(Arguments args) {
+ ASSERT(args.length() == 4);
+ HandleScope handles;
+
+ CONVERT_ARG_CHECKED(String, subject, 1);
+ if (!subject->IsFlat()) { FlattenString(subject); }
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
+ CONVERT_ARG_CHECKED(JSArray, result_array, 3);
+
+ ASSERT(last_match_info->HasFastElements());
+ ASSERT(regexp->GetFlags().is_global());
+ Handle<FixedArray> result_elements;
+ if (result_array->HasFastElements()) {
+ result_elements =
+ Handle<FixedArray>(FixedArray::cast(result_array->elements()));
+ } else {
+ result_elements = Factory::NewFixedArrayWithHoles(16);
+ }
+ FixedArrayBuilder builder(result_elements);
+
+ if (regexp->TypeTag() == JSRegExp::ATOM) {
+ Handle<String> pattern(
+ String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
+ int pattern_length = pattern->length();
+ if (pattern_length == 1) {
+ if (SearchCharMultiple(subject, pattern, last_match_info, &builder)) {
+ return *builder.ToJSArray(result_array);
+ }
+ return Heap::null_value();
+ }
+
+ if (!pattern->IsFlat()) FlattenString(pattern);
+ if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
+ return *builder.ToJSArray(result_array);
+ }
+ return Heap::null_value();
+ }
+
+ ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+
+ RegExpImpl::IrregexpResult result;
+ if (regexp->CaptureCount() == 0) {
+ result = SearchRegExpNoCaptureMultiple(subject,
+ regexp,
+ last_match_info,
+ &builder);
+ } else {
+ result = SearchRegExpMultiple(subject, regexp, last_match_info, &builder);
+ }
+ if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
+ if (result == RegExpImpl::RE_FAILURE) return Heap::null_value();
+ ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
+ return Failure::Exception();
+}
+
+
static Object* Runtime_NumberToRadixString(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -3834,11 +4445,66 @@
}
+static bool AreDigits(const char*s, int from, int to) {
+ for (int i = from; i < to; i++) {
+ if (s[i] < '0' || s[i] > '9') return false;
+ }
+
+ return true;
+}
+
+
+static int ParseDecimalInteger(const char*s, int from, int to) {
+ ASSERT(to - from < 10); // Overflow is not possible.
+ ASSERT(from < to);
+ int d = s[from] - '0';
+
+ for (int i = from + 1; i < to; i++) {
+ d = 10 * d + (s[i] - '0');
+ }
+
+ return d;
+}
+
+
static Object* Runtime_StringToNumber(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, subject, args[0]);
subject->TryFlatten();
+
+ // Fast case: short integer or certain kinds of junk values.
+ int len = subject->length();
+ if (subject->IsSeqAsciiString()) {
+ if (len == 0) return Smi::FromInt(0);
+
+ char const* data = SeqAsciiString::cast(subject)->GetChars();
+ bool minus = (data[0] == '-');
+ int start_pos = (minus ? 1 : 0);
+
+ if (start_pos == len) {
+ return Heap::nan_value();
+ } else if (data[start_pos] > '9') {
+ // Fast check for a junk value. A valid string may start with whitespace,
+ // a sign ('+' or '-'), the decimal point, a decimal digit or the 'I'
+ // character ('Infinity'). All of those characters have codes not greater
+ // than '9', except for 'I'.
+ if (data[start_pos] != 'I') {
+ return Heap::nan_value();
+ }
+ } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
+ // The maximal/minimal smi has 10 digits. If the string has fewer digits
+ // we know it will fit into the smi data type.
+ int d = ParseDecimalInteger(data, start_pos, len);
+ if (minus) {
+ if (d == 0) return Heap::minus_zero_value();
+ d = -d;
+ }
+ return Smi::FromInt(d);
+ }
+ }
+
+ // Slower case.
return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
}
@@ -4075,49 +4741,9 @@
s->TryFlatten();
- int len = s->length();
- int i;
-
- // Skip leading white space.
- for (i = 0; i < len && Scanner::kIsWhiteSpace.get(s->Get(i)); i++) ;
- if (i == len) return Heap::nan_value();
-
- // Compute the sign (default to +).
- int sign = 1;
- if (s->Get(i) == '-') {
- sign = -1;
- i++;
- } else if (s->Get(i) == '+') {
- i++;
- }
-
- // Compute the radix if 0.
- if (radix == 0) {
- radix = 10;
- if (i < len && s->Get(i) == '0') {
- radix = 8;
- if (i + 1 < len) {
- int c = s->Get(i + 1);
- if (c == 'x' || c == 'X') {
- radix = 16;
- i += 2;
- }
- }
- }
- } else if (radix == 16) {
- // Allow 0x or 0X prefix if radix is 16.
- if (i + 1 < len && s->Get(i) == '0') {
- int c = s->Get(i + 1);
- if (c == 'x' || c == 'X') i += 2;
- }
- }
-
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
- double value;
- int end_index = StringToInt(s, i, radix, &value);
- if (end_index != i) {
- return Heap::NumberFromDouble(sign * value);
- }
+ RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
+ double value = StringToInt(s, radix);
+ return Heap::NumberFromDouble(value);
return Heap::nan_value();
}
@@ -6474,21 +7100,6 @@
}
-static Object* Runtime_NumberIsFinite(Arguments args) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_CHECKED(value, args[0]);
- Object* result;
- if (isnan(value) || (fpclassify(value) == FP_INFINITE)) {
- result = Heap::false_value();
- } else {
- result = Heap::true_value();
- }
- return result;
-}
-
-
static Object* Runtime_GlobalReceiver(Arguments args) {
ASSERT(args.length() == 1);
Object* global = args[0];
diff --git a/src/runtime.h b/src/runtime.h
index 4175902..c079345 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -153,6 +153,8 @@
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
F(RegExpExec, 4, 1) \
+ F(RegExpExecMultiple, 4, 1) \
+ F(RegExpInitializeObject, 5, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
@@ -208,7 +210,6 @@
F(DateYMDFromTime, 2, 1) \
\
/* Numbers */ \
- F(NumberIsFinite, 1, 1) \
\
/* Globals */ \
F(CompileString, 2, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index d777041..3f23ce5 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -577,11 +577,11 @@
if (IS_NUMBER(x)) {
if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
// x is +0 and y is -0 or vice versa
- if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
+ if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
return false;
}
- return x == y;
+ return x == y;
}
if (IS_STRING(x)) return %StringEquals(x, y);
if (IS_BOOLEAN(x))return %NumberEquals(%ToNumber(x),%ToNumber(y));
diff --git a/src/string.js b/src/string.js
index ca438fd..302dcde 100644
--- a/src/string.js
+++ b/src/string.js
@@ -405,97 +405,95 @@
builder.addSpecialSlice(start, end);
};
+// TODO(lrn): This array will survive indefinitely if replace is never
+// called again. However, it will be empty, since the contents are cleared
+// in the finally block.
+var reusableReplaceArray = $Array(16);
// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace. The function application
-// must be interleaved with the regexp matching (contrary to ECMA-262
-// 15.5.4.11) to mimic SpiderMonkey and KJS behavior when the function uses
-// the static properties of the RegExp constructor. Example:
-// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
-// should be 'abcd' and not 'dddd' (or anything else).
+// function application in String.prototype.replace.
function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) return subject;
-
- var result = new ReplaceResultBuilder(subject);
- // There's at least one match. If the regexp is global, we have to loop
- // over all matches. The loop is not in C++ code here like the one in
- // RegExp.prototype.exec, because of the interleaved function application.
- // Unfortunately, that means this code is nearly duplicated, here and in
- // jsregexp.cc.
if (regexp.global) {
- var previous = 0;
- var startOfMatch;
- if (NUMBER_OF_CAPTURES(matchInfo) == 2) {
- // Both branches contain essentially the same loop except for the call
- // to the replace function. The branch is put outside of the loop for
- // speed
- do {
- startOfMatch = matchInfo[CAPTURE0];
- result.addSpecialSlice(previous, startOfMatch);
- previous = matchInfo[CAPTURE1];
- var match = SubString(subject, startOfMatch, previous);
- // Don't call directly to avoid exposing the built-in global object.
- result.add(replace.call(null, match, startOfMatch, subject));
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- // Continue with the next match.
- // Increment previous if we matched an empty string, as per ECMA-262
- // 15.5.4.10.
- if (previous == startOfMatch) {
- // Add the skipped character to the output, if any.
- if (previous < subject.length) {
- result.addSpecialSlice(previous, previous + 1);
- }
- previous++;
- // Per ECMA-262 15.10.6.2, if the previous index is greater than the
- // string length, there is no match
- if (previous > subject.length) {
- return result.generate();
- }
- }
- matchInfo = DoRegExpExec(regexp, subject, previous);
- } while (!IS_NULL(matchInfo));
+ var resultArray = reusableReplaceArray;
+ if (resultArray) {
+ reusableReplaceArray = null;
} else {
- do {
- startOfMatch = matchInfo[CAPTURE0];
- result.addSpecialSlice(previous, startOfMatch);
- previous = matchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, matchInfo, subject));
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- // Continue with the next match.
- // Increment previous if we matched an empty string, as per ECMA-262
- // 15.5.4.10.
- if (previous == startOfMatch) {
- // Add the skipped character to the output, if any.
- if (previous < subject.length) {
- result.addSpecialSlice(previous, previous + 1);
- }
- previous++;
- // Per ECMA-262 15.10.6.2, if the previous index is greater than the
- // string length, there is no match
- if (previous > subject.length) {
- return result.generate();
- }
- }
- matchInfo = DoRegExpExec(regexp, subject, previous);
- } while (!IS_NULL(matchInfo));
+ // Inside a nested replace (replace called from the replacement function
+ // of another replace) or we have failed to set the reusable array
+ // back due to an exception in a replacement function. Create a new
+ // array to use in the future, or until the original is written back.
+ resultArray = $Array(16);
}
-
- // Tack on the final right substring after the last match.
- result.addSpecialSlice(previous, subject.length);
-
+ try {
+ // Must handle exceptions thrown by the replace functions correctly,
+ // including unregistering global regexps.
+ var res = %RegExpExecMultiple(regexp,
+ subject,
+ lastMatchInfo,
+ resultArray);
+ regexp.lastIndex = 0;
+ if (IS_NULL(res)) {
+ // No matches at all.
+ return subject;
+ }
+ var len = res.length;
+ var i = 0;
+ if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+ var match_start = 0;
+ while (i < len) {
+ var elem = res[i];
+ if (%_IsSmi(elem)) {
+ if (elem > 0) {
+ match_start = (elem >> 11) + (elem & 0x7ff);
+ } else {
+ match_start = res[++i] - elem;
+ }
+ } else {
+ var func_result = replace.call(null, elem, match_start, subject);
+ if (!IS_STRING(func_result)) {
+ func_result = NonStringToString(func_result);
+ }
+ res[i] = func_result;
+ match_start += elem.length;
+ }
+ i++;
+ }
+ } else {
+ while (i < len) {
+ var elem = res[i];
+ if (!%_IsSmi(elem)) {
+ // elem must be an Array.
+ // Use the apply argument as backing for global RegExp properties.
+ lastMatchInfoOverride = elem;
+ var func_result = replace.apply(null, elem);
+ if (!IS_STRING(func_result)) {
+ func_result = NonStringToString(func_result);
+ }
+ res[i] = func_result;
+ }
+ i++;
+ }
+ }
+ var result = new ReplaceResultBuilder(subject, res);
+ return result.generate();
+ } finally {
+ lastMatchInfoOverride = null;
+ resultArray.length = 0;
+ reusableReplaceArray = resultArray;
+ }
} else { // Not a global regexp, no need to loop.
+ var matchInfo = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(matchInfo)) return subject;
+
+ var result = new ReplaceResultBuilder(subject);
result.addSpecialSlice(0, matchInfo[CAPTURE0]);
var endOfMatch = matchInfo[CAPTURE1];
result.add(ApplyReplacementFunction(replace, matchInfo, subject));
// Can't use matchInfo any more from here, since the function could
// overwrite it.
result.addSpecialSlice(endOfMatch, subject.length);
+ return result.generate();
}
-
- return result.generate();
}
@@ -522,7 +520,7 @@
// ECMA-262 section 15.5.4.12
-function StringSearch(re) {
+function StringSearch(re) {
var regexp = new $RegExp(re);
var s = TO_STRING_INLINE(this);
var match = DoRegExpExec(regexp, s, 0);
@@ -894,8 +892,11 @@
// ReplaceResultBuilder support.
function ReplaceResultBuilder(str) {
- this.__proto__ = void 0;
- this.elements = new $Array();
+ if (%_ArgumentsLength() > 1) {
+ this.elements = %_Arguments(1);
+ } else {
+ this.elements = new $Array();
+ }
this.special_string = str;
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index ce587bc..95877fb 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -103,7 +103,7 @@
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -122,7 +122,7 @@
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -141,7 +141,7 @@
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -158,7 +158,7 @@
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -187,7 +187,7 @@
name,
is_dont_delete);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -205,7 +205,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -224,7 +224,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -242,7 +242,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -261,7 +261,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -279,7 +279,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -296,7 +296,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadStringLength(name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -313,7 +313,7 @@
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -332,7 +332,7 @@
StoreStubCompiler compiler;
code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -349,7 +349,7 @@
StoreStubCompiler compiler;
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -367,7 +367,7 @@
StoreStubCompiler compiler;
code = compiler.CompileStoreCallback(receiver, callback, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -384,7 +384,7 @@
StoreStubCompiler compiler;
code = compiler.CompileStoreInterceptor(receiver, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -401,7 +401,8 @@
KeyedStoreStubCompiler compiler;
code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(
+ Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -445,7 +446,7 @@
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -482,7 +483,7 @@
name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -518,7 +519,7 @@
name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -556,7 +557,7 @@
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
- LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -701,8 +702,8 @@
if (result->IsCode()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -939,8 +940,8 @@
Counters::call_initialize_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -957,8 +958,8 @@
Counters::call_premonomorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -973,8 +974,8 @@
Counters::call_normal_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -989,8 +990,8 @@
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -1005,7 +1006,8 @@
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_MISS_TAG, code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_MISS_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -1019,8 +1021,8 @@
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -1036,8 +1038,8 @@
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
- code, code->arguments_count()));
+ PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -1124,7 +1126,7 @@
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+ PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
}
return result;
}
diff --git a/src/type-info.cc b/src/type-info.cc
index b1bde59..3fc929d 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -41,6 +41,8 @@
info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
? TypeInfo::Integer32()
: TypeInfo::Double();
+ } else if (value->IsString()) {
+ info = TypeInfo::String();
} else {
info = TypeInfo::Unknown();
}
diff --git a/src/type-info.h b/src/type-info.h
index 1d82634..568437a 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -156,6 +156,11 @@
return ((type_ & kDoubleType) == kDoubleType);
}
+ inline bool IsString() {
+ ASSERT(type_ != kUninitializedType);
+ return ((type_ & kStringType) == kStringType);
+ }
+
inline bool IsUninitialized() {
return type_ == kUninitializedType;
}
diff --git a/src/uri.js b/src/uri.js
index 5af71b6..3adab83 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -244,7 +244,7 @@
if (cc == 61) return true;
// ?@
if (63 <= cc && cc <= 64) return true;
-
+
return false;
};
var string = ToString(uri);
@@ -268,7 +268,7 @@
if (65 <= cc && cc <= 90) return true;
// 0 - 9
if (48 <= cc && cc <= 57) return true;
-
+
return false;
}
@@ -293,7 +293,7 @@
if (cc == 95) return true;
// ~
if (cc == 126) return true;
-
+
return false;
};
@@ -316,7 +316,7 @@
if (cc == 95) return true;
// ~
if (cc == 126) return true;
-
+
return false;
};
@@ -327,14 +327,14 @@
function HexValueOf(c) {
var code = c.charCodeAt(0);
-
+
// 0-9
if (code >= 48 && code <= 57) return code - 48;
// A-F
if (code >= 65 && code <= 70) return code - 55;
// a-f
if (code >= 97 && code <= 102) return code - 87;
-
+
return -1;
}
diff --git a/src/utils.h b/src/utils.h
index 8ff1f9b..fa24947 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -597,6 +597,27 @@
}
+// Copies data from |src| to |dst|. The data spans must NOT overlap.
+inline void CopyWords(Object** dst, Object** src, int num_words) {
+ ASSERT(Min(dst, src) + num_words <= Max(dst, src));
+ ASSERT(num_words > 0);
+
+ // Use block copying memcpy if the segment we're copying is
+ // enough to justify the extra call/setup overhead.
+ static const int kBlockCopyLimit = 16;
+
+ if (num_words >= kBlockCopyLimit) {
+ memcpy(dst, src, num_words * kPointerSize);
+ } else {
+ int remaining = num_words;
+ do {
+ remaining--;
+ *dst++ = *src++;
+ } while (remaining > 0);
+ }
+}
+
+
// Calculate 10^exponent.
int TenToThe(int exponent);
@@ -636,7 +657,7 @@
return dest;
}
-
} } // namespace v8::internal
+
#endif // V8_UTILS_H_
diff --git a/src/v8.cc b/src/v8.cc
index 5af2003..2fe672d 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -60,6 +60,14 @@
// Enable logging before setting up the heap
Logger::Setup();
+ CpuProfiler::Setup();
+
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (FLAG_prof && FLAG_prof_auto) {
+ CpuProfiler::StartProfiling("internal.auto");
+ }
+#endif
+
// Setup the platform OS support.
OS::Setup();
@@ -135,6 +143,12 @@
void V8::TearDown() {
if (!has_been_setup_ || has_been_disposed_) return;
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+ if (FLAG_prof && FLAG_prof_auto) {
+ CpuProfiler::StopProfiling("internal.auto");
+ }
+#endif
+
OProfileAgent::TearDown();
if (FLAG_preemption) {
@@ -148,6 +162,9 @@
Top::TearDown();
Heap::TearDown();
+
+ CpuProfiler::TearDown();
+
Logger::TearDown();
is_running_ = false;
diff --git a/src/v8.h b/src/v8.h
index d58f30f..4fc44c2 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -67,6 +67,7 @@
#include "spaces-inl.h"
#include "heap-inl.h"
#include "log-inl.h"
+#include "cpu-profiler-inl.h"
#include "handles-inl.h"
namespace v8 {
diff --git a/src/v8natives.js b/src/v8natives.js
index 4a8dfab..86d3ad8 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -82,7 +82,10 @@
// ECMA 262 - 15.1.5
function GlobalIsFinite(number) {
- return %NumberIsFinite(ToNumber(number));
+ if (!IS_NUMBER(number)) number = ToNumber(number);
+
+ // NaN - NaN == NaN, Infinity - Infinity == NaN, -Infinity - -Infinity == NaN.
+ return %_IsSmi(number) || number - number == 0;
}
@@ -482,7 +485,7 @@
// ES5 section 8.12.1.
function GetOwnProperty(obj, p) {
var desc = new PropertyDescriptor();
-
+
// An array with:
// obj is a data property [false, value, Writeable, Enumerable, Configurable]
// obj is an accessor [true, Get, Set, Enumerable, Configurable]
@@ -522,7 +525,7 @@
}
-// ES5 8.12.9.
+// ES5 8.12.9.
function DefineOwnProperty(obj, p, desc, should_throw) {
var current = GetOwnProperty(obj, p);
var extensible = %IsExtensible(ToObject(obj));
@@ -558,7 +561,7 @@
}
}
- // Send flags - enumerable and configurable are common - writable is
+ // Send flags - enumerable and configurable are common - writable is
// only send to the data descriptor.
// Take special care if enumerable and configurable is not defined on
// desc (we need to preserve the existing values from current).
@@ -602,7 +605,7 @@
}
-// ES5 section 15.2.3.3
+// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
!IS_UNDETECTABLE(obj))
diff --git a/src/version.cc b/src/version.cc
index 9d1aa72..4be077e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 0
-#define PATCH_LEVEL 3
+#define BUILD_NUMBER 1
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index cedb740..c10bc49 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -119,8 +119,8 @@
Object* code =
Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
if (!code->IsCode()) return;
- LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), "CpuFeatures::Probe"));
+ PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index dd8015f..e035104 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -3129,14 +3129,16 @@
}
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub. The result is left in dst.
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged. Call into the runtime
+// to convert the argument to a number, and call the specialized add
+// or subtract stub. The result is left in dst.
class DeferredPrefixCountOperation: public DeferredCode {
public:
- DeferredPrefixCountOperation(Register dst, bool is_increment)
- : dst_(dst), is_increment_(is_increment) {
+ DeferredPrefixCountOperation(Register dst,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
set_comment("[ DeferredCountOperation");
}
@@ -3145,32 +3147,45 @@
private:
Register dst_;
bool is_increment_;
+ TypeInfo input_type_;
};
void DeferredPrefixCountOperation::Generate() {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(rax);
- __ Push(Smi::FromInt(1));
- if (is_increment_) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ Register left;
+ if (input_type_.IsNumber()) {
+ left = dst_;
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ left = rax;
}
+
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
if (!dst_.is(rax)) __ movq(dst_, rax);
}
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation and call
-// into the runtime to convert the argument to a number. Update the
-// original value in old. Call the specialized add or subtract stub.
-// The result is left in dst.
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged. Call into the runtime
+// to convert the argument to a number. Update the original value in
+// old. Call the specialized add or subtract stub. The result is
+// left in dst.
class DeferredPostfixCountOperation: public DeferredCode {
public:
- DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
- : dst_(dst), old_(old), is_increment_(is_increment) {
+ DeferredPostfixCountOperation(Register dst,
+ Register old,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst),
+ old_(old),
+ is_increment_(is_increment),
+ input_type_(input_type) {
set_comment("[ DeferredCountOperation");
}
@@ -3180,24 +3195,28 @@
Register dst_;
Register old_;
bool is_increment_;
+ TypeInfo input_type_;
};
void DeferredPostfixCountOperation::Generate() {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
- // Save the result of ToNumber to use as the old value.
- __ push(rax);
-
- // Call the runtime for the addition or subtraction.
- __ push(rax);
- __ Push(Smi::FromInt(1));
- if (is_increment_) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ Register left;
+ if (input_type_.IsNumber()) {
+ __ push(dst_); // Save the input to use as the old value.
+ left = dst_;
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(rax); // Save the result of ToNumber to use as the old value.
+ left = rax;
}
+
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
if (!dst_.is(rax)) __ movq(dst_, rax);
__ pop(old_);
}
@@ -3238,6 +3257,14 @@
old_value = allocator_->Allocate();
ASSERT(old_value.is_valid());
__ movq(old_value.reg(), new_value.reg());
+
+ // The return value for postfix operations is ToNumber(input).
+ // Keep more precise type info if the input is some kind of
+ // number already. If the input is not a number we have to wait
+ // for the deferred code to convert it.
+ if (new_value.type_info().IsNumber()) {
+ old_value.set_type_info(new_value.type_info());
+ }
}
// Ensure the new value is writable.
frame_->Spill(new_value.reg());
@@ -3246,10 +3273,12 @@
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
old_value.reg(),
- is_increment);
+ is_increment,
+ new_value.type_info());
} else {
deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment);
+ is_increment,
+ new_value.type_info());
}
__ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
@@ -3267,6 +3296,15 @@
__ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
+ // Postfix count operations return their input converted to
+ // number. The case when the input is already a number is covered
+ // above in the allocation code for old_value.
+ if (is_postfix && !new_value.type_info().IsNumber()) {
+ old_value.set_type_info(TypeInfo::Number());
+ }
+
+ new_value.set_type_info(TypeInfo::Number());
+
// Postfix: store the old value in the allocated slot under the
// reference.
if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
@@ -3734,12 +3772,32 @@
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- // ArgumentsAccessStub takes the parameter count as an input argument
- // in register eax. Create a constant result for it.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to the arguments.length.
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
- Result result = frame_->CallStub(&stub, &count);
+
+ Result fp = allocator_->Allocate();
+ Result result = allocator_->Allocate();
+ ASSERT(fp.is_valid() && result.is_valid());
+
+ Label exit;
+
+ // Get the number of formal parameters.
+ __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ movq(result.reg(),
+ Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ result.set_type_info(TypeInfo::Smi());
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(result.reg(), "Computed arguments.length is not a smi.");
+ }
frame_->Push(&result);
}
@@ -5254,8 +5312,13 @@
Result left = frame_->Pop();
if (op == Token::ADD) {
- bool left_is_string = left.is_constant() && left.handle()->IsString();
- bool right_is_string = right.is_constant() && right.handle()->IsString();
+ const bool left_is_string = left.type_info().IsString();
+ const bool right_is_string = right.type_info().IsString();
+ // Make sure constant strings have string type info.
+ ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+ left_is_string);
+ ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+ right_is_string);
if (left_is_string || right_is_string) {
frame_->Push(&left);
frame_->Push(&right);
@@ -5264,7 +5327,8 @@
if (right_is_string) {
// TODO(lrn): if both are constant strings
// -- do a compile time cons, if allocation during codegen is allowed.
- answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
} else {
answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
@@ -5273,6 +5337,7 @@
answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
}
+ answer.set_type_info(TypeInfo::String());
frame_->Push(&answer);
return;
}
@@ -5358,10 +5423,13 @@
: TypeInfo::Number();
break;
case Token::ADD:
- // Result could be a string or a number. Check types of inputs.
- result_type = operands_type.IsNumber()
- ? TypeInfo::Number()
- : TypeInfo::Unknown();
+ if (operands_type.IsNumber()) {
+ result_type = TypeInfo::Number();
+ } else if (operands_type.IsString()) {
+ result_type = TypeInfo::String();
+ } else {
+ result_type = TypeInfo::Unknown();
+ }
break;
case Token::SUB:
case Token::MUL:
@@ -7739,23 +7807,6 @@
}
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame and return it.
- // Otherwise nothing to do: The number of formal parameters has already been
- // passed in register eax by calling function. Just return it.
- __ cmovq(equal, rax,
- Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ ret(0);
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Check that stack should contain next handler, frame pointer, state and
// return address in that order.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index a8e2fdf..2781a84 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1660,6 +1660,15 @@
}
+void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, &ok);
+ Assert(equal, msg);
+ bind(&ok);
+}
+
+
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index bbb6e21..b5bb636 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -503,6 +503,9 @@
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object, const char* msg);
+ // Abort execution if argument is not a smi. Used in debug code.
+ void AbortIfNotSmi(Register object, const char* msg);
+
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 3fe6be2..c998f13 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -905,7 +905,7 @@
NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- LOG(RegExpCodeCreateEvent(*code, *source));
+ PROFILE(RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}