Version 2.4.7
Changed the command-line flag --max-new-space-size to be in kB and the flag --max-old-space-size to be in MB (previously they were in bytes).
Added Debug::CancelDebugBreak to the debugger API.
Fixed a bug in getters for negative numeric property names (https://bugs.webkit.org/show_bug.cgi?id=46689).
Performance improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@5563 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index 7fae8d4..7883bf7 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -100,6 +100,7 @@
serialize.cc
snapshot-common.cc
spaces.cc
+ string-search.cc
string-stream.cc
stub-cache.cc
token.cc
diff --git a/src/api.cc b/src/api.cc
index 5f480c9..b7d85c6 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -134,27 +134,27 @@
heap_stats.new_space_size = &new_space_size;
int new_space_capacity;
heap_stats.new_space_capacity = &new_space_capacity;
- int old_pointer_space_size;
+ intptr_t old_pointer_space_size;
heap_stats.old_pointer_space_size = &old_pointer_space_size;
- int old_pointer_space_capacity;
+ intptr_t old_pointer_space_capacity;
heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
- int old_data_space_size;
+ intptr_t old_data_space_size;
heap_stats.old_data_space_size = &old_data_space_size;
- int old_data_space_capacity;
+ intptr_t old_data_space_capacity;
heap_stats.old_data_space_capacity = &old_data_space_capacity;
- int code_space_size;
+ intptr_t code_space_size;
heap_stats.code_space_size = &code_space_size;
- int code_space_capacity;
+ intptr_t code_space_capacity;
heap_stats.code_space_capacity = &code_space_capacity;
- int map_space_size;
+ intptr_t map_space_size;
heap_stats.map_space_size = &map_space_size;
- int map_space_capacity;
+ intptr_t map_space_capacity;
heap_stats.map_space_capacity = &map_space_capacity;
- int cell_space_size;
+ intptr_t cell_space_size;
heap_stats.cell_space_size = &cell_space_size;
- int cell_space_capacity;
+ intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity;
- int lo_space_size;
+ intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
int global_handle_count;
heap_stats.global_handle_count = &global_handle_count;
@@ -166,9 +166,9 @@
heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
int destroyed_global_handle_count;
heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
- int memory_allocator_size;
+ intptr_t memory_allocator_size;
heap_stats.memory_allocator_size = &memory_allocator_size;
- int memory_allocator_capacity;
+ intptr_t memory_allocator_capacity;
heap_stats.memory_allocator_capacity = &memory_allocator_capacity;
int objects_per_type[LAST_TYPE + 1] = {0};
heap_stats.objects_per_type = objects_per_type;
@@ -4265,6 +4265,11 @@
}
+void Debug::CancelDebugBreak() {
+ i::StackGuard::Continue(i::DEBUGBREAK);
+}
+
+
void Debug::DebugBreakForCommand(ClientData* data) {
if (!i::V8::IsRunning()) return;
i::Debugger::EnqueueDebugCommand(data);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index d5a700c..eab4c6e 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1584,8 +1584,9 @@
// Check that the receiver isn't a smi.
__ BranchOnSmi(r1, &slow);
- // Check that the key is a smi.
- __ BranchOnNotSmi(r0, &slow);
+ // Check that the key is an array index, that is Uint32.
+ __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
+ __ b(ne, &slow);
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 64262b2..40be9bb 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -294,7 +294,7 @@
} else if (GetVFPSingleValue(arg1, &svalue)) {
PrintF("%s: %f \n", arg1, svalue);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
- PrintF("%s: %lf \n", arg1, dvalue);
+ PrintF("%s: %f \n", arg1, dvalue);
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -349,7 +349,8 @@
end = cur + words;
while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d\n", cur, *cur, *cur);
+ PrintF(" 0x%08x: 0x%08x %10d\n",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
cur++;
}
} else if (strcmp(cmd, "disasm") == 0) {
@@ -382,7 +383,8 @@
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
cur += Instr::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -1061,7 +1063,7 @@
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instr* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- instr, format);
+ reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
}
@@ -2650,7 +2652,7 @@
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", instr, buffer.start());
+ PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
if (instr->ConditionField() == special_condition) {
DecodeUnconditional(instr);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 659f29c..97f9495 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1643,6 +1643,108 @@
}
+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r2 : function name
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into r0.
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ BranchOnNotSmi(r0, &not_smi);
+
+ // Do bitwise not or do nothing depending on the sign of the
+ // argument.
+ __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
+
+ // Add 1 or do nothing depending on the sign of the argument.
+ __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ b(mi, &slow);
+
+ // Smi case done.
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign.
+ __ bind(&not_smi);
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ tst(r1, Operand(HeapNumber::kSignMask));
+ __ b(ne, &negative_sign);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ eor(r1, r1, Operand(HeapNumber::kSignMask));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
+ __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // r2: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
diff --git a/src/assembler.cc b/src/assembler.cc
index 6a46f61..b6efdb9 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -465,7 +465,7 @@
void RelocInfo::Print() {
PrintF("%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
- PrintF(" (%s)", data_);
+ PrintF(" (%s)", reinterpret_cast<char*>(data_));
} else if (rmode_ == EMBEDDED_OBJECT) {
PrintF(" (");
target_object()->ShortPrint();
@@ -479,7 +479,7 @@
Code* code = Code::GetCodeFromTargetAddress(target_address());
PrintF(" (%s) (%p)", Code::Kind2String(code->kind()), target_address());
} else if (IsPosition(rmode_)) {
- PrintF(" (%d)", data());
+ PrintF(" (%" V8_PTR_PREFIX "d)", data());
}
PrintF("\n");
diff --git a/src/ast.h b/src/ast.h
index eadb310..fc34fd4 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -118,35 +118,38 @@
typedef ZoneList<Handle<Object> > ZoneObjectList;
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v); \
+ virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+ virtual type* As##type() { return this; }
+
+
class AstNode: public ZoneObject {
public:
- virtual ~AstNode() { }
- virtual void Accept(AstVisitor* v) = 0;
+#define DECLARE_TYPE_ENUM(type) k##type,
+ enum Type {
+ AST_NODE_LIST(DECLARE_TYPE_ENUM)
+ kInvalid = -1
+ };
+#undef DECLARE_TYPE_ENUM
- // Type testing & conversion.
+ virtual ~AstNode() { }
+
+ virtual void Accept(AstVisitor* v) = 0;
+ virtual Type node_type() const { return kInvalid; }
+
+ // Type testing & conversion functions overridden by concrete subclasses.
+#define DECLARE_NODE_FUNCTIONS(type) \
+ virtual type* As##type() { return NULL; }
+ AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+#undef DECLARE_NODE_FUNCTIONS
+
virtual Statement* AsStatement() { return NULL; }
- virtual Block* AsBlock() { return NULL; }
- virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
- virtual EmptyStatement* AsEmptyStatement() { return NULL; }
virtual Expression* AsExpression() { return NULL; }
- virtual Literal* AsLiteral() { return NULL; }
- virtual Slot* AsSlot() { return NULL; }
- virtual VariableProxy* AsVariableProxy() { return NULL; }
- virtual Property* AsProperty() { return NULL; }
- virtual Call* AsCall() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
- virtual ForStatement* AsForStatement() { return NULL; }
- virtual UnaryOperation* AsUnaryOperation() { return NULL; }
- virtual CountOperation* AsCountOperation() { return NULL; }
- virtual BinaryOperation* AsBinaryOperation() { return NULL; }
- virtual Assignment* AsAssignment() { return NULL; }
- virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
- virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
- virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
- virtual CompareOperation* AsCompareOperation() { return NULL; }
};
@@ -155,7 +158,6 @@
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
virtual Statement* AsStatement() { return this; }
- virtual ReturnStatement* AsReturnStatement() { return NULL; }
virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
virtual CountOperation* StatementAsCountOperation() { return NULL; }
@@ -313,9 +315,7 @@
public:
inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
- virtual void Accept(AstVisitor* v);
-
- virtual Block* AsBlock() { return this; }
+ DECLARE_NODE_TYPE(Block)
virtual Assignment* StatementAsSimpleAssignment() {
if (statements_.length() != 1) return NULL;
@@ -349,7 +349,7 @@
ASSERT(fun == NULL || mode == Variable::VAR);
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Declaration)
VariableProxy* proxy() const { return proxy_; }
Variable::Mode mode() const { return mode_; }
@@ -390,13 +390,13 @@
public:
explicit inline DoWhileStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(DoWhileStatement)
+
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
cond_ = cond;
}
- virtual void Accept(AstVisitor* v);
-
Expression* cond() const { return cond_; }
// Position where condition expression starts. We need it to make
@@ -414,13 +414,13 @@
public:
explicit WhileStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(WhileStatement)
+
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
cond_ = cond;
}
- virtual void Accept(AstVisitor* v);
-
Expression* cond() const { return cond_; }
bool may_have_function_literal() const {
return may_have_function_literal_;
@@ -440,7 +440,7 @@
public:
explicit inline ForStatement(ZoneStringList* labels);
- virtual ForStatement* AsForStatement() { return this; }
+ DECLARE_NODE_TYPE(ForStatement)
void Initialize(Statement* init,
Expression* cond,
@@ -452,8 +452,6 @@
next_ = next;
}
- virtual void Accept(AstVisitor* v);
-
Statement* init() const { return init_; }
void set_init(Statement* stmt) { init_ = stmt; }
Expression* cond() const { return cond_; }
@@ -486,14 +484,14 @@
public:
explicit inline ForInStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(ForInStatement)
+
void Initialize(Expression* each, Expression* enumerable, Statement* body) {
IterationStatement::Initialize(body);
each_ = each;
enumerable_ = enumerable;
}
- virtual void Accept(AstVisitor* v);
-
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
@@ -508,10 +506,7 @@
explicit ExpressionStatement(Expression* expression)
: expression_(expression) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion.
- virtual ExpressionStatement* AsExpressionStatement() { return this; }
+ DECLARE_NODE_TYPE(ExpressionStatement)
virtual Assignment* StatementAsSimpleAssignment();
virtual CountOperation* StatementAsCountOperation();
@@ -529,7 +524,7 @@
explicit ContinueStatement(IterationStatement* target)
: target_(target) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(ContinueStatement)
IterationStatement* target() const { return target_; }
@@ -543,7 +538,7 @@
explicit BreakStatement(BreakableStatement* target)
: target_(target) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(BreakStatement)
BreakableStatement* target() const { return target_; }
@@ -557,10 +552,7 @@
explicit ReturnStatement(Expression* expression)
: expression_(expression) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion.
- virtual ReturnStatement* AsReturnStatement() { return this; }
+ DECLARE_NODE_TYPE(ReturnStatement)
Expression* expression() { return expression_; }
@@ -574,7 +566,7 @@
explicit WithEnterStatement(Expression* expression, bool is_catch_block)
: expression_(expression), is_catch_block_(is_catch_block) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(WithEnterStatement)
Expression* expression() const { return expression_; }
@@ -590,7 +582,7 @@
public:
WithExitStatement() { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(WithExitStatement)
};
@@ -617,13 +609,13 @@
public:
explicit inline SwitchStatement(ZoneStringList* labels);
+ DECLARE_NODE_TYPE(SwitchStatement)
+
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
}
- virtual void Accept(AstVisitor* v);
-
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
@@ -647,7 +639,7 @@
then_statement_(then_statement),
else_statement_(else_statement) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(IfStatement)
bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
@@ -717,7 +709,7 @@
catch_block_(catch_block) {
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(TryCatchStatement)
VariableProxy* catch_var() const { return catch_var_; }
Block* catch_block() const { return catch_block_; }
@@ -734,7 +726,7 @@
: TryStatement(try_block),
finally_block_(finally_block) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(TryFinallyStatement)
Block* finally_block() const { return finally_block_; }
@@ -745,18 +737,13 @@
class DebuggerStatement: public Statement {
public:
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(DebuggerStatement)
};
class EmptyStatement: public Statement {
public:
- EmptyStatement() {}
-
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion.
- virtual EmptyStatement* AsEmptyStatement() { return this; }
+ DECLARE_NODE_TYPE(EmptyStatement)
};
@@ -764,13 +751,11 @@
public:
explicit Literal(Handle<Object> handle) : handle_(handle) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Literal)
+
virtual bool IsTrivial() { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
- // Type testing & conversion.
- virtual Literal* AsLiteral() { return this; }
-
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
return handle_.is_identical_to(other->handle_);
@@ -864,8 +849,7 @@
properties_(properties),
fast_elements_(fast_elements) {}
- virtual ObjectLiteral* AsObjectLiteral() { return this; }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(ObjectLiteral)
Handle<FixedArray> constant_properties() const {
return constant_properties_;
@@ -891,7 +875,7 @@
pattern_(pattern),
flags_(flags) {}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(RegExpLiteral)
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@@ -914,8 +898,7 @@
constant_elements_(constant_elements),
values_(values) {}
- virtual void Accept(AstVisitor* v);
- virtual ArrayLiteral* AsArrayLiteral() { return this; }
+ DECLARE_NODE_TYPE(ArrayLiteral)
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -935,7 +918,7 @@
: key_(key), value_(value) {
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CatchExtensionObject)
Literal* key() const { return key_; }
VariableProxy* value() const { return value_; }
@@ -950,17 +933,13 @@
public:
explicit VariableProxy(Variable* var);
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(VariableProxy)
// Type testing & conversion
virtual Property* AsProperty() {
return var_ == NULL ? NULL : var_->AsProperty();
}
- virtual VariableProxy* AsVariableProxy() {
- return this;
- }
-
Variable* AsVariable() {
if (this == NULL || var_ == NULL) return NULL;
Expression* rewrite = var_->rewrite();
@@ -1055,10 +1034,7 @@
ASSERT(var != NULL);
}
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion
- virtual Slot* AsSlot() { return this; }
+ DECLARE_NODE_TYPE(Slot)
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
@@ -1085,10 +1061,7 @@
Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
: obj_(obj), key_(key), pos_(pos), type_(type) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion
- virtual Property* AsProperty() { return this; }
+ DECLARE_NODE_TYPE(Property)
virtual bool IsValidLeftHandSide() { return true; }
@@ -1117,10 +1090,7 @@
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
- virtual void Accept(AstVisitor* v);
-
- // Type testing and conversion.
- virtual Call* AsCall() { return this; }
+ DECLARE_NODE_TYPE(Call)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1142,7 +1112,7 @@
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CallNew)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1166,7 +1136,7 @@
ZoneList<Expression*>* arguments)
: name_(name), function_(function), arguments_(arguments) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CallRuntime)
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
@@ -1187,11 +1157,9 @@
ASSERT(Token::IsUnaryOp(op));
}
- virtual void Accept(AstVisitor* v);
- virtual bool ResultOverwriteAllowed();
+ DECLARE_NODE_TYPE(UnaryOperation)
- // Type testing & conversion
- virtual UnaryOperation* AsUnaryOperation() { return this; }
+ virtual bool ResultOverwriteAllowed();
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@@ -1215,11 +1183,9 @@
// Create the binary operation corresponding to a compound assignment.
explicit BinaryOperation(Assignment* assignment);
- virtual void Accept(AstVisitor* v);
- virtual bool ResultOverwriteAllowed();
+ DECLARE_NODE_TYPE(BinaryOperation)
- // Type testing & conversion
- virtual BinaryOperation* AsBinaryOperation() { return this; }
+ virtual bool ResultOverwriteAllowed();
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
@@ -1241,12 +1207,12 @@
ASSERT(Token::IsCountOp(op));
}
+ DECLARE_NODE_TYPE(IncrementOperation)
+
Token::Value op() const { return op_; }
bool is_increment() { return op_ == Token::INC; }
Expression* expression() const { return expression_; }
- virtual void Accept(AstVisitor* v);
-
private:
Token::Value op_;
Expression* expression_;
@@ -1259,9 +1225,7 @@
CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
: is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
- virtual void Accept(AstVisitor* v);
-
- virtual CountOperation* AsCountOperation() { return this; }
+ DECLARE_NODE_TYPE(CountOperation)
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
@@ -1294,16 +1258,13 @@
ASSERT(Token::IsCompareOp(op));
}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CompareOperation)
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
int position() const { return pos_; }
- // Type testing & conversion
- virtual CompareOperation* AsCompareOperation() { return this; }
-
private:
Token::Value op_;
Expression* left_;
@@ -1317,7 +1278,7 @@
CompareToNull(bool is_strict, Expression* expression)
: is_strict_(is_strict), expression_(expression) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(CompareToNull)
bool is_strict() const { return is_strict_; }
Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
@@ -1342,7 +1303,7 @@
then_expression_position_(then_expression_position),
else_expression_position_(else_expression_position) { }
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Conditional)
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
@@ -1368,8 +1329,7 @@
ASSERT(Token::IsAssignmentOp(op));
}
- virtual void Accept(AstVisitor* v);
- virtual Assignment* AsAssignment() { return this; }
+ DECLARE_NODE_TYPE(Assignment)
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
@@ -1406,7 +1366,7 @@
Throw(Expression* exception, int pos)
: exception_(exception), pos_(pos) {}
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
int position() const { return pos_; }
@@ -1452,10 +1412,7 @@
#endif
}
- virtual void Accept(AstVisitor* v);
-
- // Type testing & conversion
- virtual FunctionLiteral* AsFunctionLiteral() { return this; }
+ DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return name_; }
Scope* scope() const { return scope_; }
@@ -1522,12 +1479,12 @@
Handle<SharedFunctionInfo> shared_function_info)
: shared_function_info_(shared_function_info) { }
+ DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
+
Handle<SharedFunctionInfo> shared_function_info() const {
return shared_function_info_;
}
- virtual void Accept(AstVisitor* v);
-
private:
Handle<SharedFunctionInfo> shared_function_info_;
};
@@ -1535,7 +1492,7 @@
class ThisFunction: public Expression {
public:
- virtual void Accept(AstVisitor* v);
+ DECLARE_NODE_TYPE(ThisFunction)
};
diff --git a/src/compiler.cc b/src/compiler.cc
index 825198e..6ef5a1c 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -146,9 +146,10 @@
bool is_json = (validate == Compiler::VALIDATE_JSON);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (is_eval || is_json) {
- script->set_compilation_type(
- is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
- Smi::FromInt(Script::COMPILATION_TYPE_EVAL));
+ Script::CompilationType compilation_type = is_json
+ ? Script::COMPILATION_TYPE_JSON
+ : Script::COMPILATION_TYPE_EVAL;
+ script->set_compilation_type(Smi::FromInt(compilation_type));
// For eval scripts add information on the function from which eval was
// called.
if (is_eval) {
@@ -171,16 +172,16 @@
ASSERT(is_eval || is_global);
// Build AST.
+ EagerCompilationInfo info(script, is_eval);
FunctionLiteral* lit =
MakeAST(is_global, script, extension, pre_data, is_json);
- LiveEditFunctionTracker live_edit_tracker(lit);
-
// Check for parse errors.
if (lit == NULL) {
ASSERT(Top::has_pending_exception());
return Handle<SharedFunctionInfo>::null();
}
+ info.set_function(lit);
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
@@ -191,7 +192,7 @@
HistogramTimerScope timer(rate);
// Compile the code.
- CompilationInfo info(lit, script, is_eval);
+ LiveEditFunctionTracker live_edit_tracker(lit);
Handle<Code> code = MakeCode(context, &info);
// Check for stack-overflow exceptions.
@@ -376,20 +377,12 @@
// Compute name, source code and script data.
Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<String> name(String::cast(shared->name()));
-
- int start_position = shared->start_position();
- int end_position = shared->end_position();
- bool is_expression = shared->is_expression();
- Counters::total_compile_size.Increment(end_position - start_position);
+ int compiled_size = shared->end_position() - shared->start_position();
+ Counters::total_compile_size.Increment(compiled_size);
// Generate the AST for the lazily compiled function. The AST may be
// NULL in case of parser stack overflow.
- FunctionLiteral* lit = MakeLazyAST(info->script(),
- name,
- start_position,
- end_position,
- is_expression);
+ FunctionLiteral* lit = MakeLazyAST(shared);
// Check for parse errors.
if (lit == NULL) {
@@ -413,9 +406,9 @@
}
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
- name,
+ Handle<String>(String::cast(shared->name())),
Handle<String>(shared->inferred_name()),
- start_position,
+ shared->start_position(),
info->script(),
code);
@@ -482,7 +475,8 @@
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
- CompilationInfo info(literal, script, false);
+ EagerCompilationInfo info(script, false);
+ info.set_function(literal);
bool is_run_once = literal->try_full_codegen();
bool use_full = FLAG_full_compiler && !literal->contains_loops();
diff --git a/src/compiler.h b/src/compiler.h
index ed26603..ae0d6de 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -41,121 +41,112 @@
// is constructed based on the resources available at compile-time.
class CompilationInfo BASE_EMBEDDED {
public:
- // Lazy compilation of a JSFunction.
- CompilationInfo(Handle<JSFunction> closure,
- int loop_nesting,
- Handle<Object> receiver)
- : closure_(closure),
- function_(NULL),
- is_eval_(false),
- loop_nesting_(loop_nesting),
- receiver_(receiver) {
- Initialize();
- ASSERT(!closure_.is_null() &&
- shared_info_.is_null() &&
- script_.is_null());
+ virtual ~CompilationInfo() {}
+
+ // Dispatched behavior.
+ virtual Handle<SharedFunctionInfo> shared_info() const = 0;
+
+ virtual Handle<Script> script() const {
+ return Handle<Script>(Script::cast(shared_info()->script()));
}
- // Lazy compilation based on SharedFunctionInfo.
- explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
- : shared_info_(shared_info),
- function_(NULL),
- is_eval_(false),
- loop_nesting_(0) {
- Initialize();
- ASSERT(closure_.is_null() &&
- !shared_info_.is_null() &&
- script_.is_null());
+ virtual Handle<JSFunction> closure() const {
+ return Handle<JSFunction>::null();
}
- // Eager compilation.
- CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
- : script_(script),
- function_(literal),
- is_eval_(is_eval),
- loop_nesting_(0) {
- Initialize();
- ASSERT(closure_.is_null() &&
- shared_info_.is_null() &&
- !script_.is_null());
- }
+ virtual bool is_eval() const { return false; }
- // We can only get a JSFunction if we actually have one.
- Handle<JSFunction> closure() { return closure_; }
+ virtual int loop_nesting() const { return 0; }
- // We can get a SharedFunctionInfo from a JSFunction or if we actually
- // have one.
- Handle<SharedFunctionInfo> shared_info() {
- if (!closure().is_null()) {
- return Handle<SharedFunctionInfo>(closure()->shared());
- } else {
- return shared_info_;
- }
- }
-
- // We can always get a script. Either we have one or we can get a shared
- // function info.
- Handle<Script> script() {
- if (!script_.is_null()) {
- return script_;
- } else {
- ASSERT(shared_info()->script()->IsScript());
- return Handle<Script>(Script::cast(shared_info()->script()));
- }
- }
+ virtual bool has_global_object() const { return false; }
+ virtual GlobalObject* global_object() const { return NULL; }
// There should always be a function literal, but it may be set after
// construction (for lazy compilation).
FunctionLiteral* function() { return function_; }
void set_function(FunctionLiteral* literal) { function_ = literal; }
- // Simple accessors.
- bool is_eval() { return is_eval_; }
- int loop_nesting() { return loop_nesting_; }
- bool has_receiver() { return !receiver_.is_null(); }
- Handle<Object> receiver() { return receiver_; }
-
- bool has_this_properties() { return has_this_properties_; }
- void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
-
- bool has_global_object() {
- return !closure().is_null() && (closure()->context()->global() != NULL);
- }
-
- GlobalObject* global_object() {
- return has_global_object() ? closure()->context()->global() : NULL;
- }
-
- bool has_globals() { return has_globals_; }
- void set_has_globals(bool flag) { has_globals_ = flag; }
-
// Derived accessors.
Scope* scope() { return function()->scope(); }
+ protected:
+ CompilationInfo() : function_(NULL) {}
+
private:
- void Initialize() {
- has_this_properties_ = false;
- has_globals_ = false;
- }
-
- Handle<JSFunction> closure_;
- Handle<SharedFunctionInfo> shared_info_;
- Handle<Script> script_;
-
FunctionLiteral* function_;
- bool is_eval_;
- int loop_nesting_;
-
- Handle<Object> receiver_;
-
- bool has_this_properties_;
- bool has_globals_;
-
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
+class EagerCompilationInfo: public CompilationInfo {
+ public:
+ EagerCompilationInfo(Handle<Script> script, bool is_eval)
+ : script_(script), is_eval_(is_eval) {
+ ASSERT(!script.is_null());
+ }
+
+ // Overridden functions from the base class.
+ virtual Handle<SharedFunctionInfo> shared_info() const {
+ return Handle<SharedFunctionInfo>::null();
+ }
+
+ virtual Handle<Script> script() const { return script_; }
+
+ virtual bool is_eval() const { return is_eval_; }
+
+ private:
+ Handle<Script> script_;
+ bool is_eval_;
+};
+
+
+class LazySharedCompilationInfo: public CompilationInfo {
+ public:
+ explicit LazySharedCompilationInfo(Handle<SharedFunctionInfo> shared_info)
+ : shared_info_(shared_info) {
+ ASSERT(!shared_info.is_null());
+ }
+
+ // Overridden functions from the base class.
+ virtual Handle<SharedFunctionInfo> shared_info() const {
+ return shared_info_;
+ }
+
+ private:
+ Handle<SharedFunctionInfo> shared_info_;
+};
+
+
+class LazyFunctionCompilationInfo: public CompilationInfo {
+ public:
+ LazyFunctionCompilationInfo(Handle<JSFunction> closure,
+ int loop_nesting)
+ : closure_(closure), loop_nesting_(loop_nesting) {
+ ASSERT(!closure.is_null());
+ }
+
+ // Overridden functions from the base class.
+ virtual Handle<SharedFunctionInfo> shared_info() const {
+ return Handle<SharedFunctionInfo>(closure_->shared());
+ }
+
+ virtual int loop_nesting() const { return loop_nesting_; }
+
+ virtual bool has_global_object() const {
+ return closure_->context()->global() != NULL;
+ }
+
+ virtual GlobalObject* global_object() const {
+ return closure_->context()->global();
+ }
+
+ private:
+ Handle<JSFunction> closure_;
+ int loop_nesting_;
+};
+
+
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
diff --git a/src/contexts.cc b/src/contexts.cc
index 723354f..1ce5007 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -90,7 +90,7 @@
do {
if (FLAG_trace_contexts) {
- PrintF(" - looking in context %p", *context);
+ PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
if (context->IsGlobalContext()) PrintF(" (global context)");
PrintF("\n");
}
@@ -110,7 +110,8 @@
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n", *extension);
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(*extension));
}
return extension;
}
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 44a1050..02aacb5 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -42,7 +42,7 @@
if (Contains(i)) {
if (!first) PrintF(",");
first = false;
- PrintF("%d");
+ PrintF("%d", i);
}
}
PrintF("}");
diff --git a/src/disassembler.cc b/src/disassembler.cc
index e79421f..2a4ea74 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -44,7 +44,10 @@
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
for (byte* pc = begin; pc < end; pc++) {
if (f == NULL) {
- PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n", pc, pc - begin, *pc);
+ PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
+ reinterpret_cast<intptr_t>(pc),
+ pc - begin,
+ *pc);
} else {
fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 263a2a4..84a0eaa 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -108,6 +108,8 @@
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
"enable use of SSE3 instructions if available")
+DEFINE_bool(enable_sse4_1, true,
+ "enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
DEFINE_bool(enable_rdtsc, true,
@@ -179,8 +181,8 @@
"always inline smi code in non-opt code")
// heap.cc
-DEFINE_int(max_new_space_size, 0, "max size of the new generation")
-DEFINE_int(max_old_space_size, 0, "max size of the old generation")
+DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
+DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
diff --git a/src/global-handles.cc b/src/global-handles.cc
index a909caf..0207322 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -486,7 +486,7 @@
}
PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %dB\n", sizeof(Node) * total);
+ PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
@@ -497,8 +497,10 @@
void GlobalHandles::Print() {
PrintF("Global handles:\n");
for (Node* current = head_; current != NULL; current = current->next()) {
- PrintF(" handle %p to %p (weak=%d)\n", current->handle().location(),
- *current->handle(), current->state_ == Node::WEAK);
+ PrintF(" handle %p to %p (weak=%d)\n",
+ reinterpret_cast<void*>(current->handle().location()),
+ reinterpret_cast<void*>(*current->handle()),
+ current->state_ == Node::WEAK);
}
}
diff --git a/src/handles.cc b/src/handles.cc
index 78a7fcf..0207492 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -779,20 +779,19 @@
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
- CompilationInfo info(shared);
+ LazySharedCompilationInfo info(shared);
return CompileLazyHelper(&info, flag);
}
bool CompileLazy(Handle<JSFunction> function,
- Handle<Object> receiver,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
- CompilationInfo info(function, 0, receiver);
+ LazyFunctionCompilationInfo info(function, 0);
bool result = CompileLazyHelper(&info, flag);
PROFILE(FunctionCreateEvent(*function));
return result;
@@ -801,14 +800,13 @@
bool CompileLazyInLoop(Handle<JSFunction> function,
- Handle<Object> receiver,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
- CompilationInfo info(function, 1, receiver);
+ LazyFunctionCompilationInfo info(function, 1);
bool result = CompileLazyHelper(&info, flag);
PROFILE(FunctionCreateEvent(*function));
return result;
diff --git a/src/handles.h b/src/handles.h
index 135dbfb..69170ff 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -345,13 +345,9 @@
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
-bool CompileLazy(Handle<JSFunction> function,
- Handle<Object> receiver,
- ClearExceptionFlag flag);
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-bool CompileLazyInLoop(Handle<JSFunction> function,
- Handle<Object> receiver,
- ClearExceptionFlag flag);
+bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
class NoHandleAllocation BASE_EMBEDDED {
public:
diff --git a/src/heap-inl.h b/src/heap-inl.h
index b68f5c1..27a14bc 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -36,7 +36,7 @@
namespace internal {
void Heap::UpdateOldSpaceLimits() {
- int old_gen_size = PromotedSpaceSize();
+ intptr_t old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
diff --git a/src/heap.cc b/src/heap.cc
index 047e331..23bfbd8 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -63,8 +63,8 @@
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
-int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
-int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
int Heap::old_gen_exhausted_ = false;
@@ -75,19 +75,19 @@
// a multiple of Page::kPageSize.
#if defined(ANDROID)
int Heap::max_semispace_size_ = 2*MB;
-int Heap::max_old_generation_size_ = 192*MB;
+intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
-size_t Heap::code_range_size_ = 0;
+intptr_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
-int Heap::max_old_generation_size_ = 1*GB;
+intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
-size_t Heap::code_range_size_ = 512*MB;
+intptr_t Heap::code_range_size_ = 512*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
-int Heap::max_old_generation_size_ = 512*MB;
+intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
-size_t Heap::code_range_size_ = 0;
+intptr_t Heap::code_range_size_ = 0;
#endif
// The snapshot semispace size will be the default semispace size if
@@ -108,7 +108,7 @@
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
-int Heap::external_allocation_limit_ = 0;
+intptr_t Heap::external_allocation_limit_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
@@ -137,13 +137,13 @@
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG
-int GCTracer::alive_after_last_gc_ = 0;
+intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
-int GCTracer::max_alive_after_gc_ = 0;
+intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;
-int Heap::Capacity() {
+intptr_t Heap::Capacity() {
if (!HasBeenSetup()) return 0;
return new_space_.Capacity() +
@@ -155,7 +155,7 @@
}
-int Heap::CommittedMemory() {
+intptr_t Heap::CommittedMemory() {
if (!HasBeenSetup()) return 0;
return new_space_.CommittedMemory() +
@@ -168,7 +168,7 @@
}
-int Heap::Available() {
+intptr_t Heap::Available() {
if (!HasBeenSetup()) return 0;
return new_space_.Available() +
@@ -289,33 +289,46 @@
#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintF("Memory allocator, used: %8d, available: %8d\n",
+ PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d\n",
MemoryAllocator::Size(),
MemoryAllocator::Available());
- PrintF("New space, used: %8d, available: %8d\n",
+ PrintF("New space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d\n",
Heap::new_space_.Size(),
new_space_.Available());
- PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
old_pointer_space_->Size(),
old_pointer_space_->Available(),
old_pointer_space_->Waste());
- PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
old_data_space_->Size(),
old_data_space_->Available(),
old_data_space_->Waste());
- PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
code_space_->Size(),
code_space_->Available(),
code_space_->Waste());
- PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
map_space_->Size(),
map_space_->Available(),
map_space_->Waste());
- PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
+ PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d"
+ ", waste: %8" V8_PTR_PREFIX "d\n",
cell_space_->Size(),
cell_space_->Available(),
cell_space_->Waste());
- PrintF("Large object space, used: %8d, avaialble: %8d\n",
+ PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
+ ", available: %8" V8_PTR_PREFIX "d\n",
lo_space_->Size(),
lo_space_->Available());
}
@@ -364,8 +377,8 @@
#endif
}
-int Heap::SizeOfObjects() {
- int total = 0;
+intptr_t Heap::SizeOfObjects() {
+ intptr_t total = 0;
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Size();
@@ -388,7 +401,7 @@
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
- Counters::alive_after_last_gc.Set(SizeOfObjects());
+ Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));
Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
@@ -690,7 +703,7 @@
EnsureFromSpaceIsCommitted();
- int start_new_space_size = Heap::new_space()->Size();
+ int start_new_space_size = Heap::new_space()->SizeAsInt();
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
@@ -962,7 +975,7 @@
DescriptorLookupCache::Clear();
// Used for updating survived_since_last_expansion_ at function end.
- int survived_watermark = PromotedSpaceSize();
+ intptr_t survived_watermark = PromotedSpaceSize();
CheckNewSpaceExpansionCriteria();
@@ -1032,8 +1045,8 @@
new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size());
+ IncrementYoungSurvivorsCounter(static_cast<int>(
+ (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
LOG(ResourceEvent("scavenge", "end"));
@@ -3496,8 +3509,10 @@
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
PrintF("mark-compact GC : %d\n", mc_count_);
- PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
- PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
+ PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
+ old_gen_promotion_limit_);
+ PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+ old_gen_allocation_limit_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4069,15 +4084,16 @@
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
+ return ConfigureHeap(
+ FLAG_max_new_space_size * (KB / 2), FLAG_max_old_space_size * MB);
}
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
*stats->end_marker = HeapStats::kEndMarker;
- *stats->new_space_size = new_space_.Size();
- *stats->new_space_capacity = new_space_.Capacity();
+ *stats->new_space_size = new_space_.SizeAsInt();
+ *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
*stats->old_pointer_space_size = old_pointer_space_->Size();
*stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
*stats->old_data_space_size = old_data_space_->Size();
@@ -4111,7 +4127,7 @@
}
-int Heap::PromotedSpaceSize() {
+intptr_t Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
+ code_space_->Size()
@@ -4222,8 +4238,8 @@
if (!CreateInitialObjects()) return false;
}
- LOG(IntEvent("heap-capacity", Capacity()));
- LOG(IntEvent("heap-available", Available()));
+ LOG(IntPtrTEvent("heap-capacity", Capacity()));
+ LOG(IntPtrTEvent("heap-available", Available()));
#ifdef ENABLE_LOGGING_AND_PROFILING
// This should be called only after initial objects have been created.
@@ -4257,7 +4273,8 @@
PrintF("mark_compact_count=%d ", mc_count_);
PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
- PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
+ PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
+ GCTracer::get_max_alive_after_gc());
PrintF("\n\n");
}
@@ -4383,7 +4400,9 @@
public:
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n", p, *p);
+ PrintF(" handle %p to %p\n",
+ reinterpret_cast<void*>(p),
+ reinterpret_cast<void*>(*p));
}
};
@@ -4736,8 +4755,8 @@
#endif
-static int CountTotalHolesSize() {
- int holes_size = 0;
+static intptr_t CountTotalHolesSize() {
+ intptr_t holes_size = 0;
OldSpaces spaces;
for (OldSpace* space = spaces.next();
space != NULL;
@@ -4835,13 +4854,14 @@
PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
- PrintF("total_size_before=%d ", start_size_);
- PrintF("total_size_after=%d ", Heap::SizeOfObjects());
- PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
- PrintF("holes_size_after=%d ", CountTotalHolesSize());
+ PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
+ PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
+ PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
+ in_free_list_or_wasted_before_gc_);
+ PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
- PrintF("allocated=%d ", allocated_since_last_gc_);
- PrintF("promoted=%d ", promoted_objects_size_);
+ PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
+ PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
PrintF("\n");
}
diff --git a/src/heap.h b/src/heap.h
index 8a11530..b1ef19f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -245,31 +245,31 @@
// semi space. The young generation consists of two semi spaces and
// we reserve twice the amount needed for those in order to ensure
// that new space can be aligned to its size.
- static int MaxReserved() {
+ static intptr_t MaxReserved() {
return 4 * reserved_semispace_size_ + max_old_generation_size_;
}
static int MaxSemiSpaceSize() { return max_semispace_size_; }
static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
static int InitialSemiSpaceSize() { return initial_semispace_size_; }
- static int MaxOldGenerationSize() { return max_old_generation_size_; }
+ static intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
- static int Capacity();
+ static intptr_t Capacity();
// Returns the amount of memory currently committed for the heap.
- static int CommittedMemory();
+ static intptr_t CommittedMemory();
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
- static int Available();
+ static intptr_t Available();
// Returns the maximum object size in paged space.
static inline int MaxObjectSizeInPagedSpace();
// Returns of size of all objects residing in the heap.
- static int SizeOfObjects();
+ static intptr_t SizeOfObjects();
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
@@ -1069,8 +1069,8 @@
static int reserved_semispace_size_;
static int max_semispace_size_;
static int initial_semispace_size_;
- static int max_old_generation_size_;
- static size_t code_range_size_;
+ static intptr_t max_old_generation_size_;
+ static intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1098,7 +1098,7 @@
static HeapState gc_state_;
// Returns the size of object residing in non new spaces.
- static int PromotedSpaceSize();
+ static intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since last global gc.
static int PromotedExternalMemorySize();
@@ -1133,16 +1133,16 @@
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke.
- static int old_gen_promotion_limit_;
+ static intptr_t old_gen_promotion_limit_;
// Limit that triggers a global GC as soon as is reasonable. This is
// checked before expanding a paged space in the old generation and on
// every allocation in large object space.
- static int old_gen_allocation_limit_;
+ static intptr_t old_gen_allocation_limit_;
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
- static int external_allocation_limit_;
+ static intptr_t external_allocation_limit_;
// The amount of external memory registered through the API kept alive
// by global handles
@@ -1231,8 +1231,8 @@
GCTracer* tracer,
CollectionPolicy collectionPolicy);
- static const int kMinimumPromotionLimit = 2 * MB;
- static const int kMinimumAllocationLimit = 8 * MB;
+ static const intptr_t kMinimumPromotionLimit = 2 * MB;
+ static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline static void UpdateOldSpaceLimits();
@@ -1385,24 +1385,24 @@
int* start_marker; // 0
int* new_space_size; // 1
int* new_space_capacity; // 2
- int* old_pointer_space_size; // 3
- int* old_pointer_space_capacity; // 4
- int* old_data_space_size; // 5
- int* old_data_space_capacity; // 6
- int* code_space_size; // 7
- int* code_space_capacity; // 8
- int* map_space_size; // 9
- int* map_space_capacity; // 10
- int* cell_space_size; // 11
- int* cell_space_capacity; // 12
- int* lo_space_size; // 13
+ intptr_t* old_pointer_space_size; // 3
+ intptr_t* old_pointer_space_capacity; // 4
+ intptr_t* old_data_space_size; // 5
+ intptr_t* old_data_space_capacity; // 6
+ intptr_t* code_space_size; // 7
+ intptr_t* code_space_capacity; // 8
+ intptr_t* map_space_size; // 9
+ intptr_t* map_space_capacity; // 10
+ intptr_t* cell_space_size; // 11
+ intptr_t* cell_space_capacity; // 12
+ intptr_t* lo_space_size; // 13
int* global_handle_count; // 14
int* weak_global_handle_count; // 15
int* pending_global_handle_count; // 16
int* near_death_global_handle_count; // 17
int* destroyed_global_handle_count; // 18
- int* memory_allocator_size; // 19
- int* memory_allocator_capacity; // 20
+ intptr_t* memory_allocator_size; // 19
+ intptr_t* memory_allocator_capacity; // 20
int* objects_per_type; // 21
int* size_per_type; // 22
int* os_error; // 23
@@ -1837,7 +1837,7 @@
static int get_max_gc_pause() { return max_gc_pause_; }
// Returns maximum size of objects alive after GC.
- static int get_max_alive_after_gc() { return max_alive_after_gc_; }
+ static intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
// Returns minimal interval between two subsequent collections.
static int get_min_in_mutator() { return min_in_mutator_; }
@@ -1852,7 +1852,7 @@
}
double start_time_; // Timestamp set in the constructor.
- int start_size_; // Size of objects in heap set in constructor.
+ intptr_t start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
// A count (including this one, eg, the first collection is 1) of the
@@ -1884,30 +1884,30 @@
// Total amount of space either wasted or contained in one of free lists
// before the current GC.
- int in_free_list_or_wasted_before_gc_;
+ intptr_t in_free_list_or_wasted_before_gc_;
// Difference between space used in the heap at the beginning of the current
// collection and the end of the previous collection.
- int allocated_since_last_gc_;
+ intptr_t allocated_since_last_gc_;
// Amount of time spent in mutator that is time elapsed between end of the
// previous collection and the beginning of the current one.
double spent_in_mutator_;
// Size of objects promoted during the current collection.
- int promoted_objects_size_;
+ intptr_t promoted_objects_size_;
// Maximum GC pause.
static int max_gc_pause_;
// Maximum size of objects alive after GC.
- static int max_alive_after_gc_;
+ static intptr_t max_alive_after_gc_;
// Minimal interval between two subsequent collections.
static int min_in_mutator_;
// Size of objects alive after last GC.
- static int alive_after_last_gc_;
+ static intptr_t alive_after_last_gc_;
static double last_gc_end_timestamp_;
};
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e2f4547..e201179 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2414,7 +2414,7 @@
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 1dab0a6..d8051c8 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -376,6 +376,7 @@
static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
+ if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 413c36e..a2990a2 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -885,8 +885,8 @@
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
+ // Check that the key is an array index, that is Uint32.
+ __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
__ j(not_zero, &slow, not_taken);
// Get the map of the receiver.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index dd0d636..bb0a46c 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1944,6 +1944,109 @@
}
+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into eax.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smi);
+
+ // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
+ // otherwise.
+ __ mov(ebx, eax);
+ __ sar(ebx, kBitsPerInt - 1);
+
+ // Do bitwise not or do nothing depending on ebx.
+ __ xor_(eax, Operand(ebx));
+
+ // Add 1 or do nothing depending on ebx.
+ __ sub(eax, Operand(ebx));
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ j(negative, &slow);
+
+ // Smi case done.
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign into ebx.
+ __ bind(&not_smi);
+ __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ test(ebx, Immediate(HeapNumber::kSignMask));
+ __ j(not_zero, &negative_sign);
+ __ ret(2 * kPointerSize);
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ and_(ebx, ~HeapNumber::kSignMask);
+ __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+ __ AllocateHeapNumber(eax, edi, edx, &slow);
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // ecx: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
diff --git a/src/ic.cc b/src/ic.cc
index 5b62a8a..a9c2a48 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1541,18 +1541,17 @@
// Static IC stub generators.
//
-static Object* CompileFunction(Object* result,
- Handle<Object> object,
- InLoopFlag in_loop) {
+static JSFunction* CompileFunction(JSFunction* function,
+ InLoopFlag in_loop) {
// Compile now with optimization.
HandleScope scope;
- Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
+ Handle<JSFunction> function_handle(function);
if (in_loop == IN_LOOP) {
- CompileLazyInLoop(function, object, CLEAR_EXCEPTION);
+ CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
} else {
- CompileLazy(function, object, CLEAR_EXCEPTION);
+ CompileLazy(function_handle, CLEAR_EXCEPTION);
}
- return *function;
+ return *function_handle;
}
@@ -1575,7 +1574,7 @@
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+ return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
}
@@ -1591,7 +1590,7 @@
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+ return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index c07e83f..2fae3af 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -408,6 +408,7 @@
// Build AST.
ScriptDataImpl* pre_data = NULL;
+ EagerCompilationInfo info(script, is_eval);
FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
// Check for parse errors.
@@ -415,10 +416,9 @@
ASSERT(Top::has_pending_exception());
return;
}
+ info.set_function(lit);
// Compile the code.
- CompilationInfo info(lit, script, is_eval);
-
LiveEditFunctionTracker tracker(lit);
Handle<Code> code = MakeCodeForLiveEdit(&info);
diff --git a/src/log.cc b/src/log.cc
index 5c70057..4230cba 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -393,6 +393,13 @@
}
+void Logger::IntPtrTEvent(const char* name, intptr_t value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (FLAG_log) UncheckedIntPtrTEvent(name, value);
+#endif
+}
+
+
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!Log::IsEnabled()) return;
@@ -403,6 +410,16 @@
#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
+ if (!Log::IsEnabled()) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
+ msg.WriteToLogFile();
+}
+#endif
+
+
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_handles) return;
@@ -1005,11 +1022,12 @@
void Logger::HeapSampleStats(const char* space, const char* kind,
- int capacity, int used) {
+ intptr_t capacity, intptr_t used) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
- msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n",
+ msg.Append("heap-sample-stats,\"%s\",\"%s\","
+ "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
space, kind, capacity, used);
msg.WriteToLogFile();
#endif
diff --git a/src/log.h b/src/log.h
index 2534e1e..e513737 100644
--- a/src/log.h
+++ b/src/log.h
@@ -159,6 +159,7 @@
// Emits an event with an int value -> (name, value).
static void IntEvent(const char* name, int value);
+ static void IntPtrTEvent(const char* name, intptr_t value);
// Emits an event with an handle value -> (name, location).
static void HandleEvent(const char* name, Object** location);
@@ -237,7 +238,7 @@
static void HeapSampleJSProducerEvent(const char* constructor,
Address* stack);
static void HeapSampleStats(const char* space, const char* kind,
- int capacity, int used);
+ intptr_t capacity, intptr_t used);
static void SharedLibraryEvent(const char* library_path,
uintptr_t start,
@@ -326,6 +327,7 @@
// Logs an IntEvent regardless of whether FLAG_log is true.
static void UncheckedIntEvent(const char* name, int value);
+ static void UncheckedIntPtrTEvent(const char* name, intptr_t value);
// Stops logging and profiling in case of insufficient resources.
static void StopLoggingAndProfiling();
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index c847b84..26f88cf 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -167,8 +167,8 @@
// reclaiming the waste and free list blocks).
static const int kFragmentationLimit = 15; // Percent.
static const int kFragmentationAllowed = 1 * MB; // Absolute.
- int old_gen_recoverable = 0;
- int old_gen_used = 0;
+ intptr_t old_gen_recoverable = 0;
+ intptr_t old_gen_used = 0;
OldSpaces spaces;
for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
@@ -2008,8 +2008,10 @@
#ifdef DEBUG
if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n", obj->address(),
- map, new_map);
+ PrintF("update %p : %p -> %p\n",
+ obj->address(),
+ reinterpret_cast<void*>(map),
+ reinterpret_cast<void*>(new_map));
}
#endif
}
@@ -2068,8 +2070,8 @@
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);
- int live_maps_size = Heap::map_space()->Size();
- int live_maps = live_maps_size / Map::kSize;
+ intptr_t live_maps_size = Heap::map_space()->Size();
+ int live_maps = static_cast<int>(live_maps_size / Map::kSize);
ASSERT(live_map_objects_size_ == live_maps_size);
if (Heap::map_space()->NeedsCompaction(live_maps)) {
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index ed08468..5883f8b 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -89,7 +89,7 @@
void HeapObject::PrintHeader(const char* id) {
- PrintF("%p: [%s]\n", this, id);
+ PrintF("%p: [%s]\n", reinterpret_cast<void*>(this), id);
}
@@ -522,9 +522,9 @@
void JSObject::JSObjectPrint() {
- PrintF("%p: [JSObject]\n", this);
- PrintF(" - map = %p\n", map());
- PrintF(" - prototype = %p\n", GetPrototype());
+ PrintF("%p: [JSObject]\n", reinterpret_cast<void*>(this));
+ PrintF(" - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(" - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
PrintF(" {\n");
PrintProperties();
PrintElements();
@@ -744,7 +744,7 @@
void JSFunction::JSFunctionPrint() {
HeapObject::PrintHeader("Function");
- PrintF(" - map = 0x%p\n", map());
+ PrintF(" - map = 0x%p\n", reinterpret_cast<void*>(map()));
PrintF(" - initial_map = ");
if (has_initial_map()) {
initial_map()->ShortPrint();
@@ -1224,9 +1224,9 @@
void BreakPointInfo::BreakPointInfoPrint() {
HeapObject::PrintHeader("BreakPointInfo");
- PrintF("\n - code_position: %d", code_position());
- PrintF("\n - source_position: %d", source_position());
- PrintF("\n - statement_position: %d", statement_position());
+ PrintF("\n - code_position: %d", code_position()->value());
+ PrintF("\n - source_position: %d", source_position()->value());
+ PrintF("\n - statement_position: %d", statement_position()->value());
PrintF("\n - break_point_objects: ");
break_point_objects()->ShortPrint();
}
diff --git a/src/parser.cc b/src/parser.cc
index a3f469a..a386848 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -115,11 +115,7 @@
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Handle<String> source,
bool in_global_context);
- FunctionLiteral* ParseLazy(Handle<String> source,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression);
+ FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
FunctionLiteral* ParseJson(Handle<String> source);
// The minimum number of contiguous assignment that will
@@ -877,12 +873,30 @@
virtual int function_position() { return 0; }
virtual int symbol_position() { return 0; }
virtual int symbol_ids() { return 0; }
+ virtual void PauseRecording() {}
+ virtual void ResumeRecording() {}
virtual Vector<unsigned> ExtractData() {
return Vector<unsigned>();
};
};
+
+class ConditionalLogPauseScope {
+ public:
+ ConditionalLogPauseScope(bool pause, ParserLog* log)
+ : log_(log), pause_(pause) {
+ if (pause) log->PauseRecording();
+ }
+ ~ConditionalLogPauseScope() {
+ if (pause_) log_->ResumeRecording();
+ }
+ private:
+ ParserLog* log_;
+ bool pause_;
+};
+
+
class AstBuildingParserFactory : public ParserFactory {
public:
explicit AstBuildingParserFactory(int expected_symbols)
@@ -970,15 +984,31 @@
return data;
}
+ virtual void PauseRecording() {
+ pause_count_++;
+ is_recording_ = false;
+ }
+
+ virtual void ResumeRecording() {
+ ASSERT(pause_count_ > 0);
+ if (--pause_count_ == 0) is_recording_ = !has_error();
+ }
+
protected:
bool has_error() {
return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
}
+ bool is_recording() {
+ return is_recording_;
+ }
void WriteString(Vector<const char> str);
Collector<unsigned> function_store_;
unsigned preamble_[ScriptDataImpl::kHeaderSize];
+ bool is_recording_;
+ int pause_count_;
+
#ifdef DEBUG
int prev_start;
#endif
@@ -991,6 +1021,7 @@
CompleteParserRecorder();
virtual void LogSymbol(int start, Vector<const char> literal) {
+ if (!is_recording_) return;
int hash = vector_hash(literal);
HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true);
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
@@ -1061,13 +1092,6 @@
};
-void ScriptDataImpl::SkipFunctionEntry(int start) {
- ASSERT(function_index_ + FunctionEntry::kSize <= store_.length());
- ASSERT(static_cast<int>(store_[function_index_]) == start);
- function_index_ += FunctionEntry::kSize;
-}
-
-
FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
// The current pre-data entry must be a FunctionEntry with the given
// start position.
@@ -1126,7 +1150,10 @@
-PartialParserRecorder::PartialParserRecorder() : function_store_(0) {
+PartialParserRecorder::PartialParserRecorder()
+ : function_store_(0),
+ is_recording_(true),
+ pause_count_(0) {
preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
preamble_[ScriptDataImpl::kHasErrorOffset] = false;
@@ -1202,6 +1229,7 @@
for (int i = 0; i < args.length(); i++) {
WriteString(CStrVector(args[i]));
}
+ is_recording_ = false;
}
@@ -1248,7 +1276,7 @@
ASSERT(start > prev_start);
prev_start = start;
#endif
- if (has_error()) return FunctionEntry();
+ if (!is_recording_) return FunctionEntry();
FunctionEntry result(function_store_.AddBlock(FunctionEntry::kSize, 0));
result.set_start_pos(start);
return result;
@@ -1343,6 +1371,8 @@
bool inside_with) {
ASSERT(parent != NULL);
parent->type_ = type;
+ // Initialize function is hijacked by DummyScope to increment scope depth.
+ parent->Initialize(inside_with);
return parent;
}
@@ -1415,6 +1445,7 @@
}
~LexicalScope() {
+ parser_->top_scope_->Leave();
parser_->top_scope_ = prev_scope_;
parser_->with_nesting_level_ = prev_level_;
}
@@ -1480,7 +1511,8 @@
NoHandleAllocation no_handle_allocation;
scanner_.Initialize(source, stream, JAVASCRIPT);
ASSERT(target_stack_ == NULL);
- mode_ = PARSE_EAGERLY;
+ mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
DummyScope top_scope;
LexicalScope scope(this, &top_scope);
TemporaryScope temp_scope(this);
@@ -1551,21 +1583,20 @@
}
-FunctionLiteral* Parser::ParseLazy(Handle<String> source,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression) {
+FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse_lazy);
+ Handle<String> source(String::cast(script_->source()));
Counters::total_parse_size.Increment(source->length());
+ Handle<String> name(String::cast(info->name()));
fni_ = new FuncNameInferrer();
fni_->PushEnclosingName(name);
// Initialize parser state.
source->TryFlatten();
- scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
+ scanner_.Initialize(source, info->start_position(), info->end_position(),
+ JAVASCRIPT);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
@@ -1580,7 +1611,8 @@
LexicalScope lexical_scope(this, scope);
TemporaryScope temp_scope(this);
- FunctionLiteralType type = is_expression ? EXPRESSION : DECLARATION;
+ FunctionLiteralType type =
+ info->is_expression() ? EXPRESSION : DECLARATION;
bool ok = true;
result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
// Make sure the results agree.
@@ -1601,6 +1633,7 @@
return result;
}
+
FunctionLiteral* Parser::ParseJson(Handle<String> source) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
@@ -1658,7 +1691,10 @@
Handle<String> Parser::GetSymbol(bool* ok) {
- log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
+ if (is_pre_parsing_) {
+ log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
+ return Handle<String>::null();
+ }
int symbol_id = -1;
if (pre_data() != NULL) {
symbol_id = pre_data()->GetSymbolIdentifier();
@@ -1971,7 +2007,7 @@
}
// Propagate the collected information on this property assignments.
- if (top_scope_->is_function_scope()) {
+ if (!is_pre_parsing_ && top_scope_->is_function_scope()) {
bool only_simple_this_property_assignments =
this_property_assignment_finder.only_simple_this_property_assignments()
&& top_scope_->declarations()->length() == 0;
@@ -4123,8 +4159,8 @@
int num_parameters = 0;
// Parse function body.
- { Scope::Type type = Scope::FUNCTION_SCOPE;
- Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+ { Scope* scope =
+ factory()->NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
LexicalScope lexical_scope(this, scope);
TemporaryScope temp_scope(this);
top_scope_->SetScopeName(name);
@@ -4155,7 +4191,9 @@
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxis as is the case now.
- if (!function_name.is_null() && function_name->length() > 0) {
+ if (!is_pre_parsing_
+ && !function_name.is_null()
+ && function_name->length() > 0) {
Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
VariableProxy* fproxy =
top_scope_->NewUnresolved(function_name, inside_with());
@@ -4189,22 +4227,18 @@
}
Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
scanner_.SeekForward(end_pos);
- pre_data()->Skip(entry.predata_function_skip(),
- entry.predata_symbol_skip());
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
this_property_assignments = Factory::empty_fixed_array();
Expect(Token::RBRACE, CHECK_OK);
} else {
- if (pre_data() != NULL) {
- // Skip pre-data entry for non-lazily compiled function.
- pre_data()->SkipFunctionEntry(function_block_pos);
+ FunctionEntry entry;
+ if (is_lazily_compiled) entry = log()->LogFunction(function_block_pos);
+ {
+ ConditionalLogPauseScope pause_if(is_lazily_compiled, log());
+ ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
}
- FunctionEntry entry = log()->LogFunction(function_block_pos);
- int predata_function_position_before = log()->function_position();
- int predata_symbol_position_before = log()->symbol_position();
- ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
only_simple_this_property_assignments =
@@ -4214,13 +4248,11 @@
Expect(Token::RBRACE, CHECK_OK);
end_pos = scanner_.location().end_pos;
if (entry.is_valid()) {
+ ASSERT(is_lazily_compiled);
+ ASSERT(is_pre_parsing_);
entry.set_end_pos(end_pos);
entry.set_literal_count(materialized_literal_count);
entry.set_property_count(expected_property_count);
- entry.set_predata_function_skip(
- log()->function_position() - predata_function_position_before);
- entry.set_predata_symbol_skip(
- log()->symbol_position() - predata_symbol_position_before);
}
}
@@ -5440,12 +5472,6 @@
// ----------------------------------------------------------------------------
// The Parser interface.
-// MakeAST() is just a wrapper for the corresponding Parser calls
-// so we don't have to expose the entire Parser class in the .h file.
-
-static bool always_allow_natives_syntax = false;
-
-
ParserMessage::~ParserMessage() {
for (int i = 0; i < args().length(); i++)
DeleteArray(args()[i]);
@@ -5480,9 +5506,7 @@
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_natives_syntax =
- always_allow_natives_syntax ||
- FLAG_allow_natives_syntax ||
- Bootstrapper::IsActive();
+ FLAG_allow_natives_syntax || Bootstrapper::IsActive();
PartialPreParser parser(no_script, allow_natives_syntax, extension);
if (!parser.PreParseProgram(source, stream)) return NULL;
// Extract the accumulated data from the recorder as a single
@@ -5540,9 +5564,7 @@
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_natives_syntax =
- always_allow_natives_syntax ||
- FLAG_allow_natives_syntax ||
- Bootstrapper::IsActive();
+ FLAG_allow_natives_syntax || Bootstrapper::IsActive();
CompletePreParser parser(no_script, allow_natives_syntax, extension);
if (!parser.PreParseProgram(source, stream)) return NULL;
// Extract the accumulated data from the recorder as a single
@@ -5574,15 +5596,15 @@
}
+// MakeAST is just a wrapper for the corresponding Parser calls so we don't
+// have to expose the entire Parser class in the .h file.
FunctionLiteral* MakeAST(bool compile_in_global_context,
Handle<Script> script,
v8::Extension* extension,
ScriptDataImpl* pre_data,
bool is_json) {
bool allow_natives_syntax =
- always_allow_natives_syntax ||
- FLAG_allow_natives_syntax ||
- Bootstrapper::IsActive();
+ FLAG_allow_natives_syntax || Bootstrapper::IsActive();
AstBuildingParser parser(script, allow_natives_syntax, extension, pre_data);
if (pre_data != NULL && pre_data->has_error()) {
Scanner::Location loc = pre_data->MessageLocation();
@@ -5608,25 +5630,13 @@
}
-FunctionLiteral* MakeLazyAST(Handle<Script> script,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression) {
- bool allow_natives_syntax_before = always_allow_natives_syntax;
- always_allow_natives_syntax = true;
- AstBuildingParser parser(script, true, NULL, NULL); // always allow
- always_allow_natives_syntax = allow_natives_syntax_before;
- // Parse the function by pointing to the function source in the script source.
- Handle<String> script_source(String::cast(script->source()));
- FunctionLiteral* result =
- parser.ParseLazy(script_source, name,
- start_position, end_position, is_expression);
+FunctionLiteral* MakeLazyAST(Handle<SharedFunctionInfo> info) {
+ Handle<Script> script(Script::cast(info->script()));
+ AstBuildingParser parser(script, true, NULL, NULL);
+ FunctionLiteral* result = parser.ParseLazy(info);
return result;
}
-
#undef NEW
-
} } // namespace v8::internal
diff --git a/src/parser.h b/src/parser.h
index 8c00857..9a82889 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -72,19 +72,9 @@
backing_[kPropertyCountOffset] = value;
}
- int predata_function_skip() { return backing_[kPredataFunctionSkipOffset]; }
- void set_predata_function_skip(int value) {
- backing_[kPredataFunctionSkipOffset] = value;
- }
-
- int predata_symbol_skip() { return backing_[kPredataSymbolSkipOffset]; }
- void set_predata_symbol_skip(int value) {
- backing_[kPredataSymbolSkipOffset] = value;
- }
-
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 6;
+ static const int kSize = 4;
private:
Vector<unsigned> backing_;
@@ -92,8 +82,6 @@
static const int kEndPosOffset = 1;
static const int kLiteralCountOffset = 2;
static const int kPropertyCountOffset = 3;
- static const int kPredataFunctionSkipOffset = 4;
- static const int kPredataSymbolSkipOffset = 5;
};
@@ -117,7 +105,6 @@
FunctionEntry GetFunctionEntry(int start);
int GetSymbolIdentifier();
- void SkipFunctionEntry(int start);
bool SanityCheck();
Scanner::Location MessageLocation();
@@ -133,28 +120,8 @@
unsigned magic() { return store_[kMagicOffset]; }
unsigned version() { return store_[kVersionOffset]; }
- // Skip forward in the preparser data by the given number
- // of unsigned ints of function entries and the given number of bytes of
- // symbol id encoding.
- void Skip(int function_entries, int symbol_entries) {
- ASSERT(function_entries >= 0);
- ASSERT(function_entries
- <= (static_cast<int>(store_[kFunctionsSizeOffset])
- - (function_index_ - kHeaderSize)));
- ASSERT(symbol_entries >= 0);
- ASSERT(symbol_entries <= symbol_data_end_ - symbol_data_);
-
- unsigned max_function_skip = store_[kFunctionsSizeOffset] -
- static_cast<unsigned>(function_index_ - kHeaderSize);
- function_index_ +=
- Min(static_cast<unsigned>(function_entries), max_function_skip);
- symbol_data_ +=
- Min(static_cast<unsigned>(symbol_entries),
- static_cast<unsigned>(symbol_data_end_ - symbol_data_));
- }
-
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 3;
+ static const unsigned kCurrentVersion = 4;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
@@ -186,9 +153,10 @@
ScriptDataImpl(const char* backing_store, int length)
: store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
- length / sizeof(unsigned)),
+ length / static_cast<int>(sizeof(unsigned))),
owns_store_(false) {
- ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
+ ASSERT_EQ(0, static_cast<int>(
+ reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned)));
}
// Read strings written by ParserRecorder::WriteString.
@@ -224,20 +192,8 @@
RegExpCompileData* result);
-// Support for doing lazy compilation. The script is the script containing full
-// source of the script where the function is declared. The start_position and
-// end_position specifies the part of the script source which has the source
-// for the function declaration in the form:
-//
-// (<formal parameters>) { <function body> }
-//
-// without any function keyword or name.
-//
-FunctionLiteral* MakeLazyAST(Handle<Script> script,
- Handle<String> name,
- int start_position,
- int end_position,
- bool is_expression);
+// Support for doing lazy compilation.
+FunctionLiteral* MakeLazyAST(Handle<SharedFunctionInfo> info);
// Support for handling complex values (array and object literals) that
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index c08602e..41c674b 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -47,8 +47,15 @@
}
+// This is used for printing out debugging information. It makes an integer
+// that is closely related to the address of an object.
+static int LabelToInt(Label* label) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(label));
+}
+
+
void RegExpMacroAssemblerTracer::Bind(Label* label) {
- PrintF("label[%08x]: (Bind)\n", label, label);
+ PrintF("label[%08x]: (Bind)\n", LabelToInt(label));
assembler_->Bind(label);
}
@@ -60,7 +67,7 @@
void RegExpMacroAssemblerTracer::CheckGreedyLoop(Label* label) {
- PrintF(" CheckGreedyLoop(label[%08x]);\n\n", label);
+ PrintF(" CheckGreedyLoop(label[%08x]);\n\n", LabelToInt(label));
assembler_->CheckGreedyLoop(label);
}
@@ -84,14 +91,13 @@
void RegExpMacroAssemblerTracer::GoTo(Label* label) {
- PrintF(" GoTo(label[%08x]);\n\n", label);
+ PrintF(" GoTo(label[%08x]);\n\n", LabelToInt(label));
assembler_->GoTo(label);
}
void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
- PrintF(" PushBacktrack(label[%08x]);\n",
- label);
+ PrintF(" PushBacktrack(label[%08x]);\n", LabelToInt(label));
assembler_->PushBacktrack(label);
}
@@ -176,7 +182,7 @@
const char* check_msg = check_bounds ? "" : " (unchecked)";
PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
cp_offset,
- on_end_of_input,
+ LabelToInt(on_end_of_input),
check_msg,
characters);
assembler_->LoadCurrentCharacter(cp_offset,
@@ -187,39 +193,43 @@
void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
- PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n", limit, on_less);
+ PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n",
+ limit, LabelToInt(on_less));
assembler_->CheckCharacterLT(limit, on_less);
}
void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
Label* on_greater) {
- PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n", limit, on_greater);
+ PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n",
+ limit, LabelToInt(on_greater));
assembler_->CheckCharacterGT(limit, on_greater);
}
void RegExpMacroAssemblerTracer::CheckCharacter(uint32_t c, Label* on_equal) {
- PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n", c, on_equal);
+ PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
+ c, LabelToInt(on_equal));
assembler_->CheckCharacter(c, on_equal);
}
void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
- PrintF(" CheckAtStart(label[%08x]);\n", on_at_start);
+ PrintF(" CheckAtStart(label[%08x]);\n", LabelToInt(on_at_start));
assembler_->CheckAtStart(on_at_start);
}
void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
- PrintF(" CheckNotAtStart(label[%08x]);\n", on_not_at_start);
+ PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
assembler_->CheckNotAtStart(on_not_at_start);
}
void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
Label* on_not_equal) {
- PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n", c, on_not_equal);
+ PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
+ c, LabelToInt(on_not_equal));
assembler_->CheckNotCharacter(c, on_not_equal);
}
@@ -231,7 +241,7 @@
PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
c,
mask,
- on_equal);
+ LabelToInt(on_equal));
assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
}
@@ -243,7 +253,7 @@
PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
c,
mask,
- on_not_equal);
+ LabelToInt(on_not_equal));
assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
}
@@ -258,7 +268,7 @@
c,
minus,
mask,
- on_not_equal);
+ LabelToInt(on_not_equal));
assembler_->CheckNotCharacterAfterMinusAnd(c, minus, mask, on_not_equal);
}
@@ -266,7 +276,7 @@
void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
Label* on_no_match) {
PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
- on_no_match);
+ LabelToInt(on_no_match));
assembler_->CheckNotBackReference(start_reg, on_no_match);
}
@@ -275,7 +285,7 @@
int start_reg,
Label* on_no_match) {
PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
- start_reg, on_no_match);
+ start_reg, LabelToInt(on_no_match));
assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
}
@@ -286,7 +296,7 @@
PrintF(" CheckNotRegistersEqual(reg1=%d, reg2=%d, label[%08x]);\n",
reg1,
reg2,
- on_not_equal);
+ LabelToInt(on_not_equal));
assembler_->CheckNotRegistersEqual(reg1, reg2, on_not_equal);
}
@@ -300,7 +310,8 @@
for (int i = 0; i < str.length(); i++) {
PrintF("u%04x", str[i]);
}
- PrintF("\", cp_offset=%d, label[%08x])\n", cp_offset, on_failure);
+ PrintF("\", cp_offset=%d, label[%08x])\n",
+ cp_offset, LabelToInt(on_failure));
assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
}
@@ -312,7 +323,7 @@
on_no_match);
PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
type,
- on_no_match,
+ LabelToInt(on_no_match),
supported ? "true" : "false");
return supported;
}
@@ -321,7 +332,7 @@
void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
int comparand, Label* if_lt) {
PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, if_lt);
+ register_index, comparand, LabelToInt(if_lt));
assembler_->IfRegisterLT(register_index, comparand, if_lt);
}
@@ -329,7 +340,7 @@
void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
Label* if_eq) {
PrintF(" IfRegisterEqPos(register=%d, label[%08x]);\n",
- register_index, if_eq);
+ register_index, LabelToInt(if_eq));
assembler_->IfRegisterEqPos(register_index, if_eq);
}
@@ -337,7 +348,7 @@
void RegExpMacroAssemblerTracer::IfRegisterGE(int register_index,
int comparand, Label* if_ge) {
PrintF(" IfRegisterGE(register=%d, number=%d, label[%08x]);\n",
- register_index, comparand, if_ge);
+ register_index, comparand, LabelToInt(if_ge));
assembler_->IfRegisterGE(register_index, comparand, if_ge);
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 8d58db7..c80f1fc 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2624,15 +2624,15 @@
if (seq_pat->IsAsciiRepresentation()) {
Vector<const char> pat_vector = seq_pat->ToAsciiVector();
if (seq_sub->IsAsciiRepresentation()) {
- return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
}
- return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
}
Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
if (seq_sub->IsAsciiRepresentation()) {
- return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
}
- return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
}
@@ -2889,67 +2889,39 @@
}
-template <typename schar, typename pchar>
-static bool SearchStringMultiple(Vector<schar> subject,
- String* pattern,
- Vector<pchar> pattern_string,
+template <typename SubjectChar, typename PatternChar>
+static bool SearchStringMultiple(Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
+ String* pattern_string,
FixedArrayBuilder* builder,
int* match_pos) {
int pos = *match_pos;
int subject_length = subject.length();
- int pattern_length = pattern_string.length();
+ int pattern_length = pattern.length();
int max_search_start = subject_length - pattern_length;
- bool is_ascii = (sizeof(schar) == 1);
- StringSearchStrategy strategy =
- InitializeStringSearch(pattern_string, is_ascii);
- switch (strategy) {
- case SEARCH_FAIL: break;
- case SEARCH_SHORT:
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- // Position of end of previous match.
- int match_end = pos + pattern_length;
- int new_pos = SimpleIndexOf(subject, pattern_string, match_end);
- if (new_pos >= 0) {
- // A match.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern);
- } else {
- break;
- }
+ StringSearch<PatternChar, SubjectChar> search(pattern);
+ while (pos <= max_search_start) {
+ if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+ *match_pos = pos;
+ return false;
+ }
+ // Position of end of previous match.
+ int match_end = pos + pattern_length;
+ int new_pos = search.Search(subject, match_end);
+ if (new_pos >= 0) {
+ // A match.
+ if (new_pos > match_end) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ new_pos);
}
+ pos = new_pos;
+ builder->Add(pattern_string);
+ } else {
break;
- case SEARCH_LONG:
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- int match_end = pos + pattern_length;
- int new_pos = ComplexIndexOf(subject, pattern_string, match_end);
- if (new_pos >= 0) {
- // A match has been found.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern);
- } else {
- break;
- }
- }
- break;
+ }
}
+
if (pos < max_search_start) {
ReplacementStringBuilder::AddSubjectSlice(builder,
pos + pattern_length,
@@ -2977,14 +2949,14 @@
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToAsciiVector(),
+ *pattern,
builder,
&match_pos)) break;
} else {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToUC16Vector(),
+ *pattern,
builder,
&match_pos)) break;
}
@@ -2992,14 +2964,14 @@
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToAsciiVector(),
+ *pattern,
builder,
&match_pos)) break;
} else {
if (SearchStringMultiple(subject_vector,
- *pattern,
pattern->ToUC16Vector(),
+ *pattern,
builder,
&match_pos)) break;
}
@@ -4781,51 +4753,23 @@
}
-// Define storage for buffers declared in header file.
-// TODO(lrn): Remove these when rewriting search code.
-int BMBuffers::bad_char_occurrence[kBMAlphabetSize];
-BMGoodSuffixBuffers BMBuffers::bmgs_buffers;
-
-
-template <typename schar, typename pchar>
-void FindStringIndices(Vector<const schar> subject,
- Vector<const pchar> pattern,
+template <typename SubjectChar, typename PatternChar>
+void FindStringIndices(Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject, and the end-of-string index.
// Stop after finding at most limit values.
- StringSearchStrategy strategy =
- InitializeStringSearch(pattern, sizeof(schar) == 1);
- switch (strategy) {
- case SEARCH_FAIL: return;
- case SEARCH_SHORT: {
- int pattern_length = pattern.length();
- int index = 0;
- while (limit > 0) {
- index = SimpleIndexOf(subject, pattern, index);
- if (index < 0) return;
- indices->Add(index);
- index += pattern_length;
- limit--;
- }
- return;
- }
- case SEARCH_LONG: {
- int pattern_length = pattern.length();
- int index = 0;
- while (limit > 0) {
- index = ComplexIndexOf(subject, pattern, index);
- if (index < 0) return;
- indices->Add(index);
- index += pattern_length;
- limit--;
- }
- return;
- }
- default:
- UNREACHABLE();
- return;
+ StringSearch<PatternChar, SubjectChar> search(pattern);
+ int pattern_length = pattern.length();
+ int index = 0;
+ while (limit > 0) {
+ index = search.Search(subject, index);
+ if (index < 0) return;
+ indices->Add(index);
+ index += pattern_length;
+ limit--;
}
}
@@ -6430,7 +6374,7 @@
// this means that things called through constructors are never known to
// be in loops. We compile them as if they are in loops here just in case.
ASSERT(!function->is_compiled());
- if (!CompileLazyInLoop(function, Handle<Object>::null(), KEEP_EXCEPTION)) {
+ if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -6801,7 +6745,7 @@
} else if (obj->IsFalse()) {
PrintF("<false>");
} else {
- PrintF("%p", obj);
+ PrintF("%p", reinterpret_cast<void*>(obj));
}
}
@@ -7253,15 +7197,15 @@
Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
visitor->visit(j, e);
} else {
- Handle<Object> e(
- Heap::AllocateHeapNumber(static_cast<ElementType>(val)));
+ Handle<Object> e =
+ Factory::NewNumber(static_cast<ElementType>(val));
visitor->visit(j, e);
}
}
}
} else {
for (uint32_t j = 0; j < len; j++) {
- Handle<Object> e(Heap::AllocateHeapNumber(array->get(j)));
+ Handle<Object> e = Factory::NewNumber(array->get(j));
visitor->visit(j, e);
}
}
diff --git a/src/scopes.cc b/src/scopes.cc
index c4436fe..7f1987e 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -201,7 +201,6 @@
}
-
Variable* Scope::LocalLookup(Handle<String> name) {
return variables_.Lookup(name);
}
diff --git a/src/scopes.h b/src/scopes.h
index 68cf5e5..21040b7 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -34,7 +34,6 @@
namespace v8 {
namespace internal {
-
// A hash map to support fast variable declaration and lookup.
class VariableMap: public HashMap {
public:
@@ -100,8 +99,12 @@
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
- void Initialize(bool inside_with);
+ virtual void Initialize(bool inside_with);
+ // Called just before leaving a scope.
+ virtual void Leave() {
+ // No cleanup or fixup necessary.
+ }
// ---------------------------------------------------------------------------
// Declarations
@@ -272,7 +275,7 @@
bool AllowsLazyCompilation() const;
// True if the outer context of this scope is always the global context.
- bool HasTrivialOuterContext() const;
+ virtual bool HasTrivialOuterContext() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
@@ -378,20 +381,53 @@
};
+// Scope used during pre-parsing.
class DummyScope : public Scope {
public:
- DummyScope() : Scope(GLOBAL_SCOPE) {
+ DummyScope()
+ : Scope(GLOBAL_SCOPE),
+ nesting_level_(1), // Allows us to Leave the initial scope.
+ inside_with_level_(kNotInsideWith) {
outer_scope_ = this;
+ scope_inside_with_ = false;
+ }
+
+ virtual void Initialize(bool inside_with) {
+ nesting_level_++;
+ if (inside_with && inside_with_level_ == kNotInsideWith) {
+ inside_with_level_ = nesting_level_;
+ }
+ ASSERT(inside_with_level_ <= nesting_level_);
+ }
+
+ virtual void Leave() {
+ nesting_level_--;
+ ASSERT(nesting_level_ >= 0);
+ if (nesting_level_ < inside_with_level_) {
+ inside_with_level_ = kNotInsideWith;
+ }
+ ASSERT(inside_with_level_ <= nesting_level_);
}
virtual Variable* Lookup(Handle<String> name) { return NULL; }
- virtual Variable* Declare(Handle<String> name, Variable::Mode mode) {
- return NULL;
- }
+
virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
return NULL;
}
+
virtual VariableProxy* NewTemporary(Handle<String> name) { return NULL; }
+
+ virtual bool HasTrivialOuterContext() const {
+ return (nesting_level_ == 0 || inside_with_level_ <= 0);
+ }
+
+ private:
+ static const int kNotInsideWith = -1;
+ // Number of surrounding scopes of the current scope.
+ int nesting_level_;
+ // Nesting level of outermost scope that is contained in a with statement,
+ // or kNotInsideWith if there are no with's around the current scope.
+ int inside_with_level_;
};
diff --git a/src/spaces.cc b/src/spaces.cc
index 3d2d42f..d824c30 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -270,9 +270,9 @@
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-int MemoryAllocator::capacity_ = 0;
-int MemoryAllocator::size_ = 0;
-int MemoryAllocator::size_executable_ = 0;
+intptr_t MemoryAllocator::capacity_ = 0;
+intptr_t MemoryAllocator::size_ = 0;
+intptr_t MemoryAllocator::size_executable_ = 0;
List<MemoryAllocator::MemoryAllocationCallbackRegistration>
MemoryAllocator::memory_allocation_callbacks_;
@@ -302,7 +302,7 @@
}
-bool MemoryAllocator::Setup(int capacity) {
+bool MemoryAllocator::Setup(intptr_t capacity) {
capacity_ = RoundUp(capacity, Page::kPageSize);
// Over-estimate the size of chunks_ array. It assumes the expansion of old
@@ -314,7 +314,8 @@
//
// Reserve two chunk ids for semispaces, one for map space, one for old
// space, and one for code space.
- max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
+ max_nof_chunks_ =
+ static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
if (max_nof_chunks_ > kMaxNofChunks) return false;
size_ = 0;
@@ -691,7 +692,9 @@
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
float pct = static_cast<float>(capacity_ - size_) / capacity_;
- PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", used: %" V8_PTR_PREFIX "d"
+ ", available: %%%d\n\n",
capacity_, size_, static_cast<int>(pct*100));
}
#endif
@@ -769,7 +772,7 @@
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(int max_capacity,
+PagedSpace::PagedSpace(intptr_t max_capacity,
AllocationSpace id,
Executability executable)
: Space(id, executable) {
@@ -797,8 +800,9 @@
Page::kPageSize * pages_in_chunk,
this, &num_pages);
} else {
- int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
- max_capacity_ / Page::kObjectAreaSize);
+ int requested_pages =
+ Min(MemoryAllocator::kPagesPerChunk,
+ static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
first_page_ =
MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
if (!first_page_->is_valid()) return false;
@@ -984,7 +988,8 @@
// Last page must be valid and its next page is invalid.
ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
- int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
+ int available_pages =
+ static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
if (available_pages <= 0) return false;
int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
@@ -1264,7 +1269,7 @@
void NewSpace::Shrink() {
- int new_capacity = Max(InitialCapacity(), 2 * Size());
+ int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
int rounded_new_capacity =
RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
if (rounded_new_capacity < Capacity() &&
@@ -1643,7 +1648,8 @@
#ifdef DEBUG
if (FLAG_heap_stats) {
float pct = static_cast<float>(Available()) / Capacity();
- PrintF(" capacity: %d, available: %d, %%%d\n",
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Available(), static_cast<int>(pct*100));
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
@@ -2401,8 +2407,10 @@
void OldSpace::ReportStatistics() {
- int pct = Available() * 100 / Capacity();
- PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
+ int pct = static_cast<int>(Available() * 100 / Capacity());
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", waste: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
ClearHistograms();
@@ -2558,8 +2566,10 @@
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
- int pct = Available() * 100 / Capacity();
- PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
+ int pct = static_cast<int>(Available() * 100 / Capacity());
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", waste: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
ClearHistograms();
@@ -3011,7 +3021,7 @@
void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %d\n", size_);
+ PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
diff --git a/src/spaces.h b/src/spaces.h
index 94e0cd2..2fdb96f 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -371,7 +371,7 @@
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- virtual int Size() = 0;
+ virtual intptr_t Size() = 0;
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
@@ -491,7 +491,7 @@
public:
// Initializes its internal bookkeeping structures.
// Max capacity of the total space.
- static bool Setup(int max_capacity);
+ static bool Setup(intptr_t max_capacity);
// Deletes valid chunks.
static void TearDown();
@@ -582,16 +582,18 @@
MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
- static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+ static intptr_t Available() {
+ return capacity_ < size_ ? 0 : capacity_ - size_;
+ }
// Returns allocated spaces in bytes.
- static int Size() { return size_; }
+ static intptr_t Size() { return size_; }
// Returns allocated executable spaces in bytes.
- static int SizeExecutable() { return size_executable_; }
+ static intptr_t SizeExecutable() { return size_executable_; }
// Returns maximum available bytes that the old space can have.
- static int MaxAvailable() {
+ static intptr_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
@@ -649,12 +651,12 @@
private:
// Maximum space size in bytes.
- static int capacity_;
+ static intptr_t capacity_;
// Allocated space size in bytes.
- static int size_;
+ static intptr_t size_;
// Allocated executable space size in bytes.
- static int size_executable_;
+ static intptr_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -927,10 +929,10 @@
}
// Accessors for the allocation statistics.
- int Capacity() { return capacity_; }
- int Available() { return available_; }
- int Size() { return size_; }
- int Waste() { return waste_; }
+ intptr_t Capacity() { return capacity_; }
+ intptr_t Available() { return available_; }
+ intptr_t Size() { return size_; }
+ intptr_t Waste() { return waste_; }
// Grow the space by adding available bytes.
void ExpandSpace(int size_in_bytes) {
@@ -945,13 +947,13 @@
}
// Allocate from available bytes (available -> size).
- void AllocateBytes(int size_in_bytes) {
+ void AllocateBytes(intptr_t size_in_bytes) {
available_ -= size_in_bytes;
size_ += size_in_bytes;
}
// Free allocated bytes, making them available (size -> available).
- void DeallocateBytes(int size_in_bytes) {
+ void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
available_ += size_in_bytes;
}
@@ -964,23 +966,25 @@
// Consider the wasted bytes to be allocated, as they contain filler
// objects (waste -> size).
- void FillWastedBytes(int size_in_bytes) {
+ void FillWastedBytes(intptr_t size_in_bytes) {
waste_ -= size_in_bytes;
size_ += size_in_bytes;
}
private:
- int capacity_;
- int available_;
- int size_;
- int waste_;
+ intptr_t capacity_;
+ intptr_t available_;
+ intptr_t size_;
+ intptr_t waste_;
};
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
+ PagedSpace(intptr_t max_capacity,
+ AllocationSpace id,
+ Executability executable);
virtual ~PagedSpace() {}
@@ -1031,21 +1035,21 @@
}
// Current capacity without growing (Size() + Available() + Waste()).
- int Capacity() { return accounting_stats_.Capacity(); }
+ intptr_t Capacity() { return accounting_stats_.Capacity(); }
// Total amount of memory committed for this space. For paged
// spaces this equals the capacity.
- int CommittedMemory() { return Capacity(); }
+ intptr_t CommittedMemory() { return Capacity(); }
// Available bytes without growing.
- int Available() { return accounting_stats_.Available(); }
+ intptr_t Available() { return accounting_stats_.Available(); }
// Allocated bytes in this space.
- virtual int Size() { return accounting_stats_.Size(); }
+ virtual intptr_t Size() { return accounting_stats_.Size(); }
// Wasted bytes due to fragmentation and not recoverable until the
// next GC of this space.
- int Waste() { return accounting_stats_.Waste(); }
+ intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the address of the first object in this space.
Address bottom() { return first_page_->ObjectAreaStart(); }
@@ -1137,7 +1141,7 @@
protected:
// Maximum capacity of this space.
- int max_capacity_;
+ intptr_t max_capacity_;
// Accounting information for this space.
AllocationStats accounting_stats_;
@@ -1328,7 +1332,7 @@
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
- virtual int Size() {
+ virtual intptr_t Size() {
UNREACHABLE();
return 0;
}
@@ -1471,22 +1475,26 @@
}
// Return the allocated bytes in the active semispace.
- virtual int Size() { return static_cast<int>(top() - bottom()); }
+ virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+ // The same, but returning an int. We have to have the one that returns
+ // intptr_t because it is inherited, but if we know we are dealing with the
+ // new space, which can't get as big as the other spaces then this is useful:
+ int SizeAsInt() { return static_cast<int>(Size()); }
// Return the current capacity of a semispace.
- int Capacity() {
+ intptr_t Capacity() {
ASSERT(to_space_.Capacity() == from_space_.Capacity());
return to_space_.Capacity();
}
// Return the total amount of memory committed for new space.
- int CommittedMemory() {
+ intptr_t CommittedMemory() {
if (from_space_.is_committed()) return 2 * Capacity();
return Capacity();
}
// Return the available bytes without growing in the active semispace.
- int Available() { return Capacity() - Size(); }
+ intptr_t Available() { return Capacity() - Size(); }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -1681,7 +1689,7 @@
void Reset();
// Return the number of bytes available on the free list.
- int available() { return available_; }
+ intptr_t available() { return available_; }
// Place a node on the free list. The block of size 'size_in_bytes'
// starting at 'start' is placed on the free list. The return value is the
@@ -1783,7 +1791,7 @@
void Reset();
// Return the number of bytes available on the free list.
- int available() { return available_; }
+ intptr_t available() { return available_; }
// Place a node on the free list. The block starting at 'start' (assumed to
// have size object_size_) is placed on the free list. Bookkeeping
@@ -1797,7 +1805,7 @@
private:
// Available bytes on the free list.
- int available_;
+ intptr_t available_;
// The head of the free list.
Address head_;
@@ -1823,7 +1831,7 @@
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(int max_capacity,
+ explicit OldSpace(intptr_t max_capacity,
AllocationSpace id,
Executability executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) {
@@ -1832,7 +1840,7 @@
// The bytes available on the free list (ie, not above the linear allocation
// pointer).
- int AvailableFree() { return free_list_.available(); }
+ intptr_t AvailableFree() { return free_list_.available(); }
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
@@ -1893,7 +1901,7 @@
class FixedSpace : public PagedSpace {
public:
- FixedSpace(int max_capacity,
+ FixedSpace(intptr_t max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
@@ -1968,7 +1976,7 @@
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
+ MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
: FixedSpace(max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
ASSERT(max_map_space_pages < kMaxMapPageIndex);
@@ -2073,7 +2081,7 @@
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
- CellSpace(int max_capacity, AllocationSpace id)
+ CellSpace(intptr_t max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
protected:
@@ -2129,7 +2137,7 @@
// Given a chunk size, returns the object size it can accommodate. Used by
// LargeObjectSpace::Available.
- static int ObjectSizeFor(int chunk_size) {
+ static intptr_t ObjectSizeFor(intptr_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
@@ -2165,11 +2173,11 @@
Object* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space.
- int Available() {
+ intptr_t Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
- virtual int Size() {
+ virtual intptr_t Size() {
return size_;
}
@@ -2223,7 +2231,7 @@
private:
// The head of the linked list of large object chunks.
LargeObjectChunk* first_chunk_;
- int size_; // allocated bytes
+ intptr_t size_; // allocated bytes
int page_count_; // number of chunks
diff --git a/src/string-search.cc b/src/string-search.cc
new file mode 100644
index 0000000..5687443
--- /dev/null
+++ b/src/string-search.cc
@@ -0,0 +1,40 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "string-search.h"
+
+namespace v8 {
+namespace internal {
+
+// Storage for constants used by string-search.
+
+int StringSearchBase::kBadCharShiftTable[kUC16AlphabetSize];
+int StringSearchBase::kGoodSuffixShiftTable[kBMMaxShift + 1];
+int StringSearchBase::kSuffixTable[kBMMaxShift + 1];
+
+}} // namespace v8::internal
diff --git a/src/string-search.h b/src/string-search.h
index d7959c0..4412e32 100644
--- a/src/string-search.h
+++ b/src/string-search.h
@@ -32,259 +32,320 @@
namespace internal {
-// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
-// limit, we can fix the size of tables. For a needle longer than this limit,
-// search will not be optimal, since we only build tables for a smaller suffix
-// of the string, which is a safe approximation.
-static const int kBMMaxShift = 250;
-// Reduce alphabet to this size.
-// One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
-// proportional to the input alphabet. We reduce the alphabet size by
-// equating input characters modulo a smaller alphabet size. This gives
-// a potentially less efficient searching, but is a safe approximation.
-// For needles using only characters in the same Unicode 256-code point page,
-// there is no search speed degradation.
-static const int kBMAlphabetSize = 256;
-// For patterns below this length, the skip length of Boyer-Moore is too short
-// to compensate for the algorithmic overhead compared to simple brute force.
-static const int kBMMinPatternLength = 7;
+//---------------------------------------------------------------------
+// String Search object.
+//---------------------------------------------------------------------
-// Holds the two buffers used by Boyer-Moore string search's Good Suffix
-// shift. Only allows the last kBMMaxShift characters of the needle
-// to be indexed.
-class BMGoodSuffixBuffers {
+// Class holding constants and methods that apply to all string search variants,
+// independently of subject and pattern char size.
+class StringSearchBase {
+ protected:
+ // Cap on the maximal shift in the Boyer-Moore implementation. By setting a
+ // limit, we can fix the size of tables. For a needle longer than this limit,
+ // search will not be optimal, since we only build tables for a suffix
+ // of the string, but it is a safe approximation.
+ static const int kBMMaxShift = 250;
+
+ // Reduce alphabet to this size.
+ // One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
+ // proportional to the input alphabet. We reduce the alphabet size by
+ // equating input characters modulo a smaller alphabet size. This gives
+ // a potentially less efficient searching, but is a safe approximation.
+ // For needles using only characters in the same Unicode 256-code point page,
+ // there is no search speed degradation.
+ static const int kAsciiAlphabetSize = 128;
+ static const int kUC16AlphabetSize = 256;
+
+ // Bad-char shift table stored in the state. It's length is the alphabet size.
+ // For patterns below this length, the skip length of Boyer-Moore is too short
+ // to compensate for the algorithmic overhead compared to simple brute force.
+ static const int kBMMinPatternLength = 7;
+
+ static inline bool IsAsciiString(Vector<const char>) {
+ return true;
+ }
+
+ static inline bool IsAsciiString(Vector<const uc16> string) {
+ for (int i = 0, n = string.length(); i < n; i++) {
+ if (static_cast<unsigned>(string[i]) > String::kMaxAsciiCharCodeU) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // The following tables are shared by all searches.
+ // TODO(lrn): Introduce a way for a pattern to keep its tables
+ // between searches (e.g., for an Atom RegExp).
+
+ // Store for the BoyerMoore(Horspool) bad char shift table.
+ static int kBadCharShiftTable[kUC16AlphabetSize];
+ // Store for the BoyerMoore good suffix shift table.
+ static int kGoodSuffixShiftTable[kBMMaxShift + 1];
+ // Table used temporarily while building the BoyerMoore good suffix
+ // shift table.
+ static int kSuffixTable[kBMMaxShift + 1];
+};
+
+
+template <typename PatternChar, typename SubjectChar>
+class StringSearch : private StringSearchBase {
public:
- BMGoodSuffixBuffers() {}
- inline void Initialize(int needle_length) {
- ASSERT(needle_length > 1);
- int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
- int len = needle_length - start;
- biased_suffixes_ = suffixes_ - start;
- biased_good_suffix_shift_ = good_suffix_shift_ - start;
- for (int i = 0; i <= len; i++) {
- good_suffix_shift_[i] = len;
+ explicit StringSearch(Vector<const PatternChar> pattern)
+ : pattern_(pattern),
+ start_(Max(0, pattern.length() - kBMMaxShift)) {
+ if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+ if (!IsAsciiString(pattern_)) {
+ strategy_ = &FailSearch;
+ return;
+ }
+ }
+ int pattern_length = pattern_.length();
+ if (pattern_length < kBMMinPatternLength) {
+ if (pattern_length == 1) {
+ strategy_ = &SingleCharSearch;
+ return;
+ }
+ strategy_ = &LinearSearch;
+ return;
+ }
+ strategy_ = &InitialSearch;
+ }
+
+ int Search(Vector<const SubjectChar> subject, int index) {
+ return strategy_(this, subject, index);
+ }
+
+ static inline int AlphabetSize() {
+ if (sizeof(PatternChar) == 1) {
+ // ASCII needle.
+ return kAsciiAlphabetSize;
+ } else {
+ ASSERT(sizeof(PatternChar) == 2);
+ // UC16 needle.
+ return kUC16AlphabetSize;
}
}
- inline int& suffix(int index) {
- ASSERT(biased_suffixes_ + index >= suffixes_);
- return biased_suffixes_[index];
- }
- inline int& shift(int index) {
- ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
- return biased_good_suffix_shift_[index];
- }
+
private:
- int suffixes_[kBMMaxShift + 1];
- int good_suffix_shift_[kBMMaxShift + 1];
- int* biased_suffixes_;
- int* biased_good_suffix_shift_;
- DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
+ typedef int (*SearchFunction)( // NOLINT - it's not a cast!
+ StringSearch<PatternChar, SubjectChar>*,
+ Vector<const SubjectChar>,
+ int);
+
+ static int FailSearch(StringSearch<PatternChar, SubjectChar>*,
+ Vector<const SubjectChar>,
+ int) {
+ return -1;
+ }
+
+ static int SingleCharSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int LinearSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int InitialSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int BoyerMooreHorspoolSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ static int BoyerMooreSearch(StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index);
+
+ void PopulateBoyerMooreHorspoolTable();
+
+ void PopulateBoyerMooreTable();
+
+ static inline int CharOccurrence(int* bad_char_occurrence,
+ SubjectChar char_code) {
+ if (sizeof(SubjectChar) == 1) {
+ return bad_char_occurrence[static_cast<int>(char_code)];
+ }
+ if (sizeof(PatternChar) == 1) {
+ if (static_cast<unsigned char>(char_code) > String::kMaxAsciiCharCode) {
+ return -1;
+ }
+ return bad_char_occurrence[static_cast<int>(char_code)];
+ }
+ // Reduce to equivalence class.
+ int equiv_class = char_code % kUC16AlphabetSize;
+ return bad_char_occurrence[equiv_class];
+ }
+
+ // Return a table covering the last kBMMaxShift+1 positions of
+ // pattern.
+ int* bad_char_table() {
+ return kBadCharShiftTable;
+ }
+
+ int* good_suffix_shift_table() {
+ // Return biased pointer that maps the range [start_..pattern_.length()
+ // to the kGoodSuffixShiftTable array.
+ return kGoodSuffixShiftTable - start_;
+ }
+
+ int* suffix_table() {
+ // Return biased pointer that maps the range [start_..pattern_.length()
+ // to the kSuffixTable array.
+ return kSuffixTable - start_;
+ }
+
+ // The pattern to search for.
+ Vector<const PatternChar> pattern_;
+ // Pointer to implementation of the search.
+ SearchFunction strategy_;
+ // Cache value of Max(0, pattern_length() - kBMMaxShift)
+ int start_;
};
-// buffers reused by BoyerMoore
-struct BMBuffers {
- public:
- static int bad_char_occurrence[kBMAlphabetSize];
- static BMGoodSuffixBuffers bmgs_buffers;
-};
-// State of the string match tables.
-// SIMPLE: No usable content in the buffers.
-// BOYER_MOORE_HORSPOOL: The bad_char_occurence table has been populated.
-// BOYER_MOORE: The bmgs_buffers tables have also been populated.
-// Whenever starting with a new needle, one should call InitializeStringSearch
-// to determine which search strategy to use, and in the case of a long-needle
-// strategy, the call also initializes the algorithm to SIMPLE.
-enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
-static StringSearchAlgorithm algorithm;
+//---------------------------------------------------------------------
+// Single Character Pattern Search Strategy
+//---------------------------------------------------------------------
-
-// Compute the bad-char table for Boyer-Moore in the static buffer.
-template <typename PatternChar>
-static void BoyerMoorePopulateBadCharTable(Vector<const PatternChar> pattern) {
- // Only preprocess at most kBMMaxShift last characters of pattern.
- int start = Max(pattern.length() - kBMMaxShift, 0);
- // Run forwards to populate bad_char_table, so that *last* instance
- // of character equivalence class is the one registered.
- // Notice: Doesn't include the last character.
- int table_size = (sizeof(PatternChar) == 1) ? String::kMaxAsciiCharCode + 1
- : kBMAlphabetSize;
- if (start == 0) { // All patterns less than kBMMaxShift in length.
- memset(BMBuffers::bad_char_occurrence,
- -1,
- table_size * sizeof(*BMBuffers::bad_char_occurrence));
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int index) {
+ ASSERT_EQ(1, search->pattern_.length());
+ PatternChar pattern_first_char = search->pattern_[0];
+ int i = index;
+ if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+ const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + i,
+ pattern_first_char,
+ subject.length() - i));
+ if (pos == NULL) return -1;
+ return static_cast<int>(pos - subject.start());
} else {
- for (int i = 0; i < table_size; i++) {
- BMBuffers::bad_char_occurrence[i] = start - 1;
- }
- }
- for (int i = start; i < pattern.length() - 1; i++) {
- PatternChar c = pattern[i];
- int bucket = (sizeof(PatternChar) ==1) ? c : c % kBMAlphabetSize;
- BMBuffers::bad_char_occurrence[bucket] = i;
- }
-}
-
-
-template <typename PatternChar>
-static void BoyerMoorePopulateGoodSuffixTable(
- Vector<const PatternChar> pattern) {
- int m = pattern.length();
- int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
- int len = m - start;
- // Compute Good Suffix tables.
- BMBuffers::bmgs_buffers.Initialize(m);
-
- BMBuffers::bmgs_buffers.shift(m-1) = 1;
- BMBuffers::bmgs_buffers.suffix(m) = m + 1;
- PatternChar last_char = pattern[m - 1];
- int suffix = m + 1;
- {
- int i = m;
- while (i > start) {
- PatternChar c = pattern[i - 1];
- while (suffix <= m && c != pattern[suffix - 1]) {
- if (BMBuffers::bmgs_buffers.shift(suffix) == len) {
- BMBuffers::bmgs_buffers.shift(suffix) = suffix - i;
- }
- suffix = BMBuffers::bmgs_buffers.suffix(suffix);
- }
- BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
- if (suffix == m) {
- // No suffix to extend, so we check against last_char only.
- while ((i > start) && (pattern[i - 1] != last_char)) {
- if (BMBuffers::bmgs_buffers.shift(m) == len) {
- BMBuffers::bmgs_buffers.shift(m) = m - i;
- }
- BMBuffers::bmgs_buffers.suffix(--i) = m;
- }
- if (i > start) {
- BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
- }
- }
- }
- }
- if (suffix < m) {
- for (int i = start; i <= m; i++) {
- if (BMBuffers::bmgs_buffers.shift(i) == len) {
- BMBuffers::bmgs_buffers.shift(i) = suffix - start;
- }
- if (i == suffix) {
- suffix = BMBuffers::bmgs_buffers.suffix(suffix);
- }
- }
- }
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-static inline int CharOccurrence(int char_code) {
- if (sizeof(SubjectChar) == 1) {
- return BMBuffers::bad_char_occurrence[char_code];
- }
- if (sizeof(PatternChar) == 1) {
- if (char_code > String::kMaxAsciiCharCode) {
- return -1;
- }
- return BMBuffers::bad_char_occurrence[char_code];
- }
- return BMBuffers::bad_char_occurrence[char_code % kBMAlphabetSize];
-}
-
-
-// Restricted simplified Boyer-Moore string matching.
-// Uses only the bad-shift table of Boyer-Moore and only uses it
-// for the character compared to the last character of the needle.
-template <typename SubjectChar, typename PatternChar>
-static int BoyerMooreHorspool(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int start_index,
- bool* complete) {
- ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
- int n = subject.length();
- int m = pattern.length();
-
- int badness = -m;
-
- // How bad we are doing without a good-suffix table.
- int idx; // No matches found prior to this index.
- PatternChar last_char = pattern[m - 1];
- int last_char_shift =
- m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
- // Perform search
- for (idx = start_index; idx <= n - m;) {
- int j = m - 1;
- int c;
- while (last_char != (c = subject[idx + j])) {
- int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
- int shift = j - bc_occ;
- idx += shift;
- badness += 1 - shift; // at most zero, so badness cannot increase.
- if (idx > n - m) {
- *complete = true;
+ if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+ if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) {
return -1;
}
}
- j--;
- while (j >= 0 && pattern[j] == (subject[idx + j])) j--;
- if (j < 0) {
- *complete = true;
- return idx;
+ SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
+ int n = subject.length();
+ while (i < n) {
+ if (subject[i++] == search_char) return i - 1;
+ }
+ return -1;
+ }
+}
+
+//---------------------------------------------------------------------
+// Linear Search Strategy
+//---------------------------------------------------------------------
+
+
+template <typename PatternChar, typename SubjectChar>
+static inline bool CharCompare(const PatternChar* pattern,
+ const SubjectChar* subject,
+ int length) {
+ ASSERT(length > 0);
+ int pos = 0;
+ do {
+ if (pattern[pos] != subject[pos]) {
+ return false;
+ }
+ pos++;
+ } while (pos < length);
+ return true;
+}
+
+
+// Simple linear search for short patterns. Never bails out.
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::LinearSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int index) {
+ Vector<const PatternChar> pattern = search->pattern_;
+ ASSERT(pattern.length() > 1);
+ int pattern_length = pattern.length();
+ PatternChar pattern_first_char = pattern[0];
+ int i = index;
+ int n = subject.length() - pattern_length;
+ while (i <= n) {
+ if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+ const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + i,
+ pattern_first_char,
+ n - i + 1));
+ if (pos == NULL) return -1;
+ i = static_cast<int>(pos - subject.start()) + 1;
} else {
- idx += last_char_shift;
- // Badness increases by the number of characters we have
- // checked, and decreases by the number of characters we
- // can skip by shifting. It's a measure of how we are doing
- // compared to reading each character exactly once.
- badness += (m - j) - last_char_shift;
- if (badness > 0) {
- *complete = false;
- return idx;
- }
+ if (subject[i++] != pattern_first_char) continue;
+ }
+ // Loop extracted to separate function to allow using return to do
+ // a deeper break.
+ if (CharCompare(pattern.start() + 1,
+ subject.start() + i,
+ pattern_length - 1)) {
+ return i - 1;
}
}
- *complete = true;
return -1;
}
+//---------------------------------------------------------------------
+// Boyer-Moore string search
+//---------------------------------------------------------------------
-template <typename SubjectChar, typename PatternChar>
-static int BoyerMooreIndexOf(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int idx) {
- ASSERT(algorithm <= BOYER_MOORE);
- int n = subject.length();
- int m = pattern.length();
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index) {
+ Vector<const PatternChar> pattern = search->pattern_;
+ int subject_length = subject.length();
+ int pattern_length = pattern.length();
// Only preprocess at most kBMMaxShift last characters of pattern.
- int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
+ int start = search->start_;
- PatternChar last_char = pattern[m - 1];
+ int* bad_char_occurence = search->bad_char_table();
+ int* good_suffix_shift = search->good_suffix_shift_table();
+
+ PatternChar last_char = pattern[pattern_length - 1];
+ int index = start_index;
// Continue search from i.
- while (idx <= n - m) {
- int j = m - 1;
- SubjectChar c;
- while (last_char != (c = subject[idx + j])) {
- int shift = j - CharOccurrence<SubjectChar, PatternChar>(c);
- idx += shift;
- if (idx > n - m) {
+ while (index <= subject_length - pattern_length) {
+ int j = pattern_length - 1;
+ int c;
+ while (last_char != (c = subject[index + j])) {
+ int shift =
+ j - CharOccurrence(bad_char_occurence, c);
+ index += shift;
+ if (index > subject_length - pattern_length) {
return -1;
}
}
- while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
+ while (j >= 0 && pattern[j] == (c = subject[index + j])) j--;
if (j < 0) {
- return idx;
+ return index;
} else if (j < start) {
// we have matched more than our tables allow us to be smart about.
// Fall back on BMH shift.
- idx += m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
+ index += pattern_length - 1
+ - CharOccurrence(bad_char_occurence,
+ static_cast<SubjectChar>(last_char));
} else {
- int gs_shift = BMBuffers::bmgs_buffers.shift(j + 1);
- int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
+ int gs_shift = good_suffix_shift[j + 1];
+ int bc_occ =
+ CharOccurrence(bad_char_occurence, c);
int shift = j - bc_occ;
if (gs_shift > shift) {
shift = gs_shift;
}
- idx += shift;
+ index += shift;
}
}
@@ -292,18 +353,163 @@
}
-// Trivial string search for shorter strings.
-// On return, if "complete" is set to true, the return value is the
-// final result of searching for the patter in the subject.
-// If "complete" is set to false, the return value is the index where
-// further checking should start, i.e., it's guaranteed that the pattern
-// does not occur at a position prior to the returned index.
template <typename PatternChar, typename SubjectChar>
-static int SimpleIndexOf(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int idx,
- bool* complete) {
- ASSERT(pattern.length() > 1);
+void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
+ int pattern_length = pattern_.length();
+ const PatternChar* pattern = pattern_.start();
+ // Only look at the last kBMMaxShift characters of pattern (from start_
+ // to pattern_length).
+ int start = start_;
+ int length = pattern_length - start;
+
+ // Biased tables so that we can use pattern indices as table indices,
+ // even if we only cover the part of the pattern from offset start.
+ int* shift_table = good_suffix_shift_table();
+ int* suffix_table = this->suffix_table();
+
+ // Initialize table.
+ for (int i = start; i < pattern_length; i++) {
+ shift_table[i] = length;
+ }
+ shift_table[pattern_length] = 1;
+ suffix_table[pattern_length] = pattern_length + 1;
+
+ // Find suffixes.
+ PatternChar last_char = pattern[pattern_length - 1];
+ int suffix = pattern_length + 1;
+ {
+ int i = pattern_length;
+ while (i > start) {
+ PatternChar c = pattern[i - 1];
+ while (suffix <= pattern_length && c != pattern[suffix - 1]) {
+ if (shift_table[suffix] == length) {
+ shift_table[suffix] = suffix - i;
+ }
+ suffix = suffix_table[suffix];
+ }
+ suffix_table[--i] = --suffix;
+ if (suffix == pattern_length) {
+ // No suffix to extend, so we check against last_char only.
+ while ((i > start) && (pattern[i - 1] != last_char)) {
+ if (shift_table[pattern_length] == length) {
+ shift_table[pattern_length] = pattern_length - i;
+ }
+ suffix_table[--i] = pattern_length;
+ }
+ if (i > start) {
+ suffix_table[--i] = --suffix;
+ }
+ }
+ }
+ }
+ // Build shift table using suffixes.
+ if (suffix < pattern_length) {
+ for (int i = start; i <= pattern_length; i++) {
+ if (shift_table[i] == length) {
+ shift_table[i] = suffix - start;
+ }
+ if (i == suffix) {
+ suffix = suffix_table[suffix];
+ }
+ }
+ }
+}
+
+//---------------------------------------------------------------------
+// Boyer-Moore-Horspool string search.
+//---------------------------------------------------------------------
+
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int start_index) {
+ Vector<const PatternChar> pattern = search->pattern_;
+ int subject_length = subject.length();
+ int pattern_length = pattern.length();
+ int* char_occurrences = search->bad_char_table();
+ int badness = -pattern_length;
+
+ // How bad we are doing without a good-suffix table.
+ PatternChar last_char = pattern[pattern_length - 1];
+ int last_char_shift = pattern_length - 1 -
+ CharOccurrence(char_occurrences, static_cast<SubjectChar>(last_char));
+ // Perform search
+ int index = start_index; // No matches found prior to this index.
+ while (index <= subject_length - pattern_length) {
+ int j = pattern_length - 1;
+ int subject_char;
+ while (last_char != (subject_char = subject[index + j])) {
+ int bc_occ = CharOccurrence(char_occurrences, subject_char);
+ int shift = j - bc_occ;
+ index += shift;
+ badness += 1 - shift; // at most zero, so badness cannot increase.
+ if (index > subject_length - pattern_length) {
+ return -1;
+ }
+ }
+ j--;
+ while (j >= 0 && pattern[j] == (subject[index + j])) j--;
+ if (j < 0) {
+ return index;
+ } else {
+ index += last_char_shift;
+ // Badness increases by the number of characters we have
+ // checked, and decreases by the number of characters we
+ // can skip by shifting. It's a measure of how we are doing
+ // compared to reading each character exactly once.
+ badness += (pattern_length - j) - last_char_shift;
+ if (badness > 0) {
+ search->PopulateBoyerMooreTable();
+ search->strategy_ = &BoyerMooreSearch;
+ return BoyerMooreSearch(search, subject, index);
+ }
+ }
+ }
+ return -1;
+}
+
+
+template <typename PatternChar, typename SubjectChar>
+void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
+ int pattern_length = pattern_.length();
+
+ int* bad_char_occurrence = bad_char_table();
+
+ // Only preprocess at most kBMMaxShift last characters of pattern.
+ int start = start_;
+ // Run forwards to populate bad_char_table, so that *last* instance
+ // of character equivalence class is the one registered.
+ // Notice: Doesn't include the last character.
+ int table_size = AlphabetSize();
+ if (start == 0) { // All patterns less than kBMMaxShift in length.
+ memset(bad_char_occurrence,
+ -1,
+ table_size * sizeof(*bad_char_occurrence));
+ } else {
+ for (int i = 0; i < table_size; i++) {
+ bad_char_occurrence[i] = start - 1;
+ }
+ }
+ for (int i = start; i < pattern_length - 1; i++) {
+ PatternChar c = pattern_[i];
+ int bucket = (sizeof(PatternChar) == 1) ? c : c % AlphabetSize();
+ bad_char_occurrence[bucket] = i;
+ }
+}
+
+//---------------------------------------------------------------------
+// Linear string search with bailout to BMH.
+//---------------------------------------------------------------------
+
+// Simple linear search for short patterns, which bails out if the string
+// isn't found very early in the subject. Upgrades to BoyerMooreHorspool.
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::InitialSearch(
+ StringSearch<PatternChar, SubjectChar>* search,
+ Vector<const SubjectChar> subject,
+ int index) {
+ Vector<const PatternChar> pattern = search->pattern_;
int pattern_length = pattern.length();
// Badness is a count of how much work we have done. When we have
// done enough work we decide it's probably worth switching to a better
@@ -313,149 +519,52 @@
// We know our pattern is at least 2 characters, we cache the first so
// the common case of the first character not matching is faster.
PatternChar pattern_first_char = pattern[0];
- for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
+ for (int i = index, n = subject.length() - pattern_length; i <= n; i++) {
badness++;
- if (badness > 0) {
- *complete = false;
- return i;
- }
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) {
- *complete = true;
- return -1;
+ if (badness <= 0) {
+ if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+ const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + i,
+ pattern_first_char,
+ n - i + 1));
+ if (pos == NULL) {
+ return -1;
+ }
+ i = static_cast<int>(pos - subject.start());
+ } else {
+ if (subject[i] != pattern_first_char) continue;
}
- i = static_cast<int>(pos - subject.start());
+ int j = 1;
+ do {
+ if (pattern[j] != subject[i + j]) {
+ break;
+ }
+ j++;
+ } while (j < pattern_length);
+ if (j == pattern_length) {
+ return i;
+ }
+ badness += j;
} else {
- if (subject[i] != pattern_first_char) continue;
- }
- int j = 1;
- do {
- if (pattern[j] != subject[i+j]) {
- break;
- }
- j++;
- } while (j < pattern_length);
- if (j == pattern_length) {
- *complete = true;
- return i;
- }
- badness += j;
- }
- *complete = true;
- return -1;
-}
-
-// Simple indexOf that never bails out. For short patterns only.
-template <typename PatternChar, typename SubjectChar>
-static int SimpleIndexOf(Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int idx) {
- int pattern_length = pattern.length();
- PatternChar pattern_first_char = pattern[0];
- for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) return -1;
- i = static_cast<int>(pos - subject.start());
- } else {
- if (subject[i] != pattern_first_char) continue;
- }
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i+j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
+ search->PopulateBoyerMooreHorspoolTable();
+ search->strategy_ = &BoyerMooreHorspoolSearch;
+ return BoyerMooreHorspoolSearch(search, subject, i);
}
}
return -1;
}
-// Strategy for searching for a string in another string.
-enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
-
-
-template <typename PatternChar>
-static inline StringSearchStrategy InitializeStringSearch(
- Vector<const PatternChar> pat, bool ascii_subject) {
- // We have an ASCII haystack and a non-ASCII needle. Check if there
- // really is a non-ASCII character in the needle and bail out if there
- // is.
- if (ascii_subject && sizeof(PatternChar) > 1) {
- for (int i = 0; i < pat.length(); i++) {
- uc16 c = pat[i];
- if (c > String::kMaxAsciiCharCode) {
- return SEARCH_FAIL;
- }
- }
- }
- if (pat.length() < kBMMinPatternLength) {
- return SEARCH_SHORT;
- }
- algorithm = SIMPLE_SEARCH;
- return SEARCH_LONG;
-}
-
-
-// Dispatch long needle searches to different algorithms.
+// Perform a single stand-alone search.
+// If searching multiple times for the same pattern, a search
+// object should be constructed once and the Search function then called
+// for each search.
template <typename SubjectChar, typename PatternChar>
-static int ComplexIndexOf(Vector<const SubjectChar> sub,
- Vector<const PatternChar> pat,
- int start_index) {
- ASSERT(pat.length() >= kBMMinPatternLength);
- // Try algorithms in order of increasing setup cost and expected performance.
- bool complete;
- int idx = start_index;
- switch (algorithm) {
- case SIMPLE_SEARCH:
- idx = SimpleIndexOf(sub, pat, idx, &complete);
- if (complete) return idx;
- BoyerMoorePopulateBadCharTable(pat);
- algorithm = BOYER_MOORE_HORSPOOL;
- // FALLTHROUGH.
- case BOYER_MOORE_HORSPOOL:
- idx = BoyerMooreHorspool(sub, pat, idx, &complete);
- if (complete) return idx;
- // Build the Good Suffix table and continue searching.
- BoyerMoorePopulateGoodSuffixTable(pat);
- algorithm = BOYER_MOORE;
- // FALLTHROUGH.
- case BOYER_MOORE:
- return BoyerMooreIndexOf(sub, pat, idx);
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Dispatch to different search strategies for a single search.
-// If searching multiple times on the same needle, the search
-// strategy should only be computed once and then dispatch to different
-// loops.
-template <typename SubjectChar, typename PatternChar>
-static int StringSearch(Vector<const SubjectChar> sub,
- Vector<const PatternChar> pat,
+static int SearchString(Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
int start_index) {
- bool ascii_subject = (sizeof(SubjectChar) == 1);
- StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
- switch (strategy) {
- case SEARCH_FAIL: return -1;
- case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
- case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
- }
- UNREACHABLE();
- return -1;
+ StringSearch<PatternChar, SubjectChar> search(pattern);
+ return search.Search(subject, start_index);
}
}} // namespace v8::internal
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 6b41577..e6df1b4 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -988,6 +988,7 @@
Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
JSObject* receiver = JSObject::cast(args[0]);
+ ASSERT(Smi::cast(args[1])->value() >= 0);
uint32_t index = Smi::cast(args[1])->value();
return receiver->GetElementWithInterceptor(receiver, index);
}
diff --git a/src/stub-cache.h b/src/stub-cache.h
index c47cab7..e4a9e95 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -626,7 +626,8 @@
V(String.prototype, charCodeAt, StringCharCodeAt) \
V(String.prototype, charAt, StringCharAt) \
V(String, fromCharCode, StringFromCharCode) \
- V(Math, floor, MathFloor)
+ V(Math, floor, MathFloor) \
+ V(Math, abs, MathAbs)
class CallStubCompiler: public StubCompiler {
diff --git a/src/utils.h b/src/utils.h
index fefbfe9..ffdb639 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -222,11 +222,21 @@
// ----------------------------------------------------------------------------
// I/O support.
-// Our version of printf(). Avoids compilation errors that we get
-// with standard printf when attempting to print pointers, etc.
-// (the errors are due to the extra compilation flags, which we
-// want elsewhere).
-void PrintF(const char* format, ...);
+#if __GNUC__ >= 4
+// On gcc we can ask the compiler to check the types of %d-style format
+// specifiers and their associated arguments. TODO(erikcorry) fix this
+// so it works on MacOSX.
+#if defined(__MACH__) && defined(__APPLE__)
+#define PRINTF_CHECKING
+#else  // MacOSX.
+#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
+#endif
+#else
+#define PRINTF_CHECKING
+#endif
+
+// Our version of printf().
+void PRINTF_CHECKING PrintF(const char* format, ...);
// Our version of fflush.
void Flush();
diff --git a/src/version.cc b/src/version.cc
index ad5e1d6..c284123 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 4
-#define BUILD_NUMBER 6
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 62e7691..814da76 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -893,8 +893,9 @@
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
+ // Check that the key is an array index, that is Uint32.
+ STATIC_ASSERT(kSmiValueSize <= 32);
+ __ JumpIfNotPositiveSmi(rax, &slow);
// Get the map of the receiver.
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 68b18a2..eb48da9 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1558,6 +1558,109 @@
}
+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into rax.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(rax, ¬_smi);
+ __ SmiToInteger32(rax, rax);
+
+ // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
+ // otherwise.
+ __ movl(rbx, rax);
+ __ sarl(rbx, Immediate(kBitsPerInt - 1));
+
+ // Do bitwise not or do nothing depending on ebx.
+ __ xorl(rax, rbx);
+
+ // Add 1 or do nothing depending on ebx.
+ __ subl(rax, rbx);
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ j(negative, &slow);
+
+ // Smi case done.
+ __ Integer32ToSmi(rax, rax);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is a heap number and load its value.
+ __ bind(¬_smi);
+ __ CheckMap(rax, Factory::heap_number_map(), &slow, true);
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ const int sign_mask_shift =
+ (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
+ __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
+ RelocInfo::NONE);
+ __ testq(rbx, rdi);
+ __ j(not_zero, &negative_sign);
+ __ ret(2 * kPointerSize);
+
+ // If the argument is negative, clear the sign, and return a new
+ // number. We still have the sign mask in rdi.
+ __ bind(&negative_sign);
+ __ xor_(rbx, rdi);
+ __ AllocateHeapNumber(rax, rdx, &slow);
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // rcx: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {