Version 1.3.2.
Started new compiler infrastructure for two-pass compilation using a control flow graph constructed from the AST.
Profiler stack sampling for X64.
Safe handling of NaN values passed to POSIX platform-dependent time functions.
Added a new profiler control API to unify control over the various aspects of profiling.
Fixed issue 392.
git-svn-id: http://v8.googlecode.com/svn/trunk@2624 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
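
For context, the unified profiler control surfaces through the existing V8::PauseProfilerEx/ResumeProfilerEx entry points. A minimal sketch of embedder usage, assuming the ProfilerModules flags declared in v8.h (ENABLE_LOGGING_AND_PROFILING build assumed, error handling omitted):

  #include "v8.h"

  void ProfileWithSnapshot() {
    // Turn on CPU tick sampling only.
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU);
    // ... run some JavaScript ...
    // One-shot heap snapshot: the heap modules are resumed, a full GC
    // runs, and modules that were not already active are paused again.
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_HEAP_SNAPSHOT |
                             v8::PROFILER_MODULE_HEAP_STATS |
                             v8::PROFILER_MODULE_JS_CONSTRUCTORS);
    // Stop CPU sampling.
    v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU);
    int active = v8::V8::GetActiveProfilerModules();
    // active should now be v8::PROFILER_MODULE_NONE.
  }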
diff --git a/src/SConscript b/src/SConscript
index f9f9634..a9669a1 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -36,25 +36,26 @@
SOURCES = {
'all': [
'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
- 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
- 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
- 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
- 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
- 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
- 'global-handles.cc', 'handles.cc', 'hashmap.cc',
- 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
- 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
- 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
- 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
- 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc', 'runtime.cc',
- 'scanner.cc', 'scopeinfo.cc', 'scopes.cc', 'serialize.cc',
- 'snapshot-common.cc', 'spaces.cc', 'string-stream.cc', 'stub-cache.cc',
- 'token.cc', 'top.cc', 'unicode.cc', 'usage-analyzer.cc', 'utils.cc',
- 'v8-counters.cc', 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
+ 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
+ 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
+ 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
+ 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
+ 'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
+ 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
+ 'hashmap.cc', 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc',
+ 'jsregexp.cc', 'jump-target.cc', 'log.cc', 'log-utils.cc',
+ 'mark-compact.cc', 'messages.cc', 'objects.cc', 'oprofile-agent.cc',
+ 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
+ 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+ 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+ 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+ 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+ 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
+ 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
- 'arm/assembler-arm.cc', 'arm/builtins-arm.cc',
+ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
'arm/codegen-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
@@ -63,7 +64,7 @@
'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
@@ -72,7 +73,7 @@
'ia32/virtual-frame-ia32.cc'
],
'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc',
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
diff --git a/src/api.cc b/src/api.cc
index 0828101..9bc623a 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3214,21 +3214,21 @@
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler();
+ i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::ResumeProfiler();
+ i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
#endif
}
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return i::Logger::IsProfilerPaused();
+ return i::Logger::GetActiveProfilerModules() & PROFILER_MODULE_CPU;
#else
return true;
#endif
@@ -3237,11 +3237,19 @@
void V8::ResumeProfilerEx(int flags) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (flags & PROFILER_MODULE_CPU) {
- i::Logger::ResumeProfiler();
- }
- if (flags & (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- i::FLAG_log_gc = true;
+ if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
+ // Snapshot mode: resume modules, perform GC, then pause only
+ // those modules which haven't been started prior to making a
+ // snapshot.
+
+ // Reset snapshot flag and CPU module flags.
+ flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
+ const int current_flags = i::Logger::GetActiveProfilerModules();
+ i::Logger::ResumeProfiler(flags);
+ i::Heap::CollectAllGarbage();
+ i::Logger::PauseProfiler(~current_flags & flags);
+ } else {
+ i::Logger::ResumeProfiler(flags);
}
#endif
}
@@ -3249,26 +3257,14 @@
void V8::PauseProfilerEx(int flags) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (flags & PROFILER_MODULE_CPU) {
- i::Logger::PauseProfiler();
- }
- if (flags & (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- i::FLAG_log_gc = false;
- }
+ i::Logger::PauseProfiler(flags);
#endif
}
int V8::GetActiveProfilerModules() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- int result = PROFILER_MODULE_NONE;
- if (!i::Logger::IsProfilerPaused()) {
- result |= PROFILER_MODULE_CPU;
- }
- if (i::FLAG_log_gc) {
- result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
- }
- return result;
+ return i::Logger::GetActiveProfilerModules();
#else
return PROFILER_MODULE_NONE;
#endif
diff --git a/src/arm/cfg-arm.cc b/src/arm/cfg-arm.cc
new file mode 100644
index 0000000..109067b
--- /dev/null
+++ b/src/arm/cfg-arm.cc
@@ -0,0 +1,124 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "macro-assembler-arm.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ mov(ip, Operand(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(r0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ add(sp, sp, Operand((count + 1) * kPointerSize));
+ __ Jump(lr);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ Comment cmnt(masm, "[ ReturnInstr");
+ value_->ToRegister(masm, r0);
+}
+
+
+void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+ __ mov(reg, Operand(handle_));
+}
+
+
+void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
+ switch (type_) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ldr(reg, MemOperand(fp, (1 + count - index_) * kPointerSize));
+ break;
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ __ ldr(reg, MemOperand(fp, kOffset - index_ * kPointerSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
index 406d43d..3a309ac 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -821,9 +821,6 @@
return (variable == NULL) ? false : variable->is_arguments();
}
- // If this assertion fails it means that some code has tried to
- // treat the special "this" variable as an ordinary variable with
- // the name "this".
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
UseCount* var_uses() { return &var_uses_; }
diff --git a/src/cfg.cc b/src/cfg.cc
new file mode 100644
index 0000000..bad1441
--- /dev/null
+++ b/src/cfg.cc
@@ -0,0 +1,485 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "cfg.h"
+#include "scopeinfo.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+CfgGlobals* CfgGlobals::top_ = NULL;
+
+
+CfgGlobals::CfgGlobals(FunctionLiteral* fun)
+ : global_fun_(fun),
+ global_exit_(new ExitNode()),
+#ifdef DEBUG
+ node_counter_(0),
+#endif
+ previous_(top_) {
+ top_ = this;
+}
+
+
+#define BAILOUT(reason) \
+ do { return NULL; } while (false)
+
+Cfg* Cfg::Build() {
+ FunctionLiteral* fun = CfgGlobals::current()->fun();
+ if (fun->scope()->num_heap_slots() > 0) {
+ BAILOUT("function has context slots");
+ }
+ if (fun->scope()->arguments() != NULL) {
+ BAILOUT("function uses .arguments");
+ }
+
+ ZoneList<Statement*>* body = fun->body();
+ if (body->is_empty()) {
+ BAILOUT("empty function body");
+ }
+
+ StatementBuilder builder;
+ builder.VisitStatements(body);
+ Cfg* cfg = builder.cfg();
+ if (cfg == NULL) {
+ BAILOUT("unsupported statement type");
+ }
+ if (cfg->has_exit()) {
+ BAILOUT("control path without explicit return");
+ }
+ cfg->PrependEntryNode();
+ return cfg;
+}
+
+#undef BAILOUT
+
+
+void Cfg::PrependEntryNode() {
+ ASSERT(!is_empty());
+ entry_ = new EntryNode(InstructionBlock::cast(entry()));
+}
+
+
+void Cfg::Append(Instruction* instr) {
+ ASSERT(has_exit());
+ ASSERT(!is_empty());
+ InstructionBlock::cast(exit_)->Append(instr);
+}
+
+
+void Cfg::AppendReturnInstruction(Value* value) {
+ Append(new ReturnInstr(value));
+ ExitNode* global_exit = CfgGlobals::current()->exit();
+ InstructionBlock::cast(exit_)->set_successor(global_exit);
+ exit_ = NULL;
+}
+
+
+void InstructionBlock::Unmark() {
+ if (is_marked_) {
+ is_marked_ = false;
+ successor_->Unmark();
+ }
+}
+
+
+void EntryNode::Unmark() {
+ if (is_marked_) {
+ is_marked_ = false;
+ successor_->Unmark();
+ }
+}
+
+
+void ExitNode::Unmark() {
+ is_marked_ = false;
+}
+
+
+Handle<Code> Cfg::Compile(Handle<Script> script) {
+ const int kInitialBufferSize = 4 * KB;
+ MacroAssembler* masm = new MacroAssembler(NULL, kInitialBufferSize);
+ entry()->Compile(masm);
+ entry()->Unmark();
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ FunctionLiteral* fun = CfgGlobals::current()->fun();
+ ZoneScopeInfo info(fun->scope());
+ InLoopFlag in_loop = fun->loop_nesting() ? IN_LOOP : NOT_IN_LOOP;
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
+ Handle<Code> code = Factory::NewCode(desc, &info, flags, masm->CodeObject());
+
+ // Add unresolved entries in the code to the fixup list.
+ Bootstrapper::AddFixup(*code, masm);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code) {
+ // Print the source code if available.
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ StringInputBuffer stream(String::cast(script->source()));
+ stream.Seek(fun->start_position());
+ // fun->end_position() points to the last character in the
+ // stream. We need to compensate by adding one to calculate the
+ // length.
+ int source_len = fun->end_position() - fun->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.has_more()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
+ }
+ PrintF("--- Code ---\n");
+ code->Disassemble(*fun->name()->ToCString());
+ }
+#endif
+
+ return code;
+}
+
+
+// The expression builder should not be used for declarations or statements.
+void ExpressionBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
+
+#define DEFINE_VISIT(type) \
+ void ExpressionBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
+STATEMENT_NODE_LIST(DEFINE_VISIT)
+#undef DEFINE_VISIT
+
+
+// Macros (temporarily) handling unsupported expression types.
+#define BAILOUT(reason) \
+ do { \
+ value_ = NULL; \
+ return; \
+ } while (false)
+
+#define CHECK_BAILOUT() \
+ if (value_ == NULL) { return; } else {}
+
+void ExpressionBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ BAILOUT("FunctionLiteral");
+}
+
+
+void ExpressionBuilder::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void ExpressionBuilder::VisitConditional(Conditional* expr) {
+ BAILOUT("Conditional");
+}
+
+
+void ExpressionBuilder::VisitSlot(Slot* expr) {
+ BAILOUT("Slot");
+}
+
+
+void ExpressionBuilder::VisitVariableProxy(VariableProxy* expr) {
+ Expression* rewrite = expr->var()->rewrite();
+ if (rewrite == NULL || rewrite->AsSlot() == NULL) {
+ BAILOUT("unsupported variable (not a slot)");
+ }
+ Slot* slot = rewrite->AsSlot();
+ if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
+ BAILOUT("unsupported slot type (not a parameter or local)");
+ }
+ value_ = new SlotLocation(slot->type(), slot->index());
+}
+
+
+void ExpressionBuilder::VisitLiteral(Literal* expr) {
+ value_ = new Constant(expr->handle());
+}
+
+
+void ExpressionBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+ BAILOUT("RegExpLiteral");
+}
+
+
+void ExpressionBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+ BAILOUT("ObjectLiteral");
+}
+
+
+void ExpressionBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ BAILOUT("ArrayLiteral");
+}
+
+
+void ExpressionBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ BAILOUT("CatchExtensionObject");
+}
+
+
+void ExpressionBuilder::VisitAssignment(Assignment* expr) {
+ BAILOUT("Assignment");
+}
+
+
+void ExpressionBuilder::VisitThrow(Throw* expr) {
+ BAILOUT("Throw");
+}
+
+
+void ExpressionBuilder::VisitProperty(Property* expr) {
+ BAILOUT("Property");
+}
+
+
+void ExpressionBuilder::VisitCall(Call* expr) {
+ BAILOUT("Call");
+}
+
+
+void ExpressionBuilder::VisitCallEval(CallEval* expr) {
+ BAILOUT("CallEval");
+}
+
+
+void ExpressionBuilder::VisitCallNew(CallNew* expr) {
+ BAILOUT("CallNew");
+}
+
+
+void ExpressionBuilder::VisitCallRuntime(CallRuntime* expr) {
+ BAILOUT("CallRuntime");
+}
+
+
+void ExpressionBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+ BAILOUT("UnaryOperation");
+}
+
+
+void ExpressionBuilder::VisitCountOperation(CountOperation* expr) {
+ BAILOUT("CountOperation");
+}
+
+
+void ExpressionBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+ BAILOUT("BinaryOperation");
+}
+
+
+void ExpressionBuilder::VisitCompareOperation(CompareOperation* expr) {
+ BAILOUT("CompareOperation");
+}
+
+
+void ExpressionBuilder::VisitThisFunction(ThisFunction* expr) {
+ BAILOUT("ThisFunction");
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
+// Macros (temporarily) handling unsupported statement types.
+#define BAILOUT(reason) \
+ do { \
+ cfg_ = NULL; \
+ return; \
+ } while (false)
+
+#define CHECK_BAILOUT() \
+ if (cfg_ == NULL) { return; } else {}
+
+void StatementBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ CHECK_BAILOUT();
+ if (!cfg_->has_exit()) return;
+ }
+}
+
+
+// The statement builder should not be used for declarations or expressions.
+void StatementBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
+
+#define DEFINE_VISIT(type) \
+ void StatementBuilder::Visit##type(type* expr) { UNREACHABLE(); }
+EXPRESSION_NODE_LIST(DEFINE_VISIT)
+#undef DEFINE_VISIT
+
+
+void StatementBuilder::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void StatementBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ BAILOUT("ExpressionStatement");
+}
+
+
+void StatementBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void StatementBuilder::VisitIfStatement(IfStatement* stmt) {
+ BAILOUT("IfStatement");
+}
+
+
+void StatementBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ BAILOUT("ContinueStatement");
+}
+
+
+void StatementBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ BAILOUT("BreakStatement");
+}
+
+
+void StatementBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ ExpressionBuilder builder;
+ builder.Visit(stmt->expression());
+ Value* value = builder.value();
+ if (value == NULL) BAILOUT("unsupported expression type");
+ cfg_->AppendReturnInstruction(value);
+}
+
+
+void StatementBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ BAILOUT("WithEnterStatement");
+}
+
+
+void StatementBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+ BAILOUT("WithExitStatement");
+}
+
+
+void StatementBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+ BAILOUT("SwitchStatement");
+}
+
+
+void StatementBuilder::VisitLoopStatement(LoopStatement* stmt) {
+ BAILOUT("LoopStatement");
+}
+
+
+void StatementBuilder::VisitForInStatement(ForInStatement* stmt) {
+ BAILOUT("ForInStatement");
+}
+
+
+void StatementBuilder::VisitTryCatch(TryCatch* stmt) {
+ BAILOUT("TryCatch");
+}
+
+
+void StatementBuilder::VisitTryFinally(TryFinally* stmt) {
+ BAILOUT("TryFinally");
+}
+
+
+void StatementBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ BAILOUT("DebuggerStatement");
+}
+
+
+#ifdef DEBUG
+// CFG printing support (via depth-first, preorder block traversal).
+
+void Cfg::Print() {
+ entry_->Print();
+ entry_->Unmark();
+}
+
+
+void Constant::Print() {
+ PrintF("Constant(");
+ handle_->Print();
+ PrintF(")");
+}
+
+
+void SlotLocation::Print() {
+ PrintF("Slot(");
+ switch (type_) {
+ case Slot::PARAMETER:
+ PrintF("PARAMETER, %d)", index_);
+ break;
+ case Slot::LOCAL:
+ PrintF("LOCAL, %d)", index_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ReturnInstr::Print() {
+ PrintF("Return ");
+ value_->Print();
+ PrintF("\n");
+}
+
+
+void InstructionBlock::Print() {
+ if (!is_marked_) {
+ is_marked_ = true;
+ PrintF("L%d:\n", number());
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ instructions_[i]->Print();
+ }
+ PrintF("Goto L%d\n\n", successor_->number());
+ successor_->Print();
+ }
+}
+
+
+void EntryNode::Print() {
+ if (!is_marked_) {
+ is_marked_ = true;
+ successor_->Print();
+ }
+}
+
+
+void ExitNode::Print() {
+ if (!is_marked_) {
+ is_marked_ = true;
+ PrintF("L%d:\nExit\n\n", number());
+ }
+}
+
+#endif // DEBUG
+
+} } // namespace v8::internal
diff --git a/src/cfg.h b/src/cfg.h
new file mode 100644
index 0000000..fb732dd
--- /dev/null
+++ b/src/cfg.h
@@ -0,0 +1,385 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CFG_H_
+#define V8_CFG_H_
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+class ExitNode;
+
+// A convenient class to keep 'global' values when building a CFG. Since
+// CFG construction can be invoked recursively, CFG globals are stacked.
+class CfgGlobals BASE_EMBEDDED {
+ public:
+ explicit CfgGlobals(FunctionLiteral* fun);
+
+ ~CfgGlobals() { top_ = previous_; }
+
+ static CfgGlobals* current() {
+ ASSERT(top_ != NULL);
+ return top_;
+ }
+
+ FunctionLiteral* fun() { return global_fun_; }
+
+ ExitNode* exit() { return global_exit_; }
+
+#ifdef DEBUG
+ int next_number() { return node_counter_++; }
+#endif
+
+ private:
+ static CfgGlobals* top_;
+
+ // Function literal currently compiling.
+ FunctionLiteral* global_fun_;
+
+ // Shared global exit node for all returns from the same function.
+ ExitNode* global_exit_;
+
+#ifdef DEBUG
+ // Used to number nodes when printing.
+ int node_counter_;
+#endif
+
+ CfgGlobals* previous_;
+};
+
+
+// Values appear in instructions. They represent trivial source
+// expressions: ones that have no side effects and require no code to be
+// generated.
+class Value : public ZoneObject {
+ public:
+ virtual ~Value() {}
+
+ virtual void ToRegister(MacroAssembler* masm, Register reg) = 0;
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+};
+
+
+// A compile-time constant that appeared as a literal in the source AST.
+class Constant : public Value {
+ public:
+ explicit Constant(Handle<Object> handle) : handle_(handle) {}
+
+ virtual ~Constant() {}
+
+ void ToRegister(MacroAssembler* masm, Register reg);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Handle<Object> handle_;
+};
+
+
+// Locations are values that can be stored into ('lvalues').
+class Location : public Value {
+ public:
+ virtual ~Location() {}
+
+ virtual void ToRegister(MacroAssembler* masm, Register reg) = 0;
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+};
+
+
+// SlotLocations represent parameters and stack-allocated (i.e.,
+// non-context) local variables.
+class SlotLocation : public Location {
+ public:
+ SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
+
+ void ToRegister(MacroAssembler* masm, Register reg);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Slot::Type type_;
+ int index_;
+};
+
+
+// Instructions are computations. They represent non-trivial source
+// expressions: typically ones that have side effects and require code to
+// be generated.
+class Instruction : public ZoneObject {
+ public:
+ virtual ~Instruction() {}
+
+ virtual void Compile(MacroAssembler* masm) = 0;
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+};
+
+
+// Return a value.
+class ReturnInstr : public Instruction {
+ public:
+ explicit ReturnInstr(Value* value) : value_(value) {}
+
+ virtual ~ReturnInstr() {}
+
+ void Compile(MacroAssembler* masm);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Value* value_;
+};
+
+
+// Nodes make up control-flow graphs. They comprise single-entry,
+// single-exit blocks of instructions and administrative nodes that form
+// the graph structure.
+class CfgNode : public ZoneObject {
+ public:
+ CfgNode() : is_marked_(false) {
+#ifdef DEBUG
+ number_ = -1;
+#endif
+ }
+
+ virtual ~CfgNode() {}
+
+ bool is_marked() { return is_marked_; }
+
+ virtual bool is_block() { return false; }
+
+ virtual void Unmark() = 0;
+
+ virtual void Compile(MacroAssembler* masm) = 0;
+
+#ifdef DEBUG
+ int number() {
+ if (number_ == -1) number_ = CfgGlobals::current()->next_number();
+ return number_;
+ }
+
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ bool is_marked_;
+
+#ifdef DEBUG
+ int number_;
+#endif
+};
+
+
+// A block is a single-entry, single-exit block of instructions.
+class InstructionBlock : public CfgNode {
+ public:
+ InstructionBlock() : successor_(NULL), instructions_(4) {}
+
+ virtual ~InstructionBlock() {}
+
+ static InstructionBlock* cast(CfgNode* node) {
+ ASSERT(node->is_block());
+ return reinterpret_cast<InstructionBlock*>(node);
+ }
+
+ void set_successor(CfgNode* succ) {
+ ASSERT(successor_ == NULL);
+ successor_ = succ;
+ }
+
+ bool is_block() { return true; }
+
+ void Unmark();
+
+ void Compile(MacroAssembler* masm);
+
+ void Append(Instruction* instr) { instructions_.Add(instr); }
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ CfgNode* successor_;
+ ZoneList<Instruction*> instructions_;
+};
+
+
+// The CFG for a function has a distinguished entry node. It has no
+// predecessors and a single successor. The successor is the block
+// containing the function's first instruction.
+class EntryNode : public CfgNode {
+ public:
+ explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
+
+ virtual ~EntryNode() {}
+
+ void Unmark();
+
+ void Compile(MacroAssembler* masm);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ InstructionBlock* successor_;
+};
+
+
+// The CFG for a function has a distinguished exit node. It has no
+// successor and arbitrarily many predecessors. The predecessors are all
+// the blocks returning from the function.
+class ExitNode : public CfgNode {
+ public:
+ ExitNode() {}
+
+ virtual ~ExitNode() {}
+
+ void Unmark();
+
+ void Compile(MacroAssembler* masm);
+
+#ifdef DEBUG
+ void Print();
+#endif
+};
+
+
+// A CFG consists of a linked structure of nodes. It has a single entry
+// node and optionally an exit node. There is a distinguished global exit
+// node that is used as the successor of all blocks that return from the
+// function.
+//
+// Fragments of control-flow graphs, produced when traversing the statements
+// and expressions in the source AST, are represented by the same class.
+// They have instruction blocks as both their entry and exit (if there is
+// one). Instructions can always be prepended or appended to fragments, and
+// fragments can always be concatenated.
+//
+// A singleton CFG fragment (i.e., with only one node) has the same node as
+// both entry and exit (if the exit is available).
+class Cfg : public ZoneObject {
+ public:
+ // Create a singleton CFG fragment.
+ explicit Cfg(InstructionBlock* block) : entry_(block), exit_(block) {}
+
+ // Build the CFG for a function.
+ static Cfg* Build();
+
+ // The entry and exit nodes.
+ CfgNode* entry() { return entry_; }
+ CfgNode* exit() { return exit_; }
+
+ // True if the CFG has no nodes.
+ bool is_empty() { return entry_ == NULL; }
+
+ // True if the CFG has an available exit node (i.e., it can be appended or
+ // concatenated to).
+ bool has_exit() { return exit_ != NULL; }
+
+ // Add an entry node to a CFG fragment. It is no longer a fragment
+ // (instructions cannot be prepended).
+ void PrependEntryNode();
+
+ // Append an instruction to the end of a CFG fragment. Assumes it has an
+ // available exit.
+ void Append(Instruction* instr);
+
+ // Appends a return instruction to the end of a CFG fragment. It no
+ // longer has an available exit node.
+ void AppendReturnInstruction(Value* value);
+
+ Handle<Code> Compile(Handle<Script> script);
+
+#ifdef DEBUG
+ // Support for printing.
+ void Print();
+#endif
+
+ private:
+ // Entry and exit nodes.
+ CfgNode* entry_;
+ CfgNode* exit_;
+};
+
+
+// An ExpressionBuilder traverses a trivial expression and returns a value.
+class ExpressionBuilder : public AstVisitor {
+ public:
+ ExpressionBuilder() : value_(new Constant(Handle<Object>::null())) {}
+
+ Value* value() { return value_; }
+
+ // AST node visitors.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+ Value* value_;
+};
+
+
+// A StatementBuilder traverses a statement and returns a CFG.
+class StatementBuilder : public AstVisitor {
+ public:
+ StatementBuilder() : cfg_(new Cfg(new InstructionBlock())) {}
+
+ Cfg* cfg() { return cfg_; }
+
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visitors.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+ Cfg* cfg_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_CFG_H_
diff --git a/src/compiler.cc b/src/compiler.cc
index aecdfb9..f0d97fe 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "cfg.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
@@ -78,6 +79,22 @@
return Handle<Code>::null();
}
+ if (FLAG_multipass) {
+ CfgGlobals scope(literal);
+ Cfg* cfg = Cfg::Build();
+#ifdef DEBUG
+ if (FLAG_print_cfg && cfg != NULL) {
+ SmartPointer<char> name = literal->name()->ToCString();
+ PrintF("Function \"%s\":\n", *name);
+ cfg->Print();
+ PrintF("\n");
+ }
+#endif
+ if (cfg != NULL) {
+ return cfg->Compile(script);
+ }
+ }
+
// Generate code and return it.
Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
return result;
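
As a worked example of the new pipeline: for a function such as function id(x) { return x; }, the builders produce EntryNode -> InstructionBlock[Return Slot(PARAMETER, 0)] -> ExitNode, and running a debug build with --multipass --print_cfg should print something close to:

  Function "id":
  L0:
  Return Slot(PARAMETER, 0)
  Goto L1

  L1:
  Exit

(The numbering comes from CfgGlobals::next_number(); anything beyond trivial parameter/local returns currently bails out to the one-pass code generator.)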
diff --git a/src/date-delay.js b/src/date-delay.js
index 6adde46..0778dc9 100644
--- a/src/date-delay.js
+++ b/src/date-delay.js
@@ -156,6 +156,7 @@
// NOTE: The implementation relies on the fact that no time zones have
// more than one daylight savings offset change per month.
+// If this function is called with NaN it returns NaN.
function DaylightSavingsOffset(t) {
// Load the cache object from the builtins object.
var cache = DST_offset_cache;
@@ -219,6 +220,7 @@
var timezone_cache_timezone;
function LocalTimezone(t) {
+ if (NUMBER_IS_NAN(t)) return "";
if (t == timezone_cache_time) {
return timezone_cache_timezone;
}
@@ -464,9 +466,11 @@
value = cache.time;
} else {
value = DateParse(year);
- cache.time = value;
- cache.year = YearFromTime(LocalTimeNoCheck(value));
- cache.string = year;
+ if (!NUMBER_IS_NAN(value)) {
+ cache.time = value;
+ cache.year = YearFromTime(LocalTimeNoCheck(value));
+ cache.string = year;
+ }
}
} else {
@@ -647,11 +651,13 @@
function LocalTimezoneString(time) {
- var timezoneOffset = (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
+ var timezoneOffset =
+ (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
var sign = (timezoneOffset >= 0) ? 1 : -1;
var hours = FLOOR((sign * timezoneOffset)/60);
var min = FLOOR((sign * timezoneOffset)%60);
- var gmt = ' GMT' + ((sign == 1) ? '+' : '-') + TwoDigitString(hours) + TwoDigitString(min);
+ var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
+ TwoDigitString(hours) + TwoDigitString(min);
return gmt + ' (' + LocalTimezone(time) + ')';
}
diff --git a/src/execution.h b/src/execution.h
index 8cfdec2..126b172 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -205,7 +205,7 @@
static void EnableInterrupts();
static void DisableInterrupts();
- static const uintptr_t kLimitSize = 512 * KB;
+ static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
static const uintptr_t kInterruptLimit = 0xfffffffe;
static const uintptr_t kIllegalLimit = 0xffffffff;
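
(On 32-bit targets kPointerSize * 128 * KB is the same 512 KB as before; on 64-bit targets it becomes 1 MB, scaling the stack-guard limit with pointer width.)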
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index b0770b0..3df11f7 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -133,6 +133,7 @@
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"Minimum length for automatic enable preparsing")
+DEFINE_bool(multipass, false, "use the multipass code generator")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -267,6 +268,7 @@
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
+DEFINE_bool(print_cfg, false, "print control-flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
diff --git a/src/heap-inl.h b/src/heap-inl.h
index d27f14f..114ae0d 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -159,9 +159,7 @@
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
-#ifndef V8_HOST_ARCH_64_BIT
Page::SetRSet(address, offset);
-#endif // V8_HOST_ARCH_64_BIT
}
diff --git a/src/heap.cc b/src/heap.cc
index ebd0e1e..d813ed1 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -681,33 +681,11 @@
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-#ifdef V8_HOST_ARCH_64_BIT
- // TODO(X64): Make this go away again. We currently disable RSets for
- // 64-bit-mode.
- HeapObjectIterator old_pointer_iterator(old_pointer_space_);
- while (old_pointer_iterator.has_next()) {
- HeapObject* heap_object = old_pointer_iterator.next();
- heap_object->Iterate(&scavenge_visitor);
- }
- HeapObjectIterator map_iterator(map_space_);
- while (map_iterator.has_next()) {
- HeapObject* heap_object = map_iterator.next();
- heap_object->Iterate(&scavenge_visitor);
- }
- LargeObjectIterator lo_iterator(lo_space_);
- while (lo_iterator.has_next()) {
- HeapObject* heap_object = lo_iterator.next();
- if (heap_object->IsFixedArray()) {
- heap_object->Iterate(&scavenge_visitor);
- }
- }
-#else // !defined(V8_HOST_ARCH_64_BIT)
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
-#endif
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@@ -830,13 +808,11 @@
int Heap::UpdateRSet(HeapObject* obj) {
-#ifndef V8_HOST_ARCH_64_BIT
- // TODO(X64) Reenable RSet when we have a working 64-bit layout of Page.
ASSERT(!InNewSpace(obj));
// Special handling of fixed arrays to iterate the body based on the start
// address and offset. Just iterating the pointers as in UpdateRSetVisitor
// will not work because Page::SetRSet needs to have the start of the
- // object.
+ // object for large object pages.
if (obj->IsFixedArray()) {
FixedArray* array = FixedArray::cast(obj);
int length = array->length();
@@ -853,7 +829,6 @@
UpdateRSetVisitor v;
obj->Iterate(&v);
}
-#endif // V8_HOST_ARCH_64_BIT
return obj->Size();
}
diff --git a/src/heap.h b/src/heap.h
index 69d9ff0..30522dc 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -257,7 +257,7 @@
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
static Address NewSpaceStart() { return new_space_.start(); }
- static uint32_t NewSpaceMask() { return new_space_.mask(); }
+ static uintptr_t NewSpaceMask() { return new_space_.mask(); }
static Address NewSpaceTop() { return new_space_.top(); }
static NewSpace* new_space() { return &new_space_; }
@@ -1123,11 +1123,9 @@
HeapObject* object = HeapObject::cast(*current);
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
-#ifndef V8_TARGET_ARCH_X64
if (Heap::InNewSpace(object)) {
ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
}
-#endif
}
}
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 70b510e..b648055 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -226,7 +226,9 @@
times_1 = 0,
times_2 = 1,
times_4 = 2,
- times_8 = 3
+ times_8 = 3,
+ times_pointer_size = times_4,
+ times_half_pointer_size = times_2
};
diff --git a/src/ia32/cfg-ia32.cc b/src/ia32/cfg-ia32.cc
new file mode 100644
index 0000000..01ee5e6
--- /dev/null
+++ b/src/ia32/cfg-ia32.cc
@@ -0,0 +1,137 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "macro-assembler-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Label deferred_enter, deferred_exit;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ push(ebp);
+ __ mov(ebp, esp);
+ __ push(esi);
+ __ push(edi);
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ Set(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(eax);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, &deferred_enter);
+ __ bind(&deferred_exit);
+ }
+ }
+ successor_->Compile(masm);
+ if (FLAG_check_stack) {
+ __ bind(&deferred_enter);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ jmp(&deferred_exit);
+ }
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ret((count + 1) * kPointerSize);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ Comment cmnt(masm, "[ ReturnInstr");
+ value_->ToRegister(masm, eax);
+}
+
+
+void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+ __ mov(reg, Immediate(handle_));
+}
+
+
+void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
+ switch (type_) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ mov(reg, Operand(ebp, (1 + count - index_) * kPointerSize));
+ break;
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ __ mov(reg, Operand(ebp, kOffset - index_ * kPointerSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
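
Tying this back to the worked example earlier: for function id(x) { return x; } the ia32 backend would emit roughly the following (stack-check and FLAG_trace paths elided):

  push ebp
  mov ebp, esp
  push esi            ; context
  push edi            ; function
  mov eax, [ebp + 8]  ; parameter 0: (1 + num_parameters - index) * kPointerSize
  mov esp, ebp
  pop ebp
  ret 8               ; one parameter plus the receiver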
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 6648277..c99bc6f 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -5154,11 +5154,10 @@
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp));
- __ shr(ebp_as_smi.reg(), kSmiTagSize);
frame_->Push(&ebp_as_smi);
}
@@ -7786,7 +7785,7 @@
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(NegateCondition(equal), &not_outermost_js);
+ __ j(not_equal, &not_outermost_js);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
__ bind(&not_outermost_js);
#endif
@@ -7837,7 +7836,7 @@
// If current EBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(NegateCondition(equal), &not_outermost_js_2);
+ __ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 4bb006f..2abe422 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -603,7 +603,7 @@
friend class Reference;
friend class Result;
- friend class CodeGeneratorPatcher; // Used in test-log-ia32.cc
+ friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index fae1525..de0ef8e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -146,43 +146,30 @@
// for the remembered set bits.
Label done;
- // This optimization cannot survive serialization and deserialization,
- // so we disable as long as serialization can take place.
- int32_t new_space_start =
- reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
- if (Serializer::enabled() || new_space_start < 0) {
- // Cannot do smart bit-twiddling. Need to do two consecutive checks.
- // Check for Smi first.
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
- // Test that the object address is not in the new space. We cannot
- // set remembered set bits in the new space.
+ // Skip barrier if writing a smi.
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
mov(value, Operand(object));
and_(value, Heap::NewSpaceMask());
cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
j(equal, &done);
} else {
- // move the value SmiTag into the sign bit
- shl(value, 31);
- // combine the object with value SmiTag
- or_(value, Operand(object));
- // remove the uninteresing bits inside the page
- and_(value, Heap::NewSpaceMask() | (1 << 31));
- // xor has two effects:
- // - if the value was a smi, then the result will be negative
- // - if the object is pointing into new space area the page bits will
- // all be zero
- xor_(value, new_space_start | (1 << 31));
- // Check for both conditions in one branch
- j(less_equal, &done);
+ int32_t new_space_start = reinterpret_cast<int32_t>(
+ ExternalReference::new_space_start().address());
+ lea(value, Operand(object, -new_space_start));
+ and_(value, Heap::NewSpaceMask());
+ j(equal, &done);
}
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
- mov(value, Operand(object));
+ lea(value, Operand(object, offset));
and_(value, Page::kPageAlignmentMask);
- add(Operand(value), Immediate(offset));
- shr(value, kObjectAlignmentBits);
+ shr(value, kPointerSizeLog2);
// Compute the page address from the heap object pointer, leave it in
// 'object'.
@@ -192,7 +179,7 @@
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- bts(Operand(object, 0), value);
+ bts(Operand(object, Page::kRSetOffset), value);
} else {
Register dst = scratch;
if (offset != 0) {
@@ -201,7 +188,9 @@
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
- lea(dst, Operand(object, dst, times_2,
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
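
In C-like terms, the remembered-set update the new code emits for the small-offset case amounts to the following sketch (constants as in spaces.h):

  // slot = address of the written field; its bit index is the slot's
  // pointer-granular offset within the 8K page.
  uintptr_t slot = object + offset;
  uintptr_t bit  = (slot & Page::kPageAlignmentMask) >> kPointerSizeLog2;
  uintptr_t page = object & ~Page::kPageAlignmentMask;
  // bts sets bit 'bit' in the bitmap starting at page + Page::kRSetOffset.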
diff --git a/src/log.cc b/src/log.cc
index 0c1b76d..44e5c32 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -957,38 +957,63 @@
}
-bool Logger::IsProfilerPaused() {
- return profiler_->paused();
+int Logger::GetActiveProfilerModules() {
+ int result = PROFILER_MODULE_NONE;
+ if (!profiler_->paused()) {
+ result |= PROFILER_MODULE_CPU;
+ }
+ if (FLAG_log_gc) {
+ result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
+ }
+ return result;
}
-void Logger::PauseProfiler() {
- if (profiler_->paused()) {
- return;
+void Logger::PauseProfiler(int flags) {
+ if (!Log::IsEnabled()) return;
+ const int active_modules = GetActiveProfilerModules();
+ const int modules_to_disable = active_modules & flags;
+ if (modules_to_disable == PROFILER_MODULE_NONE) return;
+
+ if (modules_to_disable & PROFILER_MODULE_CPU) {
+ profiler_->pause();
+ if (FLAG_prof_lazy) {
+ if (!FLAG_sliding_state_window) ticker_->Stop();
+ FLAG_log_code = false;
+ // Must be the same message as Log::kDynamicBufferSeal.
+ LOG(UncheckedStringEvent("profiler", "pause"));
+ }
}
- profiler_->pause();
- if (FLAG_prof_lazy) {
- if (!FLAG_sliding_state_window) ticker_->Stop();
- FLAG_log_code = false;
- // Must be the same message as Log::kDynamicBufferSeal.
- LOG(UncheckedStringEvent("profiler", "pause"));
+ if (modules_to_disable &
+ (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
+ FLAG_log_gc = false;
}
- is_logging_ = false;
+ // Turn off logging if no active modules remain.
+ if ((active_modules & ~flags) == PROFILER_MODULE_NONE) {
+ is_logging_ = false;
+ }
}
-void Logger::ResumeProfiler() {
- if (!profiler_->paused() || !Log::IsEnabled()) {
- return;
+void Logger::ResumeProfiler(int flags) {
+ if (!Log::IsEnabled()) return;
+ const int modules_to_enable = ~GetActiveProfilerModules() & flags;
+ if (modules_to_enable != PROFILER_MODULE_NONE) {
+ is_logging_ = true;
}
- is_logging_ = true;
- if (FLAG_prof_lazy) {
- LOG(UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- if (!FLAG_sliding_state_window) ticker_->Start();
+ if (modules_to_enable & PROFILER_MODULE_CPU) {
+ if (FLAG_prof_lazy) {
+ LOG(UncheckedStringEvent("profiler", "resume"));
+ FLAG_log_code = true;
+ LogCompiledFunctions();
+ if (!FLAG_sliding_state_window) ticker_->Start();
+ }
+ profiler_->resume();
}
- profiler_->resume();
+ if (modules_to_enable &
+ (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
+ FLAG_log_gc = true;
+ }
}
@@ -996,7 +1021,7 @@
// either from main or Profiler's thread.
void Logger::StopLoggingAndProfiling() {
Log::stop();
- PauseProfiler();
+ PauseProfiler(PROFILER_MODULE_CPU);
}
diff --git a/src/log.h b/src/log.h
index 1692e77..89f6cdb 100644
--- a/src/log.h
+++ b/src/log.h
@@ -249,11 +249,11 @@
}
// Pause/Resume collection of profiling data.
- // When data collection is paused, Tick events are discarded until
+ // When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- static bool IsProfilerPaused();
- static void PauseProfiler();
- static void ResumeProfiler();
+ static void PauseProfiler(int flags);
+ static void ResumeProfiler(int flags);
+ static int GetActiveProfilerModules();
// If logging is performed into a memory buffer, allows to
// retrieve previously written messages. See v8.h.
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 57c884f..6ec5070 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -225,33 +225,60 @@
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- FILE *fp;
- fp = fopen("/proc/self/maps", "r");
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE *fp = fopen("/proc/self/maps", "r");
if (fp == NULL) return;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
int c;
if (attr_r == 'r' && attr_x == 'x') {
- while (c = getc(fp), (c != EOF) && (c != '\n') && (c != '/'));
- char lib_name[1024];
- bool lib_has_name = false;
+ // Found a readable and executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // Unexpected EOF; just exit.
+
+ // Process the filename if found.
if (c == '/') {
- ungetc(c, fp);
- lib_has_name = fgets(lib_name, sizeof(lib_name), fp) != NULL;
- }
- if (lib_has_name && strlen(lib_name) > 0) {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
lib_name[strlen(lib_name) - 1] = '\0';
} else {
- snprintf(lib_name, sizeof(lib_name),
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
LOG(SharedLibraryEvent(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to the end of the line
+ // to set up reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
}
- while (c = getc(fp), (c != EOF) && (c != '\n'));
}
+ free(lib_name);
fclose(fp);
#endif
}
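
For reference, a typical /proc/self/maps entry the parser accepts looks like:

  08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm

Only r-x entries are logged; executable mappings with no filename fall back to the raw "start-end" address string.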
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 60ae76d..c0cf7f4 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -80,7 +80,7 @@
// Returns a string identifying the current timezone taking into
// account daylight saving.
-char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time) {
UNIMPLEMENTED();
return "<none>";
}
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 6174522..b8fe967 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -86,16 +86,20 @@
}
-char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
- return const_cast<char*>(t->tm_zone);
+ if (NULL == t) return "";
+ return t->tm_zone;
}
double OS::DaylightSavingsOffset(double time) {
+ if (isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
+ if (NULL == t) return nan_value();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index a8a6243..633b2c2 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -603,7 +603,7 @@
// Returns a string identifying the current timezone taking into
// account daylight saving.
-char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time) {
return Time(time).LocalTimezone();
}
diff --git a/src/platform.h b/src/platform.h
index 11a1e79..76bf891 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -143,7 +143,7 @@
// Returns a string identifying the current time zone. The
// timestamp is used for determining if DST is in effect.
- static char* LocalTimezone(double time);
+ static const char* LocalTimezone(double time);
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
diff --git a/src/runtime.cc b/src/runtime.cc
index 0b98167..56e9f85 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4920,7 +4920,7 @@
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- char* zone = OS::LocalTimezone(x);
+ const char* zone = OS::LocalTimezone(x);
return Heap::AllocateStringFromUtf8(CStrVector(zone));
}
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 8b2eab0..0b4315c 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -103,9 +103,9 @@
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
// For a 64-bit address, if it is:
-// | page address | quadwords(5) | bit offset(5) | pointer alignment (3) |
+// | page address | words(5) | bit offset(5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
-// page_address + quadwords * 4 + kRSetOffset.
+// page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.
@@ -115,7 +115,7 @@
Page* page = Page::FromAddress(address);
uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
- kObjectAlignmentBits);
+ kPointerSizeLog2);
*bitmask = 1 << (bit_offset % kBitsPerInt);
Address rset_address =
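
Note: the comment fix matters because on x64 the bit index counts 8-byte pointer words, not quadword pairs. A worked sketch of the decomposition, using the constants described here (one rset bit per pointer-size word, rset read as 32-bit words, kRSetOffset from spaces.h below):

// Sketch: locating the remembered-set bit for a slot address on x64.
#include <cstdint>

const int kPointerSizeLog2 = 3;  // 8-byte pointers
const int kBitsPerInt = 32;      // the rset is accessed as 32-bit words
const int kRSetOffset = 32;      // 4 * kPointerSize, per spaces.h below

void RSetBitFor(uintptr_t page_address, uintptr_t slot_address,
                uintptr_t* rset_word, uint32_t* bitmask) {
  // One rset bit per pointer-size word in the page.
  uint32_t bit_offset = static_cast<uint32_t>(
      (slot_address - page_address) >> kPointerSizeLog2);
  *bitmask = 1u << (bit_offset % kBitsPerInt);
  // page_address + words * 4 + kRSetOffset, as the comment describes.
  *rset_word = page_address + (bit_offset / kBitsPerInt) * 4 + kRSetOffset;
}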
diff --git a/src/spaces.h b/src/spaces.h
index 9841a5f..57e7c1f 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -99,8 +99,11 @@
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
-// TODO(X64): This description only represents the 32-bit layout.
-// On the 64-bit platform, we add an offset to the start of the remembered set.
+//
+// On the 64-bit platform, pointers are aligned to the 8-byte pointer size.
+// This means that a page needs only 128 bytes for the RSet, and that only two
+// bytes come free in the part of the RSet that covers the RSet itself. For
+// this reason we add an offset to the start of the remembered set, making
+// room for the Page data at the start of the page.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
@@ -118,7 +121,7 @@
// from [page_addr .. page_addr + kPageSize[
//
// Note that this function only works for addresses in normal paged
- // spaces and addresses in the first 8K of large object pages (ie,
+ // spaces and addresses in the first 8K of large object pages (i.e.,
// the start of large objects but not necessarily derived pointers
// within them).
INLINE(static Page* FromAddress(Address a)) {
@@ -218,7 +221,7 @@
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
- // The offset of the remembered set in a page, in addition to the empty words
+ // The offset of the remembered set in a page, in addition to the empty bytes
// formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
@@ -234,7 +237,7 @@
// to align start of rset to a uint32_t address.
static const int kObjectStartOffset = 256;
- // The start offset of the remembered set in a page.
+ // The start offset of the used part of the remembered set in a page.
static const int kRSetStartOffset = kRSetOffset +
kObjectStartOffset / kBitsPerPointer;
@@ -264,16 +267,16 @@
// low-order bit for large object pages will be cleared.
int is_normal_page;
- // The following fields overlap with remembered set, they can only
+ // The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
// used.
- // The allocation pointer after relocating objects to this page.
- Address mc_relocation_top;
-
// The index of the page in its owner space.
int mc_page_index;
+ // The allocation pointer after relocating objects to this page.
+ Address mc_relocation_top;
+
// The forwarding address of the first live object in this page.
Address mc_first_forwarded;
@@ -1165,7 +1168,7 @@
// The start address of the space and a bit mask. Anding an address in the
// new space with the mask will result in the start address.
Address start() { return start_; }
- uint32_t mask() { return address_mask_; }
+ uintptr_t mask() { return address_mask_; }
// The allocation top and limit addresses.
Address* allocation_top_address() { return &allocation_info_.top; }
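
Note: widening mask() from uint32_t to uintptr_t is required on 64-bit targets. A small sketch of why, with illustrative start and size values:

// Sketch: the semispace mask must be pointer-sized on a 64-bit heap.
#include <cstdint>

const uint64_t kNewSpaceStart = 0x2f0000000000;  // illustrative
const uint64_t kNewSpaceSize = 16ull << 20;      // 16 MB, power of two
const uint64_t kAddressMask = ~(kNewSpaceSize - 1);

bool InNewSpace(uint64_t addr) {
  // With a uint32_t mask, '&' would clear the high bits of a 64-bit
  // address, so the comparison with the start could never succeed.
  return (addr & kAddressMask) == kNewSpaceStart;
}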
diff --git a/src/string-stream.cc b/src/string-stream.cc
index ee343a5..cec4167 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -153,7 +153,7 @@
}
break;
}
- case 'i': case 'd': case 'u': case 'x': case 'c': case 'p': case 'X': {
+ case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
int value = current.data_.u_int_;
EmbeddedVector<char, 24> formatted;
int length = OS::SNPrintF(formatted, temp.start(), value);
@@ -167,6 +167,13 @@
Add(formatted.start());
break;
}
+ case 'p': {
+ void* value = current.data_.u_pointer_;
+ EmbeddedVector<char, 20> formatted;
+ OS::SNPrintF(formatted, temp.start(), value);
+ Add(formatted.start());
+ break;
+ }
default:
UNREACHABLE();
break;
diff --git a/src/string-stream.h b/src/string-stream.h
index 5732944..6649f18 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -90,21 +90,12 @@
FmtElm(Handle<Object> value) : type_(HANDLE) { // NOLINT
data_.u_handle_ = value.location();
}
- FmtElm(void* value) : type_(INT) { // NOLINT
-#if V8_HOST_ARCH_64_BIT
- // TODO(x64): FmtElm needs to treat pointers as pointers, and not as
- // ints. This will require adding a pointer type, etc. For now just
- // hack it and truncate the pointer.
- // http://code.google.com/p/v8/issues/detail?id=335
- data_.u_int_ = 0;
- UNIMPLEMENTED();
-#else
- data_.u_int_ = reinterpret_cast<int>(value);
-#endif
+ FmtElm(void* value) : type_(POINTER) { // NOLINT
+ data_.u_pointer_ = value;
}
private:
friend class StringStream;
- enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE };
+ enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
Type type_;
union {
int u_int_;
@@ -113,6 +104,7 @@
const Vector<const uc16>* u_lc_str_;
Object* u_obj_;
Object** u_handle_;
+ void* u_pointer_;
} data_;
};
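
Note: the new POINTER arm carries the full pointer instead of truncating it through u_int_ (the old 64-bit TODO). A stripped-down sketch of the tagged-union shape; the names are shortened and this is not the real FmtElm:

// Sketch: a tagged union with a dedicated pointer arm.
#include <cstdio>

struct Elm {
  enum Type { INT, POINTER } type;
  union {
    int u_int;
    void* u_pointer;
  } data;
};

void Emit(const Elm& e) {
  switch (e.type) {
    case Elm::INT:
      printf("%d", e.data.u_int);
      break;
    case Elm::POINTER:
      printf("%p", e.data.u_pointer);  // full 64-bit value, no truncation
      break;
  }
}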
diff --git a/src/variables.h b/src/variables.h
index 3f1f11b..c2adb23 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -162,9 +162,7 @@
// be the global scope). scope() is NULL in that case. Currently the
// scope is only used to follow the context chain length.
Scope* scope() const { return scope_; }
- // If this assertion fails it means that some code has tried to
- // treat the special this variable as an ordinary variable with
- // the name "this".
+
Handle<String> name() const { return name_; }
Mode mode() const { return mode_; }
bool is_accessed_from_inner_scope() const {
diff --git a/src/version.cc b/src/version.cc
index 911f6e0..8579aa2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 1
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 2
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index ad4721d..4f482f5 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -44,15 +44,25 @@
// Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) {
- const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+ static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
return x == (x & kUInt32Mask);
}
static inline bool is_int32(int64_t x) {
- const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+ static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
return is_uint32(x - kMinIntValue);
}
+static inline bool uint_is_int32(uint64_t x) {
+ static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
+ return x < kMaxIntValue;
+}
+
+static inline bool is_uint32(uint64_t x) {
+ static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
+ return x < kMaxUIntValue;
+}
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
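
Note: the unsigned overloads give uint64_t callers an exact range check instead of an implicit conversion. A quick self-test of the predicates, reimplemented locally under the assumption that V8_INT64_C/V8_UINT64_C behave like the standard INT64_C/UINT64_C macros:

// Sketch: exercising the 32-bit range predicates (local reimplementation).
#include <cassert>
#include <cstdint>

static inline bool is_uint32(int64_t x) {
  const int64_t kUInt32Mask = INT64_C(0xffffffff);
  return x == (x & kUInt32Mask);
}

static inline bool is_int32(int64_t x) {
  const int64_t kMinIntValue = INT64_C(-0x80000000);
  return is_uint32(x - kMinIntValue);
}

static inline bool uint_is_int32(uint64_t x) {
  return x < UINT64_C(0x80000000);
}

int main() {
  assert(is_int32(-1));                    // sign-extends to a valid imm32
  assert(!is_int32(INT64_C(0x80000000)));  // one past INT32_MAX
  assert(is_uint32(INT64_C(0xffffffff)));  // largest unsigned 32-bit value
  assert(!uint_is_int32(UINT64_C(0x80000000)));
  return 0;
}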
diff --git a/src/x64/cfg-x64.cc b/src/x64/cfg-x64.cc
new file mode 100644
index 0000000..86754ee
--- /dev/null
+++ b/src/x64/cfg-x64.cc
@@ -0,0 +1,146 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Label deferred_enter, deferred_exit;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ __ push(rdi);
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ movq(kScratchRegister, Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ __ push(kScratchRegister);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_limit);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ j(below, &deferred_enter);
+ __ bind(&deferred_exit);
+ }
+ }
+ successor_->Compile(masm);
+ if (FLAG_check_stack) {
+ __ bind(&deferred_enter);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ jmp(&deferred_exit);
+ }
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(rax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ret((count + 1) * kPointerSize);
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // "movq rsp, rbp; pop rbp" has length 5. "ret k" has length 2.
+ const int kPadding = Debug::kX64JSReturnSequenceLength - 5 - 2;
+ for (int i = 0; i < kPadding; ++i) {
+ __ int3();
+ }
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ Comment cmnt(masm, "[ ReturnInstr");
+ value_->ToRegister(masm, rax);
+}
+
+
+void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+ __ Move(reg, handle_);
+}
+
+
+void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
+ switch (type_) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ movq(reg, Operand(rbp, (1 + count - index_) * kPointerSize));
+ break;
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ __ movq(reg, Operand(rbp, kOffset - index_ * kPointerSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
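
Note: each node compiles itself once and then tails into successor_. A minimal sketch of that traversal shape; the real nodes ASSERT(!is_marked()) instead of returning early, because the current CFGs are straight-line and no node is reachable twice:

// Sketch: the successor-chained, compile-once traversal (simplified).
struct MacroAssembler;

class Node {
 public:
  Node() : is_marked_(false) {}
  virtual ~Node() {}
  virtual void Compile(MacroAssembler* masm) = 0;
 protected:
  bool is_marked_;
};

class Block : public Node {
 public:
  explicit Block(Node* successor) : successor_(successor) {}
  virtual void Compile(MacroAssembler* masm) {
    if (is_marked_) return;  // the real code ASSERTs instead
    is_marked_ = true;
    // ... emit this block's instructions into masm ...
    successor_->Compile(masm);  // fall through into the successor
  }
 private:
  Node* successor_;
};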
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 3112ecc..d2ac163 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -3421,9 +3421,20 @@
}
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+  ASSERT(kSmiTag == 0);  // RBP value is aligned, so it should look like a Smi.
+ Result rbp_as_smi = allocator_->Allocate();
+ ASSERT(rbp_as_smi.is_valid());
+ __ movq(rbp_as_smi.reg(), rbp);
+ frame_->Push(&rbp_as_smi);
+}
+
+
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
frame_->SpillAll();
+ __ push(rsi);
// Make sure the frame is aligned like the OS expects.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -3436,11 +3447,12 @@
// Call V8::RandomPositiveSmi().
__ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
- // Restore stack pointer from callee-saved register edi.
+ // Restore stack pointer from callee-saved register.
if (kFrameAlignment > 0) {
__ movq(rsp, rbx);
}
+ __ pop(rsi);
Result result = allocator_->Allocate(rax);
frame_->Push(&result);
}
@@ -5555,13 +5567,16 @@
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST) {
+ slot->var()->mode() == Variable::CONST ||
+ slot->is_arguments()) {
GetValue(typeof_state);
return;
}
// Only non-constant, frame-allocated parameters and locals can reach
- // here.
+ // here. Be careful not to use the optimizations for arguments
+ // object access since it may not have been initialized yet.
+ ASSERT(!slot->is_arguments());
if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index());
} else {
@@ -6419,22 +6434,23 @@
// Fetch top stack handler.
ExternalReference handler_address(Top::k_handler_address);
__ movq(kScratchRegister, handler_address);
- __ movq(rdx, Operand(kScratchRegister, 0));
+ __ movq(rsp, Operand(kScratchRegister, 0));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
- __ cmpq(Operand(rdx, StackHandlerConstants::kStateOffset),
+ __ cmpq(Operand(rsp, StackHandlerConstants::kStateOffset),
Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
- __ movq(rdx, Operand(rdx, StackHandlerConstants::kNextOffset));
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(rsp);
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- __ movq(rax, Operand(rdx, StackHandlerConstants::kNextOffset));
+ __ pop(rax);
__ store_rax(handler_address);
// Set external caught exception to false.
@@ -6447,14 +6463,12 @@
ExternalReference pending_exception(Top::k_pending_exception_address);
__ store_rax(pending_exception);
- // Restore the stack to the address of the ENTRY handler
- __ movq(rsp, rdx);
-
// Clear the context pointer.
__ xor_(rsi, rsi);
// Restore registers from handler.
-
+ ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+ StackHandlerConstants::kFPOffset);
__ pop(rbp); // FP
ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
StackHandlerConstants::kStateOffset);
@@ -6570,6 +6584,9 @@
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
// Setup frame.
__ push(rbp);
@@ -6595,6 +6612,17 @@
__ load_rax(c_entry_fp);
__ push(rax);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+  __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+  __ bind(&not_outermost_js);
+#endif
+
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -6637,6 +6665,16 @@
// Pop next_sp.
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current RBP value is the same as the js_entry_sp value, it means
+  // that the current function is the outermost.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+  __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+  __ bind(&not_outermost_js_2);
+#endif
+
// Restore the top frame descriptor from the stack.
__ bind(&exit);
__ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
@@ -6770,6 +6808,7 @@
__ bind(&done);
}
+
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -6804,6 +6843,7 @@
__ bind(&done);
}
+
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float) {
Label test_other, done;
@@ -6841,6 +6881,7 @@
}
}
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
@@ -6981,7 +7022,6 @@
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
-
if (flags_ == SMI_CODE_IN_STUB) {
// The fast case smi code wasn't inlined in the stub caller
// code. Generate it here to speed up common operations.
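
Note: the Throw rewrite above leans on StackHandlerConstants::kNextOffset being 0: once rsp points at a handler, a pop both loads the next-handler link and advances past it. A C++ model of the walk, simplified to the two fields the loop touches:

// Sketch: unwinding a next-at-offset-0 handler chain with pop semantics.
#include <cstdint>

struct StackHandler {  // simplified; only the fields the loop touches
  StackHandler* next;  // StackHandlerConstants::kNextOffset == 0
  intptr_t state;      // ENTRY or TRY
};

const intptr_t kEntryState = 0;  // illustrative encoding

StackHandler* UnwindToEntry(StackHandler* top) {
  StackHandler* sp = top;  // plays the role of rsp
  while (sp->state != kEntryState) {
    sp = sp->next;  // "pop rsp": the link is the word at offset 0
  }
  return sp->next;  // "pop rax": the handler past the ENTRY handler
}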
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 9e69007..5014f71 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -534,6 +534,8 @@
void GenerateLog(ZoneList<Expression*>* args);
+ void GenerateGetFramePointer(ZoneList<Expression*>* args);
+
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
@@ -593,6 +595,8 @@
friend class Reference;
friend class Result;
+ friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
+
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 83e3149..cc8365c 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -88,7 +88,7 @@
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
@@ -97,6 +97,7 @@
{ 0x89, OPER_REG_OP_ORDER, "mov" },
{ 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
{ 0x8B, REG_OPER_OP_ORDER, "mov" },
+ { 0x8D, REG_OPER_OP_ORDER, "lea" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -139,7 +140,7 @@
static const char* conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "a", "na",
+ "o", "no", "c", "nc", "z", "nz", "na", "a",
"s", "ns", "pe", "po", "l", "ge", "le", "g"
};
@@ -252,6 +253,24 @@
static InstructionTable instruction_table;
+static InstructionDesc cmov_instructions[16] = {
+ {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
+};
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
@@ -533,7 +552,7 @@
value = 0; // Initialize variables on all paths to satisfy the compiler.
count = 0;
}
- AppendToBuffer(V8_PTR_PREFIX"x", value);
+ AppendToBuffer("%" V8_PTR_PREFIX "x", value);
return count;
}
@@ -966,6 +985,13 @@
// RDTSC or CPUID
AppendToBuffer("%s", mnemonic);
+ } else if ((opcode & 0xF0) == 0x40) {
+ // CMOVcc: conditional move.
+ int condition = opcode & 0x0F;
+ const InstructionDesc& idesc = cmov_instructions[condition];
+ byte_size_operand_ = idesc.byte_size_operation;
+ current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1350,9 +1376,9 @@
const char* memory_location = NameOfAddress(
reinterpret_cast<byte*>(
*reinterpret_cast<int32_t*>(data + 1)));
- if (*data == 0xA3) { // Opcode 0xA3
+ if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movzxlq rax,(%s)", memory_location);
- } else { // Opcode 0xA1
+ } else { // Opcode 0xA3
AppendToBuffer("movzxlq (%s),rax", memory_location);
}
data += 5;
@@ -1362,9 +1388,9 @@
// New x64 instruction mov rax,(imm_64).
const char* memory_location = NameOfAddress(
*reinterpret_cast<byte**>(data + 1));
- if (*data == 0xA3) { // Opcode 0xA3
+ if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movq rax,(%s)", memory_location);
- } else { // Opcode 0xA1
+ } else { // Opcode 0xA3
AppendToBuffer("movq (%s),rax", memory_location);
}
data += 9;
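
Note: for CMOVcc the condition code is just the low nibble of the second opcode byte (0x0F 0x40..0x4F), which is why the table is indexed with opcode & 0x0F and lists "na" at 6 and "a" at 7, matching the corrected suffix order above. A tiny decode sketch:

// Sketch: decoding a CMOVcc opcode into a mnemonic.
#include <cstdio>

static const char* kCmovMnemonics[16] = {
  "cmovo", "cmovno", "cmovc", "cmovnc", "cmovz", "cmovnz", "cmovna", "cmova",
  "cmovs", "cmovns", "cmovpe", "cmovpo", "cmovl", "cmovge", "cmovle", "cmovg"
};

const char* CmovName(unsigned char opcode) {
  // Second opcode byte: high nibble 0x4, low nibble = condition code.
  if ((opcode & 0xF0) != 0x40) return NULL;
  return kCmovMnemonics[opcode & 0x0F];
}

int main() {
  printf("%s\n", CmovName(0x44));  // prints "cmovz"
  return 0;
}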
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 0ef75f8..8659533 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -424,6 +424,9 @@
// Check that the key is a smi.
__ testl(rbx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
+ // If it is a smi, make sure it is zero-extended, so it can be
+ // used as an index in a memory operand.
+ __ movl(rbx, rbx); // Clear the high bits of rbx.
__ CmpInstanceType(rcx, JS_ARRAY_TYPE);
__ j(equal, &array);
@@ -434,7 +437,7 @@
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
- // rbx: index (as a smi)
+ // rbx: index (as a smi), zero-extended.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
@@ -502,7 +505,8 @@
// rax: value
// rcx: FixedArray
// rbx: index (as a smi)
- __ movq(Operand(rcx, rbx, times_4, FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(Operand(rcx, rbx, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag),
rax);
// Update write barrier for the elements array address.
__ movq(rdx, rax);
@@ -602,9 +606,22 @@
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
+
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
@@ -646,10 +663,23 @@
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadStringLength(masm, rax, rdx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
+
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
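
Note: times_half_pointer_size works in the keyed store above because a smi already carries a factor of two. With a one-bit smi tag (as in this era of V8), a smi holding n is the machine word n << 1, so scaling by 4 yields n * 8, exactly a pointer-size element offset. A worked check:

// Sketch: smi index scaling on x64 (one-bit smi tag assumed).
#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const int kPointerSize = 8;

int main() {
  int64_t n = 5;                       // logical array index
  int64_t smi = n << kSmiTagSize;      // tagged representation: n * 2
  int64_t offset = smi * 4;            // times_half_pointer_size scaling
  assert(offset == n * kPointerSize);  // lands on an 8-byte element boundary
  return 0;
}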
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f58e1cd..5e39cb6 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
+#include "serialize.h"
#include "debug.h"
namespace v8 {
@@ -45,11 +46,156 @@
}
-// TODO(x64): For now, the write barrier is disabled on x64 and we
-// therefore generate no code. This should be fixed when the write
-// barrier is enabled.
-void MacroAssembler::RecordWrite(Register object, int offset,
- Register value, Register scratch) {
+
+static void RecordWriteHelper(MacroAssembler* masm,
+ Register object,
+ Register addr,
+ Register scratch) {
+ Label fast;
+
+ // Compute the page address from the heap object pointer, leave it
+ // in 'object'.
+ ASSERT(is_int32(~Page::kPageAlignmentMask));
+ masm->and_(object,
+ Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+
+  // Compute the bit address in the remembered set, leave it in 'addr'.
+ masm->subq(addr, object);
+ masm->shr(addr, Immediate(kPointerSizeLog2));
+
+ // If the bit offset lies beyond the normal remembered set range, it is in
+ // the extra remembered set area of a large object.
+ masm->cmpq(addr, Immediate(Page::kPageSize / kPointerSize));
+ masm->j(less, &fast);
+
+ // Adjust 'addr' to be relative to the start of the extra remembered set
+ // and the page address in 'object' to be the address of the extra
+ // remembered set.
+ masm->subq(addr, Immediate(Page::kPageSize / kPointerSize));
+ // Load the array length into 'scratch'.
+ masm->movl(scratch,
+ Operand(object,
+ Page::kObjectStartOffset + FixedArray::kLengthOffset));
+ // Extra remembered set starts right after FixedArray.
+ // Add the page header, array header, and array body size
+ // (length * pointer size) to the page address to find the extra remembered
+ // set start.
+ masm->lea(object,
+ Operand(object, scratch, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize));
+
+ // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+ // to limit code size. We should probably evaluate this decision by
+ // measuring the performance of an equivalent implementation using
+ // "simpler" instructions
+ masm->bind(&fast);
+ masm->bts(Operand(object, Page::kRSetOffset), addr);
+}
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+  // Minor key encoding in 12 bits of the three registers (object, address and
+  // scratch) as OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class AddressBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ RecordWriteHelper(masm, object_, addr_, scratch_);
+ masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch) {
+ // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into the young generation (which does not
+  // have space for the remembered set bits).
+ Label done;
+
+ // Test that the object address is not in the new space. We cannot
+ // set remembered set bits in the new space.
+ movq(value, object);
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(value, kScratchRegister);
+ j(equal, &done);
+
+ if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+ // Compute the bit offset in the remembered set, leave it in 'value'.
+ lea(value, Operand(object, offset));
+ ASSERT(is_int32(Page::kPageAlignmentMask));
+ and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+ shr(value, Immediate(kObjectAlignmentBits));
+
+ // Compute the page address from the heap object pointer, leave it in
+ // 'object' (immediate value is sign extended).
+ and_(object, Immediate(~Page::kPageAlignmentMask));
+
+ // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+ // to limit code size. We should probably evaluate this decision by
+ // measuring the performance of an equivalent implementation using
+ // "simpler" instructions
+ bts(Operand(object, Page::kRSetOffset), value);
+ } else {
+ Register dst = scratch;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
+ } else {
+      // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an offset
+ // into an array of words.
+ lea(dst, Operand(object, dst, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ }
+ // If we are already generating a shared stub, not inlining the
+ // record write code isn't going to save us any memory.
+ if (generating_stub()) {
+ RecordWriteHelper(this, object, dst, value);
+ } else {
+ RecordWriteStub stub(object, dst, value);
+ CallStub(&stub);
+ }
+ }
+
+ bind(&done);
}