r2080 - Version 1.2.6.
Added a histogram recording hit rates at different levels of the compilation cache.
Added stack overflow check for the RegExp analysis phase. Previously a very long regexp graph could overflow the stack with recursive calls.
Use a dynamic buffer when collecting log events in memory.
Added start/stop events to the profiler log.
Fixed infinite loop which could happen when setting a debug break while executing a RegExp compiled to native code.
Fixed handling of lastIndexOf called with negative index (issue 351).
Fixed irregular crash in profiler test (issue 358).
Fixed compilation issues with some versions of gcc.
git-svn-id: http://v8.googlecode.com/svn/trunk@2080 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index 59c452b..64d2063 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -43,14 +43,14 @@
'flags.cc', 'frames.cc', 'func-name-inferrer.cc',
'global-handles.cc', 'handles.cc', 'hashmap.cc',
'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
- 'jump-target.cc', 'log.cc', 'mark-compact.cc', 'messages.cc', 'objects.cc',
- 'oprofile-agent.cc', 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
- 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
- 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
- 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
- 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
- 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
- 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
+ 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
+ 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
+ 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
+ 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc', 'runtime.cc',
+ 'scanner.cc', 'scopeinfo.cc', 'scopes.cc', 'serialize.cc',
+ 'snapshot-common.cc', 'spaces.cc', 'string-stream.cc', 'stub-cache.cc',
+ 'token.cc', 'top.cc', 'unicode.cc', 'usage-analyzer.cc', 'utils.cc',
+ 'v8-counters.cc', 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 5dc15a6..eeab4a7 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -84,8 +84,6 @@
};
-const int kNumRegisters = 16;
-
extern Register no_reg;
extern Register r0;
extern Register r1;
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index c7e32c3..fe6d945 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -289,9 +289,7 @@
DeleteFrame();
// Process any deferred code using the register allocator.
- if (HasStackOverflow()) {
- ClearDeferred();
- } else {
+ if (!HasStackOverflow()) {
ProcessDeferred();
}
@@ -757,13 +755,11 @@
class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlineSmiOperation(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperation(Token::Value op,
int value,
bool reversed,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
value_(value),
reversed_(reversed),
overwrite_mode_(overwrite_mode) {
@@ -780,7 +776,12 @@
};
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
void DeferredInlineSmiOperation::Generate() {
+ MacroAssembler* masm = cgen()->masm();
enter()->Bind();
VirtualFrame::SpilledScope spilled_scope;
@@ -841,15 +842,19 @@
}
GenericBinaryOpStub igostub(op_, overwrite_mode_);
- Result arg0 = generator()->allocator()->Allocate(r1);
+ Result arg0 = cgen()->allocator()->Allocate(r1);
ASSERT(arg0.is_valid());
- Result arg1 = generator()->allocator()->Allocate(r0);
+ Result arg1 = cgen()->allocator()->Allocate(r0);
ASSERT(arg1.is_valid());
- generator()->frame()->CallStub(&igostub, &arg0, &arg1);
+ cgen()->frame()->CallStub(&igostub, &arg0, &arg1);
exit_.Jump();
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
@@ -872,7 +877,7 @@
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC);
deferred->enter()->Branch(vs);
@@ -884,7 +889,7 @@
case Token::SUB: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
if (!reversed) {
__ sub(r0, r0, Operand(value), SetCC);
@@ -902,7 +907,7 @@
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne);
switch (op) {
@@ -927,7 +932,7 @@
} else {
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
+ new DeferredInlineSmiOperation(op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
@@ -2654,8 +2659,7 @@
// therefore context dependent.
class DeferredObjectLiteral: public DeferredCode {
public:
- DeferredObjectLiteral(CodeGenerator* generator, ObjectLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
set_comment("[ DeferredObjectLiteral");
}
@@ -2666,7 +2670,12 @@
};
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
void DeferredObjectLiteral::Generate() {
+ MacroAssembler* masm = cgen()->masm();
// Argument is passed in r1.
enter()->Bind();
VirtualFrame::SpilledScope spilled_scope;
@@ -2674,7 +2683,7 @@
// If the entry is undefined we call the runtime system to compute
// the literal.
- VirtualFrame* frame = generator()->frame();
+ VirtualFrame* frame = cgen()->frame();
// Literal array (0).
frame->EmitPush(r1);
// Literal index (1).
@@ -2691,6 +2700,10 @@
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -2698,7 +2711,7 @@
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
// Retrieve the literal array and check the allocated entry.
@@ -2783,8 +2796,7 @@
// therefore context dependent.
class DeferredArrayLiteral: public DeferredCode {
public:
- DeferredArrayLiteral(CodeGenerator* generator, ArrayLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
set_comment("[ DeferredArrayLiteral");
}
@@ -2795,7 +2807,12 @@
};
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
void DeferredArrayLiteral::Generate() {
+ MacroAssembler* masm = cgen()->masm();
// Argument is passed in r1.
enter()->Bind();
VirtualFrame::SpilledScope spilled_scope;
@@ -2803,7 +2820,7 @@
// If the entry is undefined we call the runtime system to computed
// the literal.
- VirtualFrame* frame = generator()->frame();
+ VirtualFrame* frame = cgen()->frame();
// Literal array (0).
frame->EmitPush(r1);
// Literal index (1).
@@ -2820,6 +2837,10 @@
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -2827,7 +2848,7 @@
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
+ DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
// Retrieve the literal array and check the allocated entry.
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 24033cb..a8cb777 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -194,8 +194,7 @@
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index ef07967..65e7eaf 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -80,7 +80,7 @@
// branch.
VirtualFrame* fall_through_frame = cgen()->frame();
VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+ RegisterFile non_frame_registers;
cgen()->SetFrame(branch_frame, &non_frame_registers);
// Check if we can avoid merge code.
@@ -163,8 +163,7 @@
// virtual frame before the bind. Afterward, it should not.
ASSERT(cgen()->has_valid_frame());
VirtualFrame* frame = cgen()->frame();
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(sp, sp, Operand(difference * kPointerSize));
@@ -179,15 +178,14 @@
// Pick up the only reaching frame, take ownership of it, and
// use it for the block about to be emitted.
VirtualFrame* frame = reaching_frames_[0];
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen()->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[0] = NULL;
__ bind(&merge_labels_[0]);
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(sp, sp, Operand(difference * kPointerSize));
@@ -247,11 +245,11 @@
}
// Pick up the frame for this block. Assume ownership if
// there cannot be backward jumps.
- RegisterFile reserved = RegisterAllocator::Reserved();
+ RegisterFile empty;
if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &reserved);
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
} else {
- cgen()->SetFrame(frame, &reserved);
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
__ bind(&merge_labels_[i]);
@@ -274,8 +272,8 @@
// If this is the fall through, and it didn't need merge
// code, we need to pick up the frame so we can jump around
// subsequent merge blocks if necessary.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen()->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
}
@@ -285,8 +283,8 @@
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
if (!cgen()->has_valid_frame()) {
- RegisterFile reserved_registers = RegisterAllocator::Reserved();
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
// There may be unprocessed reaching frames that did not need
@@ -311,8 +309,8 @@
// Use a copy of the reaching frame so the original can be saved
// for possible reuse as a backward merge block.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
__ bind(&merge_labels_[0]);
cgen()->frame()->MergeTo(entry_frame_);
}
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
new file mode 100644
index 0000000..d98818f
--- /dev/null
+++ b/src/arm/register-allocator-arm-inl.h
@@ -0,0 +1,103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
+}
+
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+//
+// r0 <-> 0
+// r1 <-> 1
+// r2 <-> 2
+// r3 <-> 3
+// r4 <-> 4
+// r5 <-> 5
+// r6 <-> 6
+// r7 <-> 7
+// r9 <-> 8
+// r10 <-> 9
+// ip <-> 10
+// lr <-> 11
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ static int numbers[] = {
+ 0, // r0
+ 1, // r1
+ 2, // r2
+ 3, // r3
+ 4, // r4
+ 5, // r5
+ 6, // r6
+ 7, // r7
+ -1, // cp
+ 8, // r9
+ 9, // r10
+ -1, // fp
+ 10, // ip
+ -1, // sp
+ 11, // lr
+ -1 // pc
+ };
+ return numbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ static Register registers[] =
+ { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
+ return registers[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved r1 and lr registers are live on JS function entry.
+ Use(r1); // JS function.
+ Use(lr); // Return address.
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
index 0d90129..ad0c7f9 100644
--- a/src/arm/register-allocator-arm.cc
+++ b/src/arm/register-allocator-arm.cc
@@ -49,54 +49,9 @@
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
-RegisterFile RegisterAllocator::Reserved() {
- RegisterFile reserved;
- reserved.Use(sp);
- reserved.Use(fp);
- reserved.Use(cp);
- reserved.Use(pc);
- return reserved;
-}
-
-
-void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
- register_file->ref_counts_[sp.code()] = 0;
- register_file->ref_counts_[fp.code()] = 0;
- register_file->ref_counts_[cp.code()] = 0;
- register_file->ref_counts_[pc.code()] = 0;
-}
-
-
-bool RegisterAllocator::IsReserved(int reg_code) {
- return (reg_code == sp.code())
- || (reg_code == fp.code())
- || (reg_code == cp.code())
- || (reg_code == pc.code());
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The following registers are live on function entry, saved in the
- // frame, and available for allocation during execution.
- Use(r1); // JS function.
- Use(lr); // Return address.
-}
-
-
-void RegisterAllocator::Reset() {
- registers_.Reset();
- // The following registers are live on function entry and reserved
- // during execution.
- Use(sp); // Stack pointer.
- Use(fp); // Frame pointer (caller's frame pointer on entry).
- Use(cp); // Context context (callee's context on entry).
- Use(pc); // Program counter.
-}
-
-
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- UNIMPLEMENTED();
+ // No byte registers on ARM.
+ UNREACHABLE();
return Result();
}
diff --git a/src/arm/register-allocator-arm.h b/src/arm/register-allocator-arm.h
new file mode 100644
index 0000000..f953ed9
--- /dev/null
+++ b/src/arm/register-allocator-arm.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = 12;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index e51f963..9527383 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -49,7 +49,7 @@
for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement());
}
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
@@ -96,7 +96,7 @@
// Fix any sync bit problems from the bottom-up, stopping when we
// hit the stack pointer or the top of the frame if the stack
// pointer is floating above the frame.
- int limit = Min(static_cast<int>(stack_pointer_), elements_.length() - 1);
+ int limit = Min(static_cast<int>(stack_pointer_), element_count() - 1);
for (int i = 0; i <= limit; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
@@ -128,7 +128,7 @@
// On ARM, all elements are in memory.
#ifdef DEBUG
- int start = Min(static_cast<int>(stack_pointer_), elements_.length() - 1);
+ int start = Min(static_cast<int>(stack_pointer_), element_count() - 1);
for (int i = start; i >= 0; i--) {
ASSERT(elements_[i].is_memory());
ASSERT(expected->elements_[i].is_memory());
@@ -393,7 +393,7 @@
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
- int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
@@ -419,7 +419,7 @@
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
@@ -427,7 +427,7 @@
void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 07e1be0..fc202e2 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -83,21 +83,35 @@
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
int height() {
- return elements_.length() - expression_base_index();
+ return element_count() - expression_base_index();
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
+ }
+
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -109,7 +123,7 @@
// the frame after a runtime call). No code is emitted.
void Forget(int count) {
ASSERT(count >= 0);
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
ForgetElements(count);
}
@@ -124,7 +138,7 @@
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_index(reg));
+ if (is_used(reg)) SpillElementAt(register_location(reg));
}
// Spill all occurrences of an arbitrary register if possible. Return the
@@ -148,11 +162,8 @@
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i)) {
- Register temp = { i };
- cgen_allocator->Unuse(temp);
- }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
}
}
@@ -162,11 +173,8 @@
// binding a label.
void AttachToCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i)) {
- Register temp = { i };
- cgen_allocator->Use(temp);
- }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
}
}
@@ -205,7 +213,7 @@
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
@@ -336,7 +344,7 @@
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -387,7 +395,7 @@
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
@@ -420,8 +428,8 @@
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) {
- ASSERT(index < elements_.length());
- ASSERT(frame_pointer() < elements_.length()); // FP is on the frame.
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
return (frame_pointer() - index) * kPointerSize;
}
@@ -430,7 +438,7 @@
// of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
ASSERT(!is_used(reg));
- register_locations_[reg.code()] = index;
+ set_register_location(reg, index);
cgen()->allocator()->Use(reg);
}
@@ -438,8 +446,8 @@
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
void Unuse(Register reg) {
- ASSERT(register_locations_[reg.code()] != kIllegalIndex);
- register_locations_[reg.code()] = kIllegalIndex;
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
cgen()->allocator()->Unuse(reg);
}
@@ -453,7 +461,7 @@
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
+ // Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
// Sync a single unsynced element that lies beneath or at the stack pointer.
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index f75b302..12582a9 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -37,20 +37,20 @@
void DeferredCode::SetEntryFrame(Result* arg) {
- ASSERT(generator()->has_valid_frame());
- generator()->frame()->Push(arg);
- enter()->set_entry_frame(new VirtualFrame(generator()->frame()));
- *arg = generator()->frame()->Pop();
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->Push(arg);
+ enter()->set_entry_frame(new VirtualFrame(cgen()->frame()));
+ *arg = cgen()->frame()->Pop();
}
void DeferredCode::SetEntryFrame(Result* arg0, Result* arg1) {
- ASSERT(generator()->has_valid_frame());
- generator()->frame()->Push(arg0);
- generator()->frame()->Push(arg1);
- enter()->set_entry_frame(new VirtualFrame(generator()->frame()));
- *arg1 = generator()->frame()->Pop();
- *arg0 = generator()->frame()->Pop();
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->Push(arg0);
+ cgen()->frame()->Push(arg1);
+ enter()->set_entry_frame(new VirtualFrame(cgen()->frame()));
+ *arg1 = cgen()->frame()->Pop();
+ *arg0 = cgen()->frame()->Pop();
}
diff --git a/src/codegen.cc b/src/codegen.cc
index 51cc393..3b288d4 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -45,32 +45,24 @@
CodeGenerator* CodeGeneratorScope::top_ = NULL;
-DeferredCode::DeferredCode(CodeGenerator* generator)
- : generator_(generator),
- masm_(generator->masm()),
- exit_(JumpTarget::BIDIRECTIONAL),
- statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
- generator->AddDeferred(this);
+DeferredCode::DeferredCode() : exit_(JumpTarget::BIDIRECTIONAL) {
+ MacroAssembler* masm = cgen()->masm();
+ statement_position_ = masm->current_statement_position();
+ position_ = masm->current_position();
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
+
+ cgen()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
}
-void CodeGenerator::ClearDeferred() {
- for (int i = 0; i < deferred_.length(); i++) {
- deferred_[i]->Clear();
- }
-}
-
-
void CodeGenerator::ProcessDeferred() {
while (!deferred_.is_empty()) {
DeferredCode* code = deferred_.RemoveLast();
- MacroAssembler* masm = code->masm();
+ MacroAssembler* masm = code->cgen()->masm();
// Record position of deferred code stub.
masm->RecordStatementPosition(code->statement_position());
if (code->position() != RelocInfo::kNoPosition) {
@@ -80,7 +72,6 @@
Comment cmnt(masm, code->comment());
code->Generate();
ASSERT(code->enter()->is_bound());
- code->Clear();
}
}
@@ -515,8 +506,8 @@
// frame. Otherwise, we have to merge the existing one to the
// start frame as part of the previous case.
if (!has_valid_frame()) {
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
- SetFrame(new VirtualFrame(start_frame), &non_frame_registers);
+ RegisterFile empty;
+ SetFrame(new VirtualFrame(start_frame), &empty);
} else {
frame_->MergeTo(start_frame);
}
diff --git a/src/codegen.h b/src/codegen.h
index 487a7a4..9df2b49 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -52,7 +52,6 @@
// CodeGenerator
// ~CodeGenerator
// ProcessDeferred
-// ClearDeferred
// GenCode
// BuildBoilerplate
// ComputeCallInitialize
@@ -116,33 +115,17 @@
};
-// Use lazy compilation; defaults to true.
-// NOTE: Do not remove non-lazy compilation until we can properly
-// install extensions with lazy compilation enabled. At the
-// moment, this doesn't work for the extensions in Google3,
-// and we can only run the tests with --nolazy.
-
-
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
class DeferredCode: public ZoneObject {
public:
- explicit DeferredCode(CodeGenerator* generator);
+ DeferredCode();
virtual ~DeferredCode() { }
virtual void Generate() = 0;
- // Unuse the entry and exit targets, deallocating all virtual frames
- // held by them. It will be impossible to emit a (correct) jump
- // into or out of the deferred code after clearing.
- void Clear() {
- enter_.Unuse();
- exit_.Unuse();
- }
-
- MacroAssembler* masm() const { return masm_; }
- CodeGenerator* generator() const { return generator_; }
+ CodeGenerator* cgen() const { return CodeGeneratorScope::Current(); }
// Set the virtual frame for entry to the deferred code as a
// snapshot of the code generator's current frame (plus additional
@@ -169,13 +152,11 @@
void set_comment(const char* comment) { comment_ = comment; }
const char* comment() const { return comment_; }
#else
- inline void set_comment(const char* comment) { }
+ void set_comment(const char* comment) { }
const char* comment() const { return ""; }
#endif
protected:
- CodeGenerator* const generator_;
- MacroAssembler* const masm_;
JumpTarget enter_;
JumpTarget exit_;
diff --git a/src/debug.cc b/src/debug.cc
index becfaa6..0229df5 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -443,7 +443,7 @@
thread_local_.step_into_fp_ = 0;
thread_local_.after_break_target_ = 0;
thread_local_.debugger_entry_ = NULL;
- thread_local_.preemption_pending_ = false;
+ thread_local_.pending_interrupts_ = 0;
}
@@ -727,7 +727,7 @@
// Set the flag indicating that preemption happened during debugging.
void Debug::PreemptionWhileInDebugger() {
ASSERT(InDebugger());
- Debug::set_preemption_pending(true);
+ Debug::set_interrupts_pending(PREEMPT);
}
@@ -1927,6 +1927,11 @@
bool auto_continue) {
HandleScope scope;
+ // Clear any pending debug break if this is a real break.
+ if (!auto_continue) {
+ Debug::clear_interrupt_pending(DEBUGBREAK);
+ }
+
// Create the execution state.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
@@ -2034,7 +2039,12 @@
Handle<JSObject>::cast(event_data));
InvokeMessageHandler(message);
}
- if (auto_continue && !HasCommands()) {
+
+ // If auto continue don't make the event cause a break, but process messages
+ // in the queue if any. For script collected events don't even process
+ // messages in the queue as the execution state might not be what is expected
+ // by the client.
+ if ((auto_continue && !HasCommands()) || event == v8::ScriptCollected) {
return;
}
diff --git a/src/debug.h b/src/debug.h
index 3f90fa6..a1abced 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -282,11 +282,19 @@
thread_local_.debugger_entry_ = entry;
}
- static bool preemption_pending() {
- return thread_local_.preemption_pending_;
+ // Check whether any of the specified interrupts are pending.
+ static bool is_interrupt_pending(InterruptFlag what) {
+ return (thread_local_.pending_interrupts_ & what) != 0;
}
- static void set_preemption_pending(bool preemption_pending) {
- thread_local_.preemption_pending_ = preemption_pending;
+
+ // Set specified interrupts as pending.
+ static void set_interrupts_pending(InterruptFlag what) {
+ thread_local_.pending_interrupts_ |= what;
+ }
+
+ // Clear specified interrupts from pending.
+ static void clear_interrupt_pending(InterruptFlag what) {
+ thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
}
// Getter and setter for the disable break state.
@@ -431,8 +439,8 @@
// Top debugger entry.
EnterDebugger* debugger_entry_;
- // Preemption happened while debugging.
- bool preemption_pending_;
+ // Pending interrupts scheduled while debugging.
+ int pending_interrupts_;
};
// Storage location for registers when handling debug break calls
@@ -679,7 +687,8 @@
EnterDebugger()
: prev_(Debug::debugger_entry()),
has_js_frames_(!it_.done()) {
- ASSERT(prev_ == NULL ? !Debug::preemption_pending() : true);
+ ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(PREEMPT));
+ ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
Debug::set_debugger_entry(this);
@@ -709,28 +718,41 @@
// Restore to the previous break state.
Debug::SetBreak(break_frame_id_, break_id_);
- // Request preemption when leaving the last debugger entry and a preemption
- // had been recorded while debugging. This is to avoid starvation in some
- // debugging scenarios.
- if (prev_ == NULL && Debug::preemption_pending()) {
- StackGuard::Preempt();
- Debug::set_preemption_pending(false);
- }
-
- // If there are commands in the queue when leaving the debugger request that
- // these commands are processed.
- if (prev_ == NULL && Debugger::HasCommands()) {
- StackGuard::DebugCommand();
- }
-
+ // Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!Top::has_pending_exception()) {
+ // Try to avoid any pending debug break breaking in the clear mirror
+ // cache JavaScript code.
+ if (StackGuard::IsDebugBreak()) {
+ Debug::set_interrupts_pending(DEBUGBREAK);
+ StackGuard::Continue(DEBUGBREAK);
+ }
Debug::ClearMirrorCache();
}
+
+ // Request preemption and debug break when leaving the last debugger entry
+ // if any of these where recorded while debugging.
+ if (Debug::is_interrupt_pending(PREEMPT)) {
+ // This re-scheduling of preemption is to avoid starvation in some
+ // debugging scenarios.
+ Debug::clear_interrupt_pending(PREEMPT);
+ StackGuard::Preempt();
+ }
+ if (Debug::is_interrupt_pending(DEBUGBREAK)) {
+ Debug::clear_interrupt_pending(DEBUGBREAK);
+ StackGuard::DebugBreak();
+ }
+
+ // If there are commands in the queue when leaving the debugger request
+ // that these commands are processed.
+ if (Debugger::HasCommands()) {
+ StackGuard::DebugCommand();
+ }
+
// If leaving the debugger with the debugger no longer active unload it.
if (!Debugger::IsDebuggerActive()) {
Debugger::UnloadDebugger();
diff --git a/src/execution.cc b/src/execution.cc
index 682cda6..fa3c2ec 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -588,20 +588,7 @@
return Heap::undefined_value();
}
- // Don't break in system functions. If the current function is
- // either in the builtins object of some context or is in the debug
- // context just return with the debug break stack guard active.
- JavaScriptFrameIterator it;
- JavaScriptFrame* frame = it.frame();
- Object* fun = frame->function();
- if (fun->IsJSFunction()) {
- GlobalObject* global = JSFunction::cast(fun)->context()->global();
- if (global->IsJSBuiltinsObject() || Debug::IsDebugGlobal(global)) {
- return Heap::undefined_value();
- }
- }
-
- // Check for debug command break only.
+ // Collect the break state before clearing the flags.
bool debug_command_only =
StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
@@ -609,11 +596,6 @@
StackGuard::Continue(DEBUGBREAK);
StackGuard::Continue(DEBUGCOMMAND);
- // If debug command only and already in debugger ignore it.
- if (debug_command_only && Debug::InDebugger()) {
- return Heap::undefined_value();
- }
-
HandleScope scope;
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
@@ -621,7 +603,8 @@
return Heap::undefined_value();
}
- // Notify the debug event listeners.
+ // Notify the debug event listeners. Indicate auto continue if the break was
+ // a debug command break.
Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
// Return to continue execution.
diff --git a/src/hashmap.cc b/src/hashmap.cc
index 7b6b90a..b717312 100644
--- a/src/hashmap.cc
+++ b/src/hashmap.cc
@@ -87,7 +87,7 @@
void HashMap::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
- Entry *p = Probe(key, hash);
+ Entry* p = Probe(key, hash);
if (p->key == NULL) {
// Key not found nothing to remove.
return;
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index c1260d9..e09038a 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -79,8 +79,6 @@
int code_;
};
-const int kNumRegisters = 8;
-
extern Register eax;
extern Register ecx;
extern Register edx;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 5dec412..c72c126 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -318,9 +318,7 @@
DeleteFrame();
// Process any deferred code using the register allocator.
- if (HasStackOverflow()) {
- ClearDeferred();
- } else {
+ if (!HasStackOverflow()) {
HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
JumpTarget::set_compiling_deferred_code(true);
ProcessDeferred();
@@ -390,26 +388,25 @@
JumpTarget* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
ASSERT(tmp.is_register());
- Result context(esi);
+ Register context = esi;
for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
slow->Branch(not_equal, not_taken);
}
- __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
__ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp;
+ context = tmp.reg();
}
}
// Check that last extension is NULL.
- __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
- Immediate(0));
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
slow->Branch(not_equal, not_taken);
- __ mov(tmp.reg(), ContextOperand(context.reg(), Context::FCONTEXT_INDEX));
+ __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
return ContextOperand(tmp.reg(), slot->index());
}
@@ -785,11 +782,10 @@
// the GenericBinaryOpStub stub for slow cases.
class DeferredInlineBinaryOperation: public DeferredCode {
public:
- DeferredInlineBinaryOperation(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineBinaryOperation(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : DeferredCode(generator), stub_(op, mode, flags), op_(op) {
+ : stub_(op, mode, flags), op_(op) {
set_comment("[ DeferredInlineBinaryOperation");
}
@@ -808,9 +804,9 @@
Result left;
Result right;
enter()->Bind(&left, &right);
- generator()->frame()->Push(&left);
- generator()->frame()->Push(&right);
- Result answer = generator()->frame()->CallStub(&stub_, 2);
+ cgen()->frame()->Push(&left);
+ cgen()->frame()->Push(&right);
+ Result answer = cgen()->frame()->CallStub(&stub_, 2);
exit_.Jump(&answer);
}
@@ -1014,8 +1010,7 @@
// Implements a binary operation using a deferred code object
// and some inline code to operate on smis quickly.
DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(this, op, overwrite_mode,
- SMI_CODE_INLINED);
+ new DeferredInlineBinaryOperation(op, overwrite_mode, SMI_CODE_INLINED);
// Generate the inline code that handles some smi operations,
// and jumps to the deferred code for everything else.
Result answer = deferred->GenerateInlineCode(left, right);
@@ -1026,12 +1021,10 @@
class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlineSmiOperation(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperation(Token::Value op,
Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperation");
@@ -1049,22 +1042,20 @@
void DeferredInlineSmiOperation::Generate() {
Result left;
enter()->Bind(&left);
- generator()->frame()->Push(&left);
- generator()->frame()->Push(value_);
+ cgen()->frame()->Push(&left);
+ cgen()->frame()->Push(value_);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
+ Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
}
class DeferredInlineSmiOperationReversed: public DeferredCode {
public:
- DeferredInlineSmiOperationReversed(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperationReversed(Token::Value op,
Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperationReversed");
@@ -1082,21 +1073,19 @@
void DeferredInlineSmiOperationReversed::Generate() {
Result right;
enter()->Bind(&right);
- generator()->frame()->Push(value_);
- generator()->frame()->Push(&right);
+ cgen()->frame()->Push(value_);
+ cgen()->frame()->Push(&right);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
+ Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
}
class DeferredInlineSmiAdd: public DeferredCode {
public:
- DeferredInlineSmiAdd(CodeGenerator* generator,
- Smi* value,
+ DeferredInlineSmiAdd(Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
+ : value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAdd");
}
@@ -1109,28 +1098,11 @@
};
-void DeferredInlineSmiAdd::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- Result left; // Initially left + value_.
- enter()->Bind(&left);
- left.ToRegister();
- generator()->frame()->Spill(left.reg());
- __ sub(Operand(left.reg()), Immediate(value_));
- generator()->frame()->Push(&left);
- generator()->frame()->Push(value_);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
-}
-
-
class DeferredInlineSmiAddReversed: public DeferredCode {
public:
- DeferredInlineSmiAddReversed(CodeGenerator* generator,
- Smi* value,
+ DeferredInlineSmiAddReversed(Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
+ : value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAddReversed");
}
@@ -1143,28 +1115,11 @@
};
-void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- Result right; // Initially value_ + right.
- enter()->Bind(&right);
- right.ToRegister();
- generator()->frame()->Spill(right.reg());
- __ sub(Operand(right.reg()), Immediate(value_));
- generator()->frame()->Push(value_);
- generator()->frame()->Push(&right);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
- exit_.Jump(&answer);
-}
-
-
class DeferredInlineSmiSub: public DeferredCode {
public:
- DeferredInlineSmiSub(CodeGenerator* generator,
- Smi* value,
+ DeferredInlineSmiSub(Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
+ : value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiSub");
}
@@ -1177,28 +1132,64 @@
};
+#undef __
+#define __ ACCESS_MASM(cgen()->masm())
+
+
+void DeferredInlineSmiAdd::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ Result left; // Initially left + value_.
+ enter()->Bind(&left);
+ left.ToRegister();
+ cgen()->frame()->Spill(left.reg());
+ __ sub(Operand(left.reg()), Immediate(value_));
+ cgen()->frame()->Push(&left);
+ cgen()->frame()->Push(value_);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = cgen()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ Result right; // Initially value_ + right.
+ enter()->Bind(&right);
+ right.ToRegister();
+ cgen()->frame()->Spill(right.reg());
+ __ sub(Operand(right.reg()), Immediate(value_));
+ cgen()->frame()->Push(value_);
+ cgen()->frame()->Push(&right);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = cgen()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
void DeferredInlineSmiSub::Generate() {
// Undo the optimistic sub operation and call the shared stub.
Result left; // Initially left - value_.
enter()->Bind(&left);
left.ToRegister();
- generator()->frame()->Spill(left.reg());
+ cgen()->frame()->Spill(left.reg());
__ add(Operand(left.reg()), Immediate(value_));
- generator()->frame()->Push(&left);
- generator()->frame()->Push(value_);
+ cgen()->frame()->Push(&left);
+ cgen()->frame()->Push(value_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
+ Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+
class DeferredInlineSmiSubReversed: public DeferredCode {
public:
- DeferredInlineSmiSubReversed(CodeGenerator* generator,
- Smi* value,
+ DeferredInlineSmiSubReversed(Smi* value,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- value_(value),
+ : value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiSubReversed");
}
@@ -1215,10 +1206,10 @@
// Call the shared stub.
Result right;
enter()->Bind(&right);
- generator()->frame()->Push(value_);
- generator()->frame()->Push(&right);
+ cgen()->frame()->Push(value_);
+ cgen()->frame()->Push(&right);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- Result answer = generator()->frame()->CallStub(&igostub, 2);
+ Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
}
@@ -1261,10 +1252,9 @@
DeferredCode* deferred = NULL;
if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(this, smi_value,
- overwrite_mode);
+ deferred = new DeferredInlineSmiAddReversed(smi_value, overwrite_mode);
} else {
- deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
+ deferred = new DeferredInlineSmiAdd(smi_value, overwrite_mode);
}
deferred->SetEntryFrame(operand);
deferred->enter()->Branch(overflow, operand, not_taken);
@@ -1281,8 +1271,7 @@
if (reversed) {
answer = allocator()->Allocate();
ASSERT(answer.is_valid());
- deferred = new DeferredInlineSmiSubReversed(this, smi_value,
- overwrite_mode);
+ deferred = new DeferredInlineSmiSubReversed(smi_value, overwrite_mode);
__ Set(answer.reg(), Immediate(value));
// We are in the reversed case so they can't both be Smi constants.
ASSERT(operand->is_register());
@@ -1290,7 +1279,7 @@
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
- deferred = new DeferredInlineSmiSub(this, smi_value, overwrite_mode);
+ deferred = new DeferredInlineSmiSub(smi_value, overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
answer = *operand;
}
@@ -1314,8 +1303,7 @@
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, Token::SAR, smi_value,
- overwrite_mode);
+ new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
@@ -1340,8 +1328,7 @@
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, Token::SHR, smi_value,
- overwrite_mode);
+ new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
@@ -1375,8 +1362,7 @@
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, Token::SHL, smi_value,
- overwrite_mode);
+ new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
@@ -1409,10 +1395,10 @@
case Token::BIT_AND: {
DeferredCode* deferred = NULL;
if (reversed) {
- deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value,
+ deferred = new DeferredInlineSmiOperationReversed(op, smi_value,
overwrite_mode);
} else {
- deferred = new DeferredInlineSmiOperation(this, op, smi_value,
+ deferred = new DeferredInlineSmiOperation(op, smi_value,
overwrite_mode);
}
operand->ToRegister();
@@ -1698,8 +1684,7 @@
class DeferredStackCheck: public DeferredCode {
public:
- explicit DeferredStackCheck(CodeGenerator* generator)
- : DeferredCode(generator) {
+ explicit DeferredStackCheck() {
set_comment("[ DeferredStackCheck");
}
@@ -1710,7 +1695,7 @@
void DeferredStackCheck::Generate() {
enter()->Bind();
StackCheckStub stub;
- Result ignored = generator()->frame()->CallStub(&stub, 0);
+ Result ignored = cgen()->frame()->CallStub(&stub, 0);
ignored.Unuse();
exit_.Jump();
}
@@ -1718,7 +1703,7 @@
void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck(this);
+ DeferredStackCheck* deferred = new DeferredStackCheck;
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit));
@@ -1772,13 +1757,14 @@
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- frame_->Push(pairs);
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
- // Duplicate the context register.
- Result context(esi);
- frame_->Push(&context);
-
- frame_->Push(Smi::FromInt(is_eval() ? 1 : 0));
+ frame_->EmitPush(Immediate(pairs));
+ frame_->EmitPush(esi); // The context is the second argument.
+ frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
@@ -1798,24 +1784,25 @@
// Variables with a "LOOKUP" slot were introduced as non-locals
// during variable resolution and must have mode DYNAMIC.
ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Duplicate the context register.
- Result context(esi);
- frame_->Push(&context);
- frame_->Push(var->name());
+ // For now, just do a runtime call. Sync the virtual frame eagerly
+ // so we can simply push the arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->Push(Smi::FromInt(attr));
+ frame_->EmitPush(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- frame_->Push(Factory::the_hole_value());
+ frame_->EmitPush(Immediate(Factory::the_hole_value()));
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
- frame_->Push(Smi::FromInt(0)); // no initial value!
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
}
Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
@@ -3189,13 +3176,18 @@
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ // Call the runtime to instantiate the function boilerplate object.
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
ASSERT(boilerplate->IsBoilerplate());
+ frame_->SyncRange(0, frame_->element_count() - 1);
// Push the boilerplate on the stack.
- frame_->Push(boilerplate);
+ frame_->EmitPush(Immediate(boilerplate));
// Create a new closure.
- frame_->Push(esi);
+ frame_->EmitPush(esi);
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
@@ -3301,8 +3293,12 @@
}
slow.Bind();
- frame_->Push(esi);
- frame_->Push(slot->var()->name());
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(slot->var()->name()));
if (typeof_state == INSIDE_TYPEOF) {
value =
frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
@@ -3357,7 +3353,7 @@
JumpTarget* slow) {
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
- Result context(esi);
+ Register context = esi;
Result tmp = allocator_->Allocate();
ASSERT(tmp.is_valid()); // All non-reserved registers were available.
@@ -3366,14 +3362,14 @@
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
slow->Branch(not_equal, not_taken);
}
// Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
__ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp;
+ context = tmp.reg();
}
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
@@ -3386,8 +3382,8 @@
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
Label next, fast;
- if (!context.reg().is(tmp.reg())) {
- __ mov(tmp.reg(), context.reg());
+ if (!context.is(tmp.reg())) {
+ __ mov(tmp.reg(), context);
}
__ bind(&next);
// Terminate at global context.
@@ -3403,7 +3399,6 @@
__ jmp(&next);
__ bind(&fast);
}
- context.Unuse();
tmp.Unuse();
// All extension objects were empty and it is safe to use a global
@@ -3428,9 +3423,13 @@
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- // For now, just do a runtime call.
- frame_->Push(esi);
- frame_->Push(slot->var()->name());
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(slot->var()->name()));
Result value;
if (init_state == CONST_INIT) {
@@ -3562,8 +3561,7 @@
class DeferredRegExpLiteral: public DeferredCode {
public:
- DeferredRegExpLiteral(CodeGenerator* generator, RegExpLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredRegExpLiteral(RegExpLiteral* node) : node_(node) {
set_comment("[ DeferredRegExpLiteral");
}
@@ -3580,7 +3578,7 @@
// Since the entry is undefined we call the runtime system to
// compute the literal.
- VirtualFrame* frame = generator()->frame();
+ VirtualFrame* frame = cgen()->frame();
// Literal array (0).
frame->Push(&literals);
// Literal index (1).
@@ -3597,7 +3595,7 @@
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
Comment cmnt(masm_, "[ RegExp Literal");
- DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node);
+ DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(node);
// Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a
@@ -3638,9 +3636,7 @@
// therefore context dependent.
class DeferredObjectLiteral: public DeferredCode {
public:
- DeferredObjectLiteral(CodeGenerator* generator,
- ObjectLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
set_comment("[ DeferredObjectLiteral");
}
@@ -3657,7 +3653,7 @@
// Since the entry is undefined we call the runtime system to
// compute the literal.
- VirtualFrame* frame = generator()->frame();
+ VirtualFrame* frame = cgen()->frame();
// Literal array (0).
frame->Push(&literals);
// Literal index (1).
@@ -3672,7 +3668,7 @@
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
// Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a
@@ -3776,9 +3772,7 @@
// therefore context dependent.
class DeferredArrayLiteral: public DeferredCode {
public:
- DeferredArrayLiteral(CodeGenerator* generator,
- ArrayLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
set_comment("[ DeferredArrayLiteral");
}
@@ -3795,7 +3789,7 @@
// Since the entry is undefined we call the runtime system to
// compute the literal.
- VirtualFrame* frame = generator()->frame();
+ VirtualFrame* frame = cgen()->frame();
// Literal array (0).
frame->Push(&literals);
// Literal index (1).
@@ -3810,7 +3804,7 @@
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
+ DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
// Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a
@@ -4061,15 +4055,23 @@
// JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
// ----------------------------------
- // Load the function
- frame_->Push(esi);
- frame_->Push(var->name());
+ // Load the function from the context. Sync the frame so we can
+ // push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(var->name()));
frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // eax: slot value; edx: receiver
+ // The runtime call returns a pair of values in eax and edx. The
+ // looked-up function is in eax and the receiver is in edx. These
+ // register references are not ref counted here. We spill them
+ // eagerly since they are arguments to an inevitable call (and are
+ // not sharable by the arguments).
+ ASSERT(!allocator()->is_used(eax));
+ frame_->EmitPush(eax);
// Load the receiver.
- frame_->Push(eax);
- frame_->Push(edx);
+ ASSERT(!allocator()->is_used(edx));
+ frame_->EmitPush(edx);
// Call the function.
CallWithArguments(args, node->position());
@@ -4638,12 +4640,17 @@
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // lookup the context holding the named variable
- frame_->Push(esi);
- frame_->Push(variable->name());
+ // Call the runtime to look up the context holding the named
+ // variable. Sync the virtual frame eagerly so we can push the
+ // arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(variable->name()));
Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- frame_->Push(&context);
- frame_->Push(variable->name());
+ ASSERT(context.is_register());
+ frame_->EmitPush(context.reg());
+ context.Unuse();
+ frame_->EmitPush(Immediate(variable->name()));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 2);
frame_->Push(&answer);
@@ -4752,12 +4759,10 @@
class DeferredCountOperation: public DeferredCode {
public:
- DeferredCountOperation(CodeGenerator* generator,
- bool is_postfix,
+ DeferredCountOperation(bool is_postfix,
bool is_increment,
int target_size)
- : DeferredCode(generator),
- is_postfix_(is_postfix),
+ : is_postfix_(is_postfix),
is_increment_(is_increment),
target_size_(target_size) {
set_comment("[ DeferredCountOperation");
@@ -4772,11 +4777,14 @@
};
+#undef __
+#define __ ACCESS_MASM(cgen()->masm())
+
+
void DeferredCountOperation::Generate() {
- CodeGenerator* cgen = generator();
Result value;
enter()->Bind(&value);
- VirtualFrame* frame = cgen->frame();
+ VirtualFrame* frame = cgen()->frame();
// Undo the optimistic smi operation.
value.ToRegister();
frame->Spill(value.reg());
@@ -4804,6 +4812,10 @@
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+
void CodeGenerator::VisitCountOperation(CountOperation* node) {
Comment cmnt(masm_, "[ CountOperation");
@@ -4831,8 +4843,7 @@
target.TakeValue(NOT_INSIDE_TYPEOF);
DeferredCountOperation* deferred =
- new DeferredCountOperation(this, is_postfix,
- is_increment, target.size());
+ new DeferredCountOperation(is_postfix, is_increment, target.size());
Result value = frame_->Pop();
value.ToRegister();
@@ -5256,8 +5267,7 @@
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
- DeferredReferenceGetNamedValue(CodeGenerator* cgen, Handle<String> name)
- : DeferredCode(cgen), name_(name) {
+ explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
@@ -5271,35 +5281,10 @@
};
-void DeferredReferenceGetNamedValue::Generate() {
- CodeGenerator* cgen = generator();
- Result receiver;
- enter()->Bind(&receiver);
-
- cgen->frame()->Push(&receiver);
- cgen->frame()->Push(name_);
- Result answer = cgen->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- // The call must be followed by a test eax instruction to indicate
- // that the inobject property case was inlined.
- ASSERT(answer.is_register() && answer.reg().is(eax));
- // Store the delta to the map check instruction here in the test instruction.
- // Use masm_-> instead of the double underscore macro since the latter can't
- // return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the double underscore macro because
- // this is the instruction that gets patched and coverage code gets in
- // the way.
- masm_->test(answer.reg(), Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
- receiver = cgen->frame()->Pop();
- exit_.Jump(&receiver, &answer);
-}
-
-
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
- DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global)
- : DeferredCode(generator), is_global_(is_global) {
+ explicit DeferredReferenceGetKeyedValue(bool is_global)
+ : is_global_(is_global) {
set_comment("[ DeferredReferenceGetKeyedValue");
}
@@ -5313,13 +5298,41 @@
};
+#undef __
+#define __ ACCESS_MASM(cgen()->masm())
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+ Result receiver;
+ enter()->Bind(&receiver);
+
+ cgen()->frame()->Push(&receiver);
+ cgen()->frame()->Push(name_);
+ Result answer = cgen()->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ // The call must be followed by a test eax instruction to indicate
+ // that the inobject property case was inlined.
+ ASSERT(answer.is_register() && answer.reg().is(eax));
+ // Store the delta to the map check instruction here in the test
+ // instruction. Use cgen()->masm()-> instead of the __ macro since
+ // the latter can't return a value.
+ int delta_to_patch_site =
+ cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use cgen()->masm()-> instead of the __ macro because this
+ // is the instruction that gets patched and coverage code gets in the
+ // way.
+ cgen()->masm()->test(answer.reg(), Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ receiver = cgen()->frame()->Pop();
+ exit_.Jump(&receiver, &answer);
+}
+
+
void DeferredReferenceGetKeyedValue::Generate() {
- CodeGenerator* cgen = generator();
Result receiver;
Result key;
enter()->Bind(&receiver, &key);
- cgen->frame()->Push(&receiver); // First IC argument.
- cgen->frame()->Push(&key); // Second IC argument.
+ cgen()->frame()->Push(&receiver); // First IC argument.
+ cgen()->frame()->Push(&key); // Second IC argument.
// Calculate the delta from the IC call instruction to the map check
// cmp instruction in the inlined version. This delta is stored in
@@ -5330,28 +5343,30 @@
RelocInfo::Mode mode = is_global_
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
- Result value = cgen->frame()->CallKeyedLoadIC(mode);
+ Result value = cgen()->frame()->CallKeyedLoadIC(mode);
// The result needs to be specifically the eax register because the
// offset to the patch site will be expected in a test eax
// instruction.
ASSERT(value.is_register() && value.reg().is(eax));
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_ directly here instead of the
- // double underscore macro because the macro sometimes uses macro
- // expansion to turn into something that can't return a value. This
- // is encountered when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the double underscore macro because this
- // is the instruction that gets patched and coverage code gets in the way.
- masm_->test(value.reg(), Immediate(-delta_to_patch_site));
+ // The delta from the start of the map-compare instruction to the test
+ // instruction. We use cgen()->masm() directly here instead of the __
+ // macro because the macro sometimes uses macro expansion to turn into
+ // something that can't return a value. This is encountered when
+ // doing generated code coverage tests.
+ int delta_to_patch_site =
+ cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use cgen()->masm()-> instead of the __ macro because this
+ // is the instruction that gets patched and coverage code gets in the
+ // way.
+ cgen()->masm()->test(value.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
// The receiver and key were spilled by the call, so their state as
// constants or copies has been changed. Thus, they need to be
// "mergable" in the block at the exit label and are therefore
// passed as return results here.
- key = cgen->frame()->Pop();
- receiver = cgen->frame()->Pop();
+ key = cgen()->frame()->Pop();
+ receiver = cgen()->frame()->Pop();
exit_.Jump(&receiver, &key, &value);
}
@@ -5424,7 +5439,7 @@
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(cgen_, GetName());
+ new DeferredReferenceGetNamedValue(GetName());
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
@@ -5489,7 +5504,7 @@
if (cgen_->loop_nesting() > 0) {
Comment cmnt(masm, "[ Inlined array index load");
DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(cgen_, is_global);
+ new DeferredReferenceGetKeyedValue(is_global);
Result key = cgen_->frame()->Pop();
Result receiver = cgen_->frame()->Pop();
@@ -5717,11 +5732,9 @@
}
-#undef __
-#define __ ACCESS_MASM(masm_)
-
Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
Result* right) {
+ MacroAssembler* masm = cgen()->masm();
// Perform fast-case smi code for the operation (left <op> right) and
// returns the result in a Result.
// If any fast-case tests fail, it jumps to the slow-case deferred code,
@@ -5735,7 +5748,7 @@
// A newly allocated register answer is used to hold the answer.
// The registers containing left and right are not modified in
// most cases, so they usually don't need to be spilled in the fast case.
- Result answer = generator()->allocator()->Allocate();
+ Result answer = cgen()->allocator()->Allocate();
ASSERT(answer.is_valid());
// Perform the smi check.
@@ -5806,8 +5819,8 @@
// from left and right, and is spilled.
// The value in left is copied to answer.
- Result reg_eax = generator()->allocator()->Allocate(eax);
- Result reg_edx = generator()->allocator()->Allocate(edx);
+ Result reg_eax = cgen()->allocator()->Allocate(eax);
+ Result reg_edx = cgen()->allocator()->Allocate(edx);
// These allocations may have failed, if one of left, right, or answer
// is in register eax or edx.
bool left_copied_to_eax = false; // We will make sure this becomes true.
@@ -5821,7 +5834,7 @@
// We use answer if it is not edx, otherwise we allocate one.
if (answer.reg().is(edx)) {
reg_edx = answer;
- answer = generator()->allocator()->Allocate();
+ answer = cgen()->allocator()->Allocate();
ASSERT(answer.is_valid());
}
@@ -5852,7 +5865,7 @@
// Is answer used?
if (answer.reg().is(eax) || answer.reg().is(left->reg()) ||
answer.reg().is(right->reg())) {
- answer = generator()->allocator()->Allocate();
+ answer = cgen()->allocator()->Allocate();
ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls.
}
if (left->reg().is(edx)) {
@@ -5871,12 +5884,12 @@
ASSERT(!right->reg().is(eax));
answer = reg_eax; // May free answer, if it was never used.
- generator()->frame()->Spill(eax);
+ cgen()->frame()->Spill(eax);
if (!left_copied_to_eax) {
__ mov(eax, left->reg());
left_copied_to_eax = true;
}
- generator()->frame()->Spill(edx);
+ cgen()->frame()->Spill(edx);
// Postcondition:
// reg_eax, reg_edx are valid, correct, and spilled.
@@ -5966,7 +5979,7 @@
// spilling left.
*left = answer;
} else if (left->reg().is(ecx)) {
- generator()->frame()->Spill(left->reg());
+ cgen()->frame()->Spill(left->reg());
__ mov(left->reg(), right->reg());
*right = *left;
*left = answer; // Use copy of left in answer as left.
@@ -5974,7 +5987,7 @@
__ mov(answer.reg(), right->reg());
*right = answer;
} else {
- Result reg_ecx = generator()->allocator()->Allocate(ecx);
+ Result reg_ecx = cgen()->allocator()->Allocate(ecx);
ASSERT(reg_ecx.is_valid());
__ mov(ecx, right->reg());
*right = reg_ecx;
@@ -5992,8 +6005,8 @@
// the same answer.
// We are modifying left and right. They must be spilled!
- generator()->frame()->Spill(left->reg());
- generator()->frame()->Spill(right->reg());
+ cgen()->frame()->Spill(left->reg());
+ cgen()->frame()->Spill(right->reg());
// Remove tags from operands (but keep sign).
__ sar(left->reg(), kSmiTagSize);
@@ -6063,9 +6076,6 @@
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and
// leave result in register eax.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 1307727..9b609a1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -333,8 +333,7 @@
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -580,8 +579,8 @@
void CodeForSourcePosition(int pos);
#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should be
- // no frame-external references to eax, ebx, ecx, edx, or edi.
+ // True if the registers are valid for entry to a block. There should
+ // be no frame-external references to (non-reserved) registers.
bool HasValidEntryRegisters();
#endif
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
index 55421a2..9644a16 100644
--- a/src/ia32/jump-target-ia32.cc
+++ b/src/ia32/jump-target-ia32.cc
@@ -84,7 +84,7 @@
// branch.
VirtualFrame* fall_through_frame = cgen()->frame();
VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+ RegisterFile non_frame_registers;
cgen()->SetFrame(branch_frame, &non_frame_registers);
// Check if we can avoid merge code.
@@ -179,14 +179,14 @@
ASSERT(reaching_frames_.is_empty());
ASSERT(!cgen()->has_valid_frame());
- RegisterFile reserved = RegisterAllocator::Reserved();
+ RegisterFile empty;
if (direction_ == BIDIRECTIONAL) {
// Copy the entry frame so the original can be used for a
// possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &reserved);
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
} else {
// Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &reserved);
+ cgen()->SetFrame(entry_frame_, &empty);
entry_frame_ = NULL;
}
__ bind(&entry_label_);
@@ -200,8 +200,7 @@
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
VirtualFrame* frame = cgen()->frame();
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(Operand(esp), Immediate(difference * kPointerSize));
@@ -225,15 +224,14 @@
// possible backward jumps. Pick up the only reaching frame, take
// ownership of it, and use it for the block about to be emitted.
VirtualFrame* frame = reaching_frames_[0];
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen()->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[0] = NULL;
__ bind(&merge_labels_[0]);
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(Operand(esp), Immediate(difference * kPointerSize));
@@ -291,11 +289,11 @@
}
// Pick up the frame for this block. Assume ownership if
// there cannot be backward jumps.
- RegisterFile reserved = RegisterAllocator::Reserved();
+ RegisterFile empty;
if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &reserved);
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
} else {
- cgen()->SetFrame(frame, &reserved);
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
__ bind(&merge_labels_[i]);
@@ -318,8 +316,8 @@
// If this is the fall through frame, and it didn't need
// merge code, we need to pick up the frame so we can jump
// around subsequent merge blocks if necessary.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen()->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
}
@@ -329,8 +327,8 @@
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
if (!cgen()->has_valid_frame()) {
- RegisterFile reserved_registers = RegisterAllocator::Reserved();
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
// There may be unprocessed reaching frames that did not need
@@ -355,8 +353,8 @@
// Use a copy of the reaching frame so the original can be saved
// for possible reuse as a backward merge block.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
__ bind(&merge_labels_[0]);
cgen()->frame()->MergeTo(entry_frame_);
}
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 92cd019..04a5390 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1195,10 +1195,11 @@
const byte* new_address = StringCharacterPosition(*subject, start_index);
if (start_address != new_address) {
- // If there is a difference, update start and end addresses in the
- // RegExp stack frame to match the new value.
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
int byte_length = end_address - start_address;
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
}
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
new file mode 100644
index 0000000..ddee472
--- /dev/null
+++ b/src/ia32/register-allocator-ia32-inl.h
@@ -0,0 +1,82 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ // The code for this test relies on the order of register codes.
+ return reg.code() >= esp.code() && reg.code() <= esi.code();
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+
+// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ static int numbers[] = {
+ 0, // eax
+ 2, // ecx
+ 3, // edx
+ 1, // ebx
+ -1, // esp
+ -1, // ebp
+ -1, // esi
+ 4 // edi
+ };
+ return numbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ static Register registers[] = { eax, ebx, ecx, edx, edi };
+ return registers[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved edi register is live on JS function entry.
+ Use(edi); // JS function.
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index 2a51e87..2914960 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -84,46 +84,6 @@
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
-RegisterFile RegisterAllocator::Reserved() {
- RegisterFile reserved;
- reserved.Use(esp);
- reserved.Use(ebp);
- reserved.Use(esi);
- return reserved;
-}
-
-
-void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
- register_file->ref_counts_[esp.code()] = 0;
- register_file->ref_counts_[ebp.code()] = 0;
- register_file->ref_counts_[esi.code()] = 0;
-}
-
-
-bool RegisterAllocator::IsReserved(int reg_code) {
- // Test below relies on the order of register codes.
- return reg_code >= esp.code() && reg_code <= esi.code();
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The following register is live on function entry, saved in the
- // frame, and available for allocation during execution.
- Use(edi); // JS function.
-}
-
-
-void RegisterAllocator::Reset() {
- registers_.Reset();
- // The following registers are live on function entry and reserved
- // during execution.
- Use(esp); // Stack pointer.
- Use(ebp); // Frame pointer (caller's frame pointer on entry).
- Use(esi); // Context (callee's context on entry).
-}
-
-
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
Result result = AllocateWithoutSpilling();
// Check that the register is a byte register. If not, unuse the
diff --git a/src/ia32/register-allocator-ia32.h b/src/ia32/register-allocator-ia32.h
new file mode 100644
index 0000000..e7ce91f
--- /dev/null
+++ b/src/ia32/register-allocator-ia32.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = 5;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index f86613d..5f85de7 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -47,7 +47,7 @@
for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement());
}
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
@@ -156,7 +156,7 @@
// [min(stack_pointer_ + 1,begin), end].
void VirtualFrame::SyncRange(int begin, int end) {
ASSERT(begin >= 0);
- ASSERT(end < elements_.length());
+ ASSERT(end < element_count());
// Sync elements below the range if they have not been materialized
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
@@ -176,12 +176,12 @@
void VirtualFrame::MakeMergable(int mergable_elements) {
if (mergable_elements == JumpTarget::kAllElements) {
- mergable_elements = elements_.length();
+ mergable_elements = element_count();
}
- ASSERT(mergable_elements <= elements_.length());
+ ASSERT(mergable_elements <= element_count());
- int start_index = elements_.length() - mergable_elements;
- for (int i = start_index; i < elements_.length(); i++) {
+ int start_index = element_count() - mergable_elements;
+ for (int i = start_index; i < element_count(); i++) {
FrameElement element = elements_[i];
if (element.is_constant() || element.is_copy()) {
@@ -281,7 +281,7 @@
// of the index of the frame element esi is caching or kIllegalIndex
// if esi has not been disturbed.
int esi_caches = kIllegalIndex;
- for (int i = elements_.length() - 1; i >= 0; i--) {
+ for (int i = element_count() - 1; i >= 0; i--) {
FrameElement target = expected->elements_[i];
if (target.is_register()) continue; // Handle registers later.
if (target.is_memory()) {
@@ -347,17 +347,16 @@
// We have already done X-to-memory moves.
ASSERT(stack_pointer_ >= expected->stack_pointer_);
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
// Move the right value into register i if it is currently in a register.
- int index = expected->register_locations_[i];
- int use_index = register_locations_[i];
+ int index = expected->register_location(i);
+ int use_index = register_location(i);
// Skip if register i is unused in the target or else if source is
// not a register (this is not a register-to-register move).
if (index == kIllegalIndex || !elements_[index].is_register()) continue;
- Register target = { i };
+ Register target = RegisterAllocator::ToRegister(i);
Register source = elements_[index].reg();
-
if (index != use_index) {
if (use_index == kIllegalIndex) { // Target is currently unused.
// Copy contents of source from source to target.
@@ -369,8 +368,8 @@
// Exchange contents of registers source and target.
// Nothing except the register backing use_index has changed.
elements_[use_index].set_reg(source);
- register_locations_[target.code()] = index;
- register_locations_[source.code()] = use_index;
+ set_register_location(target, index);
+ set_register_location(source, use_index);
__ xchg(source, target);
}
}
@@ -390,20 +389,24 @@
// register code order, we have special code to ensure that the backing
// elements of copies are in their correct locations when we
// encounter the copies.
- for (int i = 0; i < kNumRegisters; i++) {
- int index = expected->register_locations_[i];
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int index = expected->register_location(i);
if (index != kIllegalIndex) {
FrameElement source = elements_[index];
FrameElement target = expected->elements_[index];
- Register target_reg = { i };
+ Register target_reg = RegisterAllocator::ToRegister(i);
ASSERT(target.reg().is(target_reg));
switch (source.type()) {
case FrameElement::INVALID: // Fall through.
UNREACHABLE();
break;
case FrameElement::REGISTER:
- ASSERT(source.reg().is(target_reg));
- continue; // Go to next iteration. Skips Use(target_reg) below.
+ ASSERT(source.Equals(target));
+ // Go to next iteration. Skips Use(target_reg) and syncing
+ // below. It is safe to skip syncing because a target
+ // register frame element would only be synced if all source
+ // elements were.
+ continue;
break;
case FrameElement::MEMORY:
ASSERT(index <= stack_pointer_);
@@ -443,8 +446,7 @@
}
}
}
- // Ensure the proper sync state. If the source was memory no
- // code needs to be emitted.
+ // Ensure the proper sync state.
if (target.is_synced() && !source.is_synced()) {
__ mov(Operand(ebp, fp_relative(index)), target_reg);
}
@@ -482,7 +484,7 @@
// Store the function in the frame. The frame owns the register
// reference now (ie, it can keep it in edi or spill it later).
Push(edi);
- SyncElementAt(elements_.length() - 1);
+ SyncElementAt(element_count() - 1);
cgen()->allocator()->Unuse(edi);
}
@@ -499,7 +501,7 @@
// debugger. See VisitReturnStatement for the full return sequence.
__ mov(esp, Operand(ebp));
stack_pointer_ = frame_pointer();
- for (int i = elements_.length() - 1; i > stack_pointer_; i--) {
+ for (int i = element_count() - 1; i > stack_pointer_; i--) {
FrameElement last = elements_.RemoveLast();
if (last.is_register()) {
Unuse(last.reg());
@@ -518,7 +520,7 @@
// we sync them with the actual frame to allocate space for spilling
// them later. First sync everything above the stack pointer so we can
// use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, elements_.length() - 1);
+ SyncRange(stack_pointer_ + 1, element_count() - 1);
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
@@ -561,7 +563,7 @@
int new_backing_index = kIllegalIndex;
if (original.is_copied()) {
// Verify it is copied, and find first copy.
- for (int i = index + 1; i < elements_.length(); i++) {
+ for (int i = index + 1; i < element_count(); i++) {
if (elements_[i].is_copy() && elements_[i].index() == index) {
new_backing_index = i;
break;
@@ -589,7 +591,7 @@
} else {
// The original was in a register.
backing_reg = original.reg();
- register_locations_[backing_reg.code()] = new_backing_index;
+ set_register_location(backing_reg, new_backing_index);
}
// Invalidate the element at index.
elements_[index] = FrameElement::InvalidElement();
@@ -602,7 +604,7 @@
FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
}
// Update the other copies.
- for (int i = new_backing_index + 1; i < elements_.length(); i++) {
+ for (int i = new_backing_index + 1; i < element_count(); i++) {
if (elements_[i].is_copy() && elements_[i].index() == index) {
elements_[i].set_index(new_backing_index);
elements_[new_backing_index].set_copied();
@@ -614,7 +616,7 @@
void VirtualFrame::TakeFrameSlotAt(int index) {
ASSERT(index >= 0);
- ASSERT(index <= elements_.length());
+ ASSERT(index <= element_count());
FrameElement original = elements_[index];
int new_backing_store_index = InvalidateFrameSlotAt(index);
if (new_backing_store_index != kIllegalIndex) {
@@ -631,13 +633,13 @@
FrameElement new_element =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED);
- Use(fresh.reg(), elements_.length());
+ Use(fresh.reg(), element_count());
elements_.Add(new_element);
__ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
break;
}
case FrameElement::REGISTER:
- Use(original.reg(), elements_.length());
+ Use(original.reg(), element_count());
// Fall through.
case FrameElement::CONSTANT:
case FrameElement::COPY:
@@ -656,9 +658,9 @@
// a given index. The value on top of the frame is left in place.
// This is a duplicating operation, so it can create copies.
ASSERT(index >= 0);
- ASSERT(index < elements_.length());
+ ASSERT(index < element_count());
- int top_index = elements_.length() - 1;
+ int top_index = element_count() - 1;
FrameElement top = elements_[top_index];
FrameElement original = elements_[index];
if (top.is_copy() && top.index() == index) return;
@@ -705,7 +707,7 @@
__ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else {
- register_locations_[backing_element.reg().code()] = index;
+ set_register_location(backing_element.reg(), index);
if (backing_element.is_synced()) {
// If the element is a register, we will not actually move
// anything on the stack but only update the virtual frame
@@ -725,7 +727,7 @@
// All the copies of the old backing element (including the top
// element) become copies of the new backing element.
- for (int i = backing_index + 1; i < elements_.length(); i++) {
+ for (int i = backing_index + 1; i < element_count(); i++) {
if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
elements_[i].set_index(index);
}
@@ -752,7 +754,7 @@
__ mov(temp.reg(), Operand(esp, 0));
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
} else if (top.is_register()) {
- register_locations_[top.reg().code()] = index;
+ set_register_location(top.reg(), index);
// The stored-to slot has the (unsynced) register reference and
// the top element becomes a copy. The sync state of the top is
// preserved.
@@ -976,7 +978,7 @@
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
- int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
@@ -997,7 +999,7 @@
Result VirtualFrame::Pop() {
FrameElement element = elements_.RemoveLast();
- int index = elements_.length();
+ int index = element_count();
ASSERT(element.is_valid());
bool pop_needed = (stack_pointer_ == index);
@@ -1052,7 +1054,7 @@
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
@@ -1060,7 +1062,7 @@
void VirtualFrame::EmitPop(Operand operand) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(operand);
@@ -1068,7 +1070,7 @@
void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
@@ -1076,7 +1078,7 @@
void VirtualFrame::EmitPush(Operand operand) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(operand);
@@ -1084,7 +1086,7 @@
void VirtualFrame::EmitPush(Immediate immediate) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(immediate);
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 9811d5a..54cfd9d 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -83,21 +83,35 @@
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
int height() {
- return elements_.length() - expression_base_index();
+ return element_count() - expression_base_index();
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
+ }
+
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -112,7 +126,7 @@
// handler). No code will be emitted.
void Forget(int count) {
ASSERT(count >= 0);
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
ForgetElements(count);
}
@@ -127,7 +141,7 @@
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_index(reg));
+ if (is_used(reg)) SpillElementAt(register_location(reg));
}
// Spill all occurrences of an arbitrary register if possible. Return the
@@ -135,6 +149,9 @@
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
// Make this frame so that an arbitrary frame of the same height can
// be merged to it. Copies and constants are removed from the
// topmost mergable_elements elements of the frame. A
@@ -158,11 +175,8 @@
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i)) {
- Register temp = { i };
- cgen_allocator->Unuse(temp);
- }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
}
}
@@ -172,11 +186,8 @@
// binding a label.
void AttachToCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i)) {
- Register temp = { i };
- cgen_allocator->Use(temp);
- }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
}
}
@@ -211,11 +222,11 @@
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
void StoreToElementAt(int index) {
- StoreToFrameSlotAt(elements_.length() - index - 1);
+ StoreToFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
@@ -259,7 +270,7 @@
// A parameter as an assembly operand.
Operand ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
+ ASSERT(index < parameter_count());
return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
}
@@ -352,7 +363,7 @@
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -407,7 +418,7 @@
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
@@ -440,8 +451,8 @@
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) {
- ASSERT(index < elements_.length());
- ASSERT(frame_pointer() < elements_.length()); // FP is on the frame.
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
return (frame_pointer() - index) * kPointerSize;
}
@@ -450,7 +461,7 @@
// of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
ASSERT(!is_used(reg));
- register_locations_[reg.code()] = index;
+ set_register_location(reg, index);
cgen()->allocator()->Use(reg);
}
@@ -458,8 +469,8 @@
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
void Unuse(Register reg) {
- ASSERT(register_locations_[reg.code()] != kIllegalIndex);
- register_locations_[reg.code()] = kIllegalIndex;
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
cgen()->allocator()->Unuse(reg);
}
@@ -473,9 +484,6 @@
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
- void SyncRange(int begin, int end);
-
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 7500bf2..6fce1f5 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -4189,6 +4189,11 @@
void Analysis::EnsureAnalyzed(RegExpNode* that) {
+ StackLimitCheck check;
+ if (check.HasOverflowed()) {
+ fail("Stack overflow");
+ return;
+ }
if (that->info()->been_analyzed || that->info()->being_analyzed)
return;
that->info()->being_analyzed = true;
@@ -4226,16 +4231,20 @@
that->MakeCaseIndependent();
}
EnsureAnalyzed(that->on_success());
- that->CalculateOffsets();
+ if (!has_failed()) {
+ that->CalculateOffsets();
+ }
}
void Analysis::VisitAction(ActionNode* that) {
RegExpNode* target = that->on_success();
EnsureAnalyzed(target);
- // If the next node is interested in what it follows then this node
- // has to be interested too so it can pass the information on.
- that->info()->AddFromFollowing(target->info());
+ if (!has_failed()) {
+ // If the next node is interested in what it follows then this node
+ // has to be interested too so it can pass the information on.
+ that->info()->AddFromFollowing(target->info());
+ }
}
@@ -4244,6 +4253,7 @@
for (int i = 0; i < that->alternatives()->length(); i++) {
RegExpNode* node = that->alternatives()->at(i).node();
EnsureAnalyzed(node);
+ if (has_failed()) return;
// Anything the following nodes need to know has to be known by
// this node also, so it can pass it on.
info->AddFromFollowing(node->info());
@@ -4257,13 +4267,16 @@
RegExpNode* node = that->alternatives()->at(i).node();
if (node != that->loop_node()) {
EnsureAnalyzed(node);
+ if (has_failed()) return;
info->AddFromFollowing(node->info());
}
}
// Check the loop last since it may need the value of this node
// to get a correct result.
EnsureAnalyzed(that->loop_node());
- info->AddFromFollowing(that->loop_node()->info());
+ if (!has_failed()) {
+ info->AddFromFollowing(that->loop_node()->info());
+ }
}
@@ -4435,6 +4448,10 @@
data->node = node;
Analysis analysis(ignore_case);
analysis.EnsureAnalyzed(node);
+ if (analysis.has_failed()) {
+ const char* error_message = analysis.error_message();
+ return CompilationResult(error_message);
+ }
NodeInfo info = *node->info();
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 18bd19b..a86f7e6 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1310,7 +1310,7 @@
class Analysis: public NodeVisitor {
public:
explicit Analysis(bool ignore_case)
- : ignore_case_(ignore_case) { }
+ : ignore_case_(ignore_case), error_message_(NULL) { }
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@@ -1319,8 +1319,17 @@
#undef DECLARE_VISIT
virtual void VisitLoopChoice(LoopChoiceNode* that);
+ bool has_failed() { return error_message_ != NULL; }
+ const char* error_message() {
+ ASSERT(error_message_ != NULL);
+ return error_message_;
+ }
+ void fail(const char* error_message) {
+ error_message_ = error_message;
+ }
private:
bool ignore_case_;
+ const char* error_message_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
index 081821e..1f0676d 100644
--- a/src/jump-target-inl.h
+++ b/src/jump-target-inl.h
@@ -38,7 +38,7 @@
void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
entry_frame_->elements_[index].clear_copied();
if (target->is_register()) {
- entry_frame_->register_locations_[target->reg().code()] = index;
+ entry_frame_->set_register_location(target->reg(), index);
} else if (target->is_copy()) {
entry_frame_->elements_[target->index()].set_copied();
}
diff --git a/src/jump-target.cc b/src/jump-target.cc
index b89547b..a8eda6b 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -74,7 +74,7 @@
// A list of pointers to frame elements in the entry frame. NULL
// indicates that the element has not yet been determined.
- int length = initial_frame->elements_.length();
+ int length = initial_frame->element_count();
ZoneList<FrameElement*> elements(length);
// Convert the number of mergable elements (counted from the top
@@ -124,7 +124,7 @@
// return address). Replace those first.
entry_frame_ = new VirtualFrame();
int index = 0;
- for (; index < entry_frame_->elements_.length(); index++) {
+ for (; index < entry_frame_->element_count(); index++) {
FrameElement* target = elements[index];
// If the element is determined, set it now. Count registers. Mark
// elements as copied exactly when they have a copy. Undetermined
@@ -155,7 +155,7 @@
bool is_synced = true;
RegisterFile candidate_registers;
int best_count = kMinInt;
- int best_reg_code = no_reg.code_;
+ int best_reg_num = RegisterAllocator::kInvalidRegister;
StaticType type; // Initially invalid.
if (direction_ != BIDIRECTIONAL || i < high_water_mark) {
@@ -168,10 +168,11 @@
if (element.is_register() && !entry_frame_->is_used(element.reg())) {
// Count the register occurrence and remember it if better
// than the previous best.
- candidate_registers.Use(element.reg());
- if (candidate_registers.count(element.reg()) > best_count) {
- best_count = candidate_registers.count(element.reg());
- best_reg_code = element.reg().code();
+ int num = RegisterAllocator::ToNumber(element.reg());
+ candidate_registers.Use(num);
+ if (candidate_registers.count(num) > best_count) {
+ best_count = candidate_registers.count(num);
+ best_reg_num = num;
}
}
type = type.merge(element.static_type());
@@ -188,16 +189,16 @@
// Try to put it in a register. If there was no best choice
// consider any free register.
- if (best_reg_code == no_reg.code_) {
- for (int j = 0; j < kNumRegisters; j++) {
- if (!entry_frame_->is_used(j) && !RegisterAllocator::IsReserved(j)) {
- best_reg_code = j;
+ if (best_reg_num == RegisterAllocator::kInvalidRegister) {
+ for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
+ if (!entry_frame_->is_used(j)) {
+ best_reg_num = j;
break;
}
}
}
- if (best_reg_code == no_reg.code_) {
+ if (best_reg_num == RegisterAllocator::kInvalidRegister) {
// If there was no register found, the element is already
// recorded as in memory.
entry_frame_->elements_[i].set_static_type(type);
@@ -205,13 +206,13 @@
// If there was a register choice, use it. Preserve the copied
// flag on the element. Set the static type as computed.
bool is_copied = entry_frame_->elements_[i].is_copied();
- Register reg = { best_reg_code };
+ Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED);
if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->elements_[i].set_static_type(type);
- entry_frame_->register_locations_[best_reg_code] = i;
+ entry_frame_->set_register_location(reg, i);
}
}
}
diff --git a/src/log-utils.cc b/src/log-utils.cc
new file mode 100644
index 0000000..4361049
--- /dev/null
+++ b/src/log-utils.cc
@@ -0,0 +1,302 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "log-utils.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+LogDynamicBuffer::LogDynamicBuffer(
+ int block_size, int max_size, const char* seal, int seal_size)
+ : block_size_(block_size),
+ max_size_(max_size - (max_size % block_size_)),
+ seal_(seal),
+ seal_size_(seal_size),
+ blocks_(max_size_ / block_size_ + 1),
+ write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
+ ASSERT(BlocksCount() > 0);
+ AllocateBlock(0);
+ for (int i = 1; i < BlocksCount(); ++i) {
+ blocks_[i] = NULL;
+ }
+}
+
+
+LogDynamicBuffer::~LogDynamicBuffer() {
+ for (int i = 0; i < BlocksCount(); ++i) {
+ DeleteArray(blocks_[i]);
+ }
+}
+
+
+int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
+ if (buf_size == 0) return 0;
+ int read_pos = from_pos;
+ int block_read_index = BlockIndex(from_pos);
+ int block_read_pos = PosInBlock(from_pos);
+ int dest_buf_pos = 0;
+ // Read until dest_buf is filled, or write_pos_ encountered.
+ while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
+ const int read_size = Min(write_pos_ - read_pos,
+ Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
+ memcpy(dest_buf + dest_buf_pos,
+ blocks_[block_read_index] + block_read_pos, read_size);
+ block_read_pos += read_size;
+ dest_buf_pos += read_size;
+ read_pos += read_size;
+ if (block_read_pos == block_size_) {
+ block_read_pos = 0;
+ ++block_read_index;
+ }
+ }
+ return dest_buf_pos;
+}
+
+
+int LogDynamicBuffer::Seal() {
+ WriteInternal(seal_, seal_size_);
+ is_sealed_ = true;
+ return 0;
+}
+
+
+int LogDynamicBuffer::Write(const char* data, int data_size) {
+ if (is_sealed_) {
+ return 0;
+ }
+ if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
+ return WriteInternal(data, data_size);
+ } else {
+ return Seal();
+ }
+}
+
+
+int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
+ int data_pos = 0;
+ while (data_pos < data_size) {
+ const int write_size =
+ Min(data_size - data_pos, block_size_ - block_write_pos_);
+ memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
+ write_size);
+ block_write_pos_ += write_size;
+ data_pos += write_size;
+ if (block_write_pos_ == block_size_) {
+ block_write_pos_ = 0;
+ AllocateBlock(++block_index_);
+ }
+ }
+ write_pos_ += data_size;
+ return data_size;
+}
+
+
+bool Log::is_stopped_ = false;
+Log::WritePtr Log::Write = NULL;
+FILE* Log::output_handle_ = NULL;
+LogDynamicBuffer* Log::output_buffer_ = NULL;
+// Must be the same message as in Logger::PauseProfiler
+const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
+Mutex* Log::mutex_ = NULL;
+char* Log::message_buffer_ = NULL;
+
+
+void Log::Init() {
+ mutex_ = OS::CreateMutex();
+ message_buffer_ = NewArray<char>(kMessageBufferSize);
+}
+
+
+void Log::OpenStdout() {
+ ASSERT(!IsEnabled());
+ output_handle_ = stdout;
+ Write = WriteToFile;
+ Init();
+}
+
+
+void Log::OpenFile(const char* name) {
+ ASSERT(!IsEnabled());
+ output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+ Write = WriteToFile;
+ Init();
+}
+
+
+void Log::OpenMemoryBuffer() {
+ ASSERT(!IsEnabled());
+ output_buffer_ = new LogDynamicBuffer(
+ kDynamicBufferBlockSize, kMaxDynamicBufferSize,
+ kDynamicBufferSeal, strlen(kDynamicBufferSeal));
+ Write = WriteToMemory;
+ Init();
+}
+
+
+void Log::Close() {
+ if (Write == WriteToFile) {
+ fclose(output_handle_);
+ output_handle_ = NULL;
+ } else if (Write == WriteToMemory) {
+ delete output_buffer_;
+ output_buffer_ = NULL;
+ } else {
+ ASSERT(Write == NULL);
+ }
+ Write = NULL;
+
+ delete mutex_;
+ mutex_ = NULL;
+
+ is_stopped_ = false;
+}
+
+
+int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+ if (Write != WriteToMemory) return 0;
+ ASSERT(output_buffer_ != NULL);
+ ASSERT(from_pos >= 0);
+ ASSERT(max_size >= 0);
+ int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
+ ASSERT(actual_size <= max_size);
+ if (actual_size == 0) return 0;
+
+ // Find previous log line boundary.
+ char* end_pos = dest_buf + actual_size - 1;
+ while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
+ actual_size = end_pos - dest_buf + 1;
+ ASSERT(actual_size <= max_size);
+ return actual_size;
+}
+
+
+LogMessageBuilder::WriteFailureHandler
+ LogMessageBuilder::write_failure_handler = NULL;
+
+
+LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
+ ASSERT(Log::message_buffer_ != NULL);
+}
+
+
+void LogMessageBuilder::Append(const char* format, ...) {
+ Vector<char> buf(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ va_list args;
+ va_start(args, format);
+ Append(format, args);
+ va_end(args);
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(const char* format, va_list args) {
+ Vector<char> buf(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ int result = v8::internal::OS::VSNPrintF(buf, format, args);
+
+ // Result is -1 if output was truncated.
+ if (result >= 0) {
+ pos_ += result;
+ } else {
+ pos_ = Log::kMessageBufferSize;
+ }
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(const char c) {
+ if (pos_ < Log::kMessageBufferSize) {
+ Log::message_buffer_[pos_++] = c;
+ }
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
+void LogMessageBuilder::Append(String* str) {
+  AssertNoAllocation no_heap_allocation;  // Ensure string stays valid.
+ int length = str->length();
+ for (int i = 0; i < length; i++) {
+ Append(static_cast<char>(str->Get(i)));
+ }
+}
+
+
+void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+  AssertNoAllocation no_heap_allocation;  // Ensure string stays valid.
+ int len = str->length();
+ if (len > 0x1000)
+ len = 0x1000;
+ if (show_impl_info) {
+ Append(str->IsAsciiRepresentation() ? 'a' : '2');
+ if (StringShape(str).IsExternal())
+ Append('e');
+ if (StringShape(str).IsSymbol())
+ Append('#');
+ Append(":%i:", str->length());
+ }
+ for (int i = 0; i < len; i++) {
+ uc32 c = str->Get(i);
+ if (c > 0xff) {
+ Append("\\u%04x", c);
+ } else if (c < 32 || c > 126) {
+ Append("\\x%02x", c);
+ } else if (c == ',') {
+ Append("\\,");
+ } else if (c == '\\') {
+ Append("\\\\");
+ } else {
+ Append("%lc", c);
+ }
+ }
+}
+
+
+void LogMessageBuilder::WriteToLogFile() {
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+ const int written = Log::Write(Log::message_buffer_, pos_);
+ if (written != pos_ && write_failure_handler != NULL) {
+ write_failure_handler();
+ }
+}
+
+
+void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
+ const int len = strlen(str);
+ const int written = Log::Write(str, len);
+ if (written != len && write_failure_handler != NULL) {
+ write_failure_handler();
+ }
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
diff --git a/src/log-utils.h b/src/log-utils.h
new file mode 100644
index 0000000..2e8b3a3
--- /dev/null
+++ b/src/log-utils.h
@@ -0,0 +1,223 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_UTILS_H_
+#define V8_LOG_UTILS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// A memory buffer that increments its size as you write in it. Size
+// is incremented with 'block_size' steps, never exceeding 'max_size'.
+// During growth, memory contents are never copied. At the end of the
+// buffer an amount of memory specified in 'seal_size' is reserved.
+// When the writing position reaches max_size - seal_size, buffer auto-seals
+// itself with 'seal' and allows no further writes. Data pointed by
+// 'seal' must be available during entire LogDynamicBuffer lifetime.
+//
+// An instance of this class is created dynamically by Log.
+class LogDynamicBuffer {
+ public:
+ LogDynamicBuffer(
+ int block_size, int max_size, const char* seal, int seal_size);
+
+ ~LogDynamicBuffer();
+
+ // Reads contents of the buffer starting from 'from_pos'. Upon
+ // return, 'dest_buf' is filled with the data. Actual amount of data
+ // filled is returned, it is <= 'buf_size'.
+ int Read(int from_pos, char* dest_buf, int buf_size);
+
+ // Writes 'data' to the buffer, making it larger if necessary. If
+ // data is too big to fit in the buffer, it doesn't get written at
+ // all. In that case, buffer auto-seals itself and stops to accept
+ // any incoming writes. Returns amount of data written (it is either
+ // 'data_size', or 0, if 'data' is too big).
+ int Write(const char* data, int data_size);
+
+ private:
+ void AllocateBlock(int index) {
+ blocks_[index] = NewArray<char>(block_size_);
+ }
+
+ int BlockIndex(int pos) const { return pos / block_size_; }
+
+ int BlocksCount() const { return BlockIndex(max_size_) + 1; }
+
+ int PosInBlock(int pos) const { return pos % block_size_; }
+
+ int Seal();
+
+ int WriteInternal(const char* data, int data_size);
+
+ const int block_size_;
+ const int max_size_;
+ const char* seal_;
+ const int seal_size_;
+ ScopedVector<char*> blocks_;
+ int write_pos_;
+ int block_index_;
+ int block_write_pos_;
+ bool is_sealed_;
+};
+
+
+// Functions and data for performing output of log messages.
+class Log : public AllStatic {
+ public:
+ // Opens stdout for logging.
+ static void OpenStdout();
+
+ // Opens file for logging.
+ static void OpenFile(const char* name);
+
+ // Opens memory buffer for logging.
+ static void OpenMemoryBuffer();
+
+ // Disables logging, but preserves acquired resources.
+ static void stop() { is_stopped_ = true; }
+
+ // Frees all resources acquired in Open... functions.
+ static void Close();
+
+ // See description in include/v8.h.
+ static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
+ // Returns whether logging is enabled.
+ static bool IsEnabled() {
+ return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
+ }
+
+ private:
+ typedef int (*WritePtr)(const char* msg, int length);
+
+ // Initialization function called from Open... functions.
+ static void Init();
+
+ // Write functions assume that mutex_ is acquired by the caller.
+ static WritePtr Write;
+
+ // Implementation of writing to a log file.
+ static int WriteToFile(const char* msg, int length) {
+ ASSERT(output_handle_ != NULL);
+ int rv = fwrite(msg, 1, length, output_handle_);
+ ASSERT(length == rv);
+ return rv;
+ }
+
+ // Implementation of writing to a memory buffer.
+ static int WriteToMemory(const char* msg, int length) {
+ ASSERT(output_buffer_ != NULL);
+ return output_buffer_->Write(msg, length);
+ }
+
+ // Whether logging is stopped (e.g. due to insufficient resources).
+ static bool is_stopped_;
+
+ // When logging is active, either output_handle_ or output_buffer_ is used
+ // to store a pointer to log destination. If logging was opened via OpenStdout
+ // or OpenFile, then output_handle_ is used. If logging was opened
+ // via OpenMemoryBuffer, then output_buffer_ is used.
+ // mutex_ should be acquired before using output_handle_ or output_buffer_.
+ static FILE* output_handle_;
+
+ static LogDynamicBuffer* output_buffer_;
+
+ // Size of dynamic buffer block (and dynamic buffer initial size).
+ static const int kDynamicBufferBlockSize = 65536;
+
+ // Maximum size of dynamic buffer.
+ static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
+
+ // Message to "seal" dynamic buffer with.
+ static const char* kDynamicBufferSeal;
+
+ // mutex_ is a Mutex used for enforcing exclusive
+ // access to the formatting buffer and the log file or log memory buffer.
+ static Mutex* mutex_;
+
+ // Size of buffer used for formatting log messages.
+ static const int kMessageBufferSize = 2048;
+
+ // Buffer used for formatting log messages. This is a singleton buffer and
+ // mutex_ should be acquired before using it.
+ static char* message_buffer_;
+
+ friend class LogMessageBuilder;
+};
+
+
+// Utility class for formatting log messages. It fills the message into the
+// static buffer in Log.
+class LogMessageBuilder BASE_EMBEDDED {
+ public:
+ // Create a message builder starting from position 0. This acquires the mutex
+ // in the log as well.
+ explicit LogMessageBuilder();
+ ~LogMessageBuilder() { }
+
+ // Append string data to the log message.
+ void Append(const char* format, ...);
+
+ // Append string data to the log message.
+ void Append(const char* format, va_list args);
+
+ // Append a character to the log message.
+ void Append(const char c);
+
+ // Append a heap string.
+ void Append(String* str);
+
+ void AppendDetailed(String* str, bool show_impl_info);
+
+ // Write the log message to the log file currently opened.
+ void WriteToLogFile();
+
+  // Write a null-terminated string to the log file currently opened.
+ void WriteCStringToLogFile(const char* str);
+
+ // A handler that is called when Log::Write fails.
+ typedef void (*WriteFailureHandler)();
+
+ static void set_write_failure_handler(WriteFailureHandler handler) {
+ write_failure_handler = handler;
+ }
+
+ private:
+ static WriteFailureHandler write_failure_handler;
+
+ ScopedLock sl;
+ int pos_;
+};
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
+#endif // V8_LOG_UTILS_H_
diff --git a/src/log.cc b/src/log.cc
index 59018a1..c66a422 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "log.h"
+#include "log-utils.h"
#include "macro-assembler.h"
#include "platform.h"
#include "serialize.h"
@@ -255,7 +256,7 @@
// Register to get ticks.
Logger::ticker_->SetProfiler(this);
- LOG(UncheckedStringEvent("profiler", "begin"));
+ Logger::ProfilerBeginEvent();
}
@@ -287,304 +288,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// Functions and data for performing output of log messages.
-class Log : public AllStatic {
- public:
- // Opens stdout for logging.
- static void OpenStdout();
-
- // Opens file for logging.
- static void OpenFile(const char* name);
-
- // Opens memory buffer for logging.
- static void OpenMemoryBuffer();
-
- // Frees all resources acquired in Open... functions.
- static void Close();
-
- // See description in include/v8.h.
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
-
- // Returns whether logging is enabled.
- static bool IsEnabled() {
- return output_handle_ != NULL || output_buffer_ != NULL;
- }
-
- private:
- typedef int (*WritePtr)(const char* msg, int length);
-
- // Initialization function called from Open... functions.
- static void Init();
-
- // Write functions assume that mutex_ is acquired by the caller.
- static WritePtr Write;
-
- // Implementation of writing to a log file.
- static int WriteToFile(const char* msg, int length) {
- ASSERT(output_handle_ != NULL);
- int rv = fwrite(msg, 1, length, output_handle_);
- ASSERT(length == rv);
- return rv;
- }
-
- // Implementation of writing to a memory buffer.
- static int WriteToMemory(const char* msg, int length) {
- ASSERT(output_buffer_ != NULL);
- ASSERT(output_buffer_write_pos_ >= output_buffer_);
- if (output_buffer_write_pos_ + length
- <= output_buffer_ + kOutputBufferSize) {
- memcpy(output_buffer_write_pos_, msg, length);
- output_buffer_write_pos_ += length;
- return length;
- } else {
- // Memory buffer is full, ignore write.
- return 0;
- }
- }
-
- // When logging is active, either output_handle_ or output_buffer_ is used
- // to store a pointer to log destination. If logging was opened via OpenStdout
- // or OpenFile, then output_handle_ is used. If logging was opened
- // via OpenMemoryBuffer, then output_buffer_ is used.
- // mutex_ should be acquired before using output_handle_ or output_buffer_.
- static FILE* output_handle_;
-
- static char* output_buffer_;
-
- // mutex_ is a Mutex used for enforcing exclusive
- // access to the formatting buffer and the log file or log memory buffer.
- static Mutex* mutex_;
-
- // Size of buffer used for memory logging.
- static const int kOutputBufferSize = 2 * 1024 * 1024;
-
- // Writing position in a memory buffer.
- static char* output_buffer_write_pos_;
-
- // Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = 2048;
-
- // Buffer used for formatting log messages. This is a singleton buffer and
- // mutex_ should be acquired before using it.
- static char* message_buffer_;
-
- friend class LogMessageBuilder;
-};
-
-
-Log::WritePtr Log::Write = NULL;
-FILE* Log::output_handle_ = NULL;
-char* Log::output_buffer_ = NULL;
-Mutex* Log::mutex_ = NULL;
-char* Log::output_buffer_write_pos_ = NULL;
-char* Log::message_buffer_ = NULL;
-
-
-void Log::Init() {
- mutex_ = OS::CreateMutex();
- message_buffer_ = NewArray<char>(kMessageBufferSize);
-}
-
-
-void Log::OpenStdout() {
- ASSERT(!IsEnabled());
- output_handle_ = stdout;
- Write = WriteToFile;
- Init();
-}
-
-
-void Log::OpenFile(const char* name) {
- ASSERT(!IsEnabled());
- output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
- Write = WriteToFile;
- Init();
-}
-
-
-void Log::OpenMemoryBuffer() {
- ASSERT(!IsEnabled());
- output_buffer_ = NewArray<char>(kOutputBufferSize);
- output_buffer_write_pos_ = output_buffer_;
- Write = WriteToMemory;
- Init();
-}
-
-
-void Log::Close() {
- if (Write == WriteToFile) {
- fclose(output_handle_);
- output_handle_ = NULL;
- } else if (Write == WriteToMemory) {
- DeleteArray(output_buffer_);
- output_buffer_ = NULL;
- } else {
- ASSERT(Write == NULL);
- }
- Write = NULL;
-
- delete mutex_;
- mutex_ = NULL;
-
- DeleteArray(message_buffer_);
- message_buffer_ = NULL;
-}
-
-
-int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- if (Write != WriteToMemory) return 0;
- ASSERT(output_buffer_ != NULL);
- ASSERT(output_buffer_write_pos_ >= output_buffer_);
- ASSERT(from_pos >= 0);
- ASSERT(max_size >= 0);
- int actual_size = max_size;
- char* buffer_read_pos = output_buffer_ + from_pos;
- ScopedLock sl(mutex_);
- if (actual_size == 0
- || output_buffer_write_pos_ == output_buffer_
- || buffer_read_pos >= output_buffer_write_pos_) {
- // No data requested or can be returned.
- return 0;
- }
- if (buffer_read_pos + actual_size > output_buffer_write_pos_) {
- // Requested size overlaps with current writing position and
- // needs to be truncated.
- actual_size = output_buffer_write_pos_ - buffer_read_pos;
- ASSERT(actual_size == 0 || buffer_read_pos[actual_size - 1] == '\n');
- } else {
- // Find previous log line boundary.
- char* end_pos = buffer_read_pos + actual_size - 1;
- while (end_pos >= buffer_read_pos && *end_pos != '\n') --end_pos;
- actual_size = end_pos - buffer_read_pos + 1;
- }
- ASSERT(actual_size <= max_size);
- if (actual_size > 0) {
- memcpy(dest_buf, buffer_read_pos, actual_size);
- }
- return actual_size;
-}
-
-
-// Utility class for formatting log messages. It fills the message into the
-// static buffer in Log.
-class LogMessageBuilder BASE_EMBEDDED {
- public:
- explicit LogMessageBuilder();
- ~LogMessageBuilder() { }
-
- void Append(const char* format, ...);
- void Append(const char* format, va_list args);
- void Append(const char c);
- void Append(String* str);
- void AppendDetailed(String* str, bool show_impl_info);
-
- void WriteToLogFile();
- void WriteCStringToLogFile(const char* str);
-
- private:
- ScopedLock sl;
- int pos_;
-};
-
-
-// Create a message builder starting from position 0. This acquires the mutex
-// in the logger as well.
-LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
- ASSERT(Log::message_buffer_ != NULL);
-}
-
-
-// Append string data to the log message.
-void LogMessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(Log::message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- va_list args;
- va_start(args, format);
- Append(format, args);
- va_end(args);
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-// Append string data to the log message.
-void LogMessageBuilder::Append(const char* format, va_list args) {
- Vector<char> buf(Log::message_buffer_ + pos_,
- Log::kMessageBufferSize - pos_);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
-
- // Result is -1 if output was truncated.
- if (result >= 0) {
- pos_ += result;
- } else {
- pos_ = Log::kMessageBufferSize;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-// Append a character to the log message.
-void LogMessageBuilder::Append(const char c) {
- if (pos_ < Log::kMessageBufferSize) {
- Log::message_buffer_[pos_++] = c;
- }
- ASSERT(pos_ <= Log::kMessageBufferSize);
-}
-
-
-// Append a heap string.
-void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int length = str->length();
- for (int i = 0; i < length; i++) {
- Append(static_cast<char>(str->Get(i)));
- }
-}
-
-void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
- int len = str->length();
- if (len > 0x1000)
- len = 0x1000;
- if (show_impl_info) {
- Append(str->IsAsciiRepresentation() ? 'a' : '2');
- if (StringShape(str).IsExternal())
- Append('e');
- if (StringShape(str).IsSymbol())
- Append('#');
- Append(":%i:", str->length());
- }
- for (int i = 0; i < len; i++) {
- uc32 c = str->Get(i);
- if (c > 0xff) {
- Append("\\u%04x", c);
- } else if (c < 32 || c > 126) {
- Append("\\x%02x", c);
- } else if (c == ',') {
- Append("\\,");
- } else if (c == '\\') {
- Append("\\\\");
- } else {
- Append("%lc", c);
- }
- }
-}
-
-// Write the log message to the log file currently opened.
-void LogMessageBuilder::WriteToLogFile() {
- ASSERT(pos_ <= Log::kMessageBufferSize);
- Log::Write(Log::message_buffer_, pos_);
-}
-
-// Write a null-terminated string to to the log file currently opened.
-void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
- int len = strlen(str);
- Log::Write(str, len);
-}
-#endif
-
-
//
// Logger class implementation.
//
@@ -599,6 +302,14 @@
return Log::IsEnabled();
}
+
+void Logger::ProfilerBeginEvent() {
+ if (!Log::IsEnabled()) return;
+ LogMessageBuilder msg;
+ msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
+ msg.WriteToLogFile();
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
@@ -1106,16 +817,23 @@
void Logger::PauseProfiler() {
+ if (profiler_->paused()) {
+ return;
+ }
profiler_->pause();
if (FLAG_prof_lazy) {
if (!FLAG_sliding_state_window) ticker_->Stop();
FLAG_log_code = false;
+ // Must be the same message as Log::kDynamicBufferSeal.
LOG(UncheckedStringEvent("profiler", "pause"));
}
}
void Logger::ResumeProfiler() {
+ if (!profiler_->paused() || !Log::IsEnabled()) {
+ return;
+ }
if (FLAG_prof_lazy) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
@@ -1126,6 +844,14 @@
}
+// This function can be called when Log's mutex is acquired,
+// either from main or Profiler's thread.
+void Logger::StopLoggingAndProfiling() {
+ Log::stop();
+ PauseProfiler();
+}
+
+
bool Logger::IsProfilerSamplerActive() {
return ticker_->IsActive();
}
@@ -1279,7 +1005,8 @@
// as log is initialized early with V8, we can assume that JS execution
// frames can never reach this point on stack
int stack_var;
- ticker_ = new Ticker(1, reinterpret_cast<uintptr_t>(&stack_var));
+ ticker_ = new Ticker(
+ kSamplingIntervalMs, reinterpret_cast<uintptr_t>(&stack_var));
if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
sliding_state_window_ = new SlidingStateWindow();
@@ -1292,6 +1019,8 @@
profiler_->Engage();
}
+ LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
+
return true;
#else
@@ -1302,6 +1031,8 @@
void Logger::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ LogMessageBuilder::set_write_failure_handler(NULL);
+
// Stop the profiler before closing the file.
if (profiler_ != NULL) {
profiler_->Disengage();
diff --git a/src/log.h b/src/log.h
index 56cf93d..10d8833 100644
--- a/src/log.h
+++ b/src/log.h
@@ -220,6 +220,12 @@
private:
+ // Profiler's sampling interval (in milliseconds).
+ static const int kSamplingIntervalMs = 1;
+
+ // Emits the profiler's first message.
+ static void ProfilerBeginEvent();
+
// Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp);
@@ -231,6 +237,9 @@
// Logs a StringEvent regardless of whether FLAG_log is true.
static void UncheckedStringEvent(const char* name, const char* value);
+ // Stops logging and profiling in case of insufficient resources.
+ static void StopLoggingAndProfiling();
+
// Returns whether profiler's sampler is active.
static bool IsProfilerSamplerActive();
diff --git a/src/memory.h b/src/memory.h
index 86a9188..1e36bf5 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -44,6 +44,10 @@
return *reinterpret_cast<int32_t*>(addr);
}
+ static uint64_t& uint64_at(Address addr) {
+ return *reinterpret_cast<uint64_t*>(addr);
+ }
+
static int& int_at(Address addr) {
return *reinterpret_cast<int*>(addr);
}
diff --git a/src/platform.h b/src/platform.h
index f7fdafe..4522c74 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -493,7 +493,7 @@
// TickSample captures the information collected for each sample.
class TickSample {
public:
- TickSample() : pc(0), sp(0), fp(0), state(OTHER) {}
+ TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
uintptr_t pc; // Instruction pointer.
uintptr_t sp; // Stack pointer.
uintptr_t fp; // Frame pointer.
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
index a94f24e..8fb498b 100644
--- a/src/register-allocator-inl.h
+++ b/src/register-allocator-inl.h
@@ -32,6 +32,17 @@
#include "register-allocator.h"
#include "virtual-frame.h"
+#if V8_TARGET_ARCH_IA32
+#include "ia32/register-allocator-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/register-allocator-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/register-allocator-arm-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+
namespace v8 {
namespace internal {
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
index a3939cb..2599232 100644
--- a/src/register-allocator.cc
+++ b/src/register-allocator.cc
@@ -38,7 +38,7 @@
Result::Result(Register reg) {
- ASSERT(reg.is_valid());
+ ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
| TypeField::encode(REGISTER)
@@ -47,7 +47,7 @@
Result::Result(Register reg, StaticType type) {
- ASSERT(reg.is_valid());
+ ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
value_ = StaticTypeField::encode(type.static_type_)
| TypeField::encode(REGISTER)
@@ -61,12 +61,11 @@
Result RegisterAllocator::AllocateWithoutSpilling() {
// Return the first free register, if any.
- int free_reg = registers_.ScanForFreeRegister();
- if (free_reg < kNumRegisters) {
- Register free_result = { free_reg };
- return Result(free_result);
+ int num = registers_.ScanForFreeRegister();
+ if (num == RegisterAllocator::kInvalidRegister) {
+ return Result();
}
- return Result();
+ return Result(RegisterAllocator::ToRegister(num));
}
diff --git a/src/register-allocator.h b/src/register-allocator.h
index 76f4317..c539191 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -30,6 +30,16 @@
#include "macro-assembler.h"
+#if V8_TARGET_ARCH_IA32
+#include "ia32/register-allocator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/register-allocator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/register-allocator-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
namespace v8 {
namespace internal {
@@ -241,25 +251,28 @@
}
}
- // Predicates and accessors for the reference counts. The versions
- // that take a register code rather than a register are for
- // convenience in loops over the register codes.
- bool is_used(int reg_code) const { return ref_counts_[reg_code] > 0; }
- bool is_used(Register reg) const { return is_used(reg.code()); }
- int count(int reg_code) const { return ref_counts_[reg_code]; }
- int count(Register reg) const { return count(reg.code()); }
+ // Predicates and accessors for the reference counts.
+ bool is_used(int num) {
+ ASSERT(0 <= num && num < kNumRegisters);
+ return ref_counts_[num] > 0;
+ }
+
+ int count(int num) {
+ ASSERT(0 <= num && num < kNumRegisters);
+ return ref_counts_[num];
+ }
// Record a use of a register by incrementing its reference count.
- void Use(Register reg) {
- ref_counts_[reg.code()]++;
+ void Use(int num) {
+ ASSERT(0 <= num && num < kNumRegisters);
+ ref_counts_[num]++;
}
// Record that a register will no longer be used by decrementing its
// reference count.
- void Unuse(Register reg) {
- ASSERT(!reg.is(no_reg));
- ASSERT(is_used(reg.code()));
- ref_counts_[reg.code()]--;
+ void Unuse(int num) {
+ ASSERT(is_used(num));
+ ref_counts_[num]--;
}
// Copy the reference counts from this register file to the other.
@@ -270,17 +283,18 @@
}
private:
+ static const int kNumRegisters = RegisterAllocatorConstants::kNumRegisters;
+
int ref_counts_[kNumRegisters];
- // Very fast inlined loop to find a free register.
- // Used in RegisterAllocator::AllocateWithoutSpilling.
- // Returns kNumRegisters if no free register found.
- inline int ScanForFreeRegister() {
- int i = 0;
- for (; i < kNumRegisters ; ++i) {
- if (ref_counts_[i] == 0) break;
+ // Very fast inlined loop to find a free register. Used in
+ // RegisterAllocator::AllocateWithoutSpilling. Returns
+ // kInvalidRegister if no free register found.
+ int ScanForFreeRegister() {
+ for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
+ if (!is_used(i)) return i;
}
- return i;
+ return RegisterAllocatorConstants::kInvalidRegister;
}
friend class RegisterAllocator;
@@ -293,55 +307,62 @@
class RegisterAllocator BASE_EMBEDDED {
public:
+ static const int kNumRegisters =
+ RegisterAllocatorConstants::kNumRegisters;
+ static const int kInvalidRegister =
+ RegisterAllocatorConstants::kInvalidRegister;
+
explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
- // A register file with each of the reserved registers counted once.
- static RegisterFile Reserved();
-
- // Unuse all the reserved registers in a register file.
- static void UnuseReserved(RegisterFile* register_file);
-
// True if the register is reserved by the code generator, false if it
- // can be freely used by the allocator.
- static bool IsReserved(int reg_code);
- static bool IsReserved(Register reg) { return IsReserved(reg); }
+  // can be freely used by the allocator. Defined in the
+  // platform-specific XXX-inl.h files.
+ static inline bool IsReserved(Register reg);
+
+ // Convert between (unreserved) assembler registers and allocator
+ // numbers. Defined in the platform-specific XXX-inl.h files.
+ static inline int ToNumber(Register reg);
+ static inline Register ToRegister(int num);
// Predicates and accessors for the registers' reference counts.
- bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
- bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
- int count(int reg_code) const { return registers_.count(reg_code); }
- int count(Register reg) const { return registers_.count(reg.code()); }
+ bool is_used(int num) { return registers_.is_used(num); }
+ bool is_used(Register reg) { return registers_.is_used(ToNumber(reg)); }
+
+ int count(int num) { return registers_.count(num); }
+ int count(Register reg) { return registers_.count(ToNumber(reg)); }
// Explicitly record a reference to a register.
- void Use(Register reg) { registers_.Use(reg); }
+ void Use(int num) { registers_.Use(num); }
+ void Use(Register reg) { registers_.Use(ToNumber(reg)); }
// Explicitly record that a register will no longer be used.
- void Unuse(Register reg) { registers_.Unuse(reg); }
-
- // Initialize the register allocator for entry to a JS function. On
- // entry, the registers used by the JS calling convention are
- // externally referenced (ie, outside the virtual frame); and the
- // other registers are free.
- void Initialize();
+ void Unuse(int num) { registers_.Unuse(num); }
+ void Unuse(Register reg) { registers_.Unuse(ToNumber(reg)); }
// Reset the register reference counts to free all non-reserved registers.
- // A frame-external reference is kept to each of the reserved registers.
- void Reset();
+ void Reset() { registers_.Reset(); }
+
+ // Initialize the register allocator for entry to a JS function. On
+ // entry, the (non-reserved) registers used by the JS calling
+ // convention are referenced and the other (non-reserved) registers
+ // are free.
+ inline void Initialize();
// Allocate a free register and return a register result if possible or
// fail and return an invalid result.
Result Allocate();
- // Allocate a specific register if possible, spilling it from the frame if
- // necessary, or else fail and return an invalid result.
+ // Allocate a specific register if possible, spilling it from the
+ // current frame if necessary, or else fail and return an invalid
+ // result.
Result Allocate(Register target);
- // Allocate a free register without spilling any from the current frame or
- // fail and return an invalid result.
+ // Allocate a free register without spilling any from the current
+ // frame or fail and return an invalid result.
Result AllocateWithoutSpilling();
- // Allocate a free byte register without spilling any from the
- // current frame or fail and return an invalid result.
+ // Allocate a free byte register without spilling any from the current
+ // frame or fail and return an invalid result.
Result AllocateByteRegisterWithoutSpilling();
// Copy the internal state to a register file, to be restored later by
@@ -350,6 +371,7 @@
registers_.CopyTo(register_file);
}
+ // Restore the internal state.
void RestoreFrom(RegisterFile* register_file) {
register_file->CopyTo(®isters_);
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 1f67c4d..2fcdff1 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -5519,17 +5519,8 @@
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsProxy() || structure->IsAccessorInfo()) {
- if (Debug::debugger_entry()) {
- // SaveContext scope. It will restore debugger context after the
- // getter execution.
- SaveContext save;
- Top::set_context(*Debug::debugger_entry()->GetContext());
- value = receiver->GetPropertyWithCallback(
- receiver, structure, name, result->holder());
- } else {
- value = receiver->GetPropertyWithCallback(
- receiver, structure, name, result->holder());
- }
+ value = receiver->GetPropertyWithCallback(
+ receiver, structure, name, result->holder());
if (value->IsException()) {
value = Top::pending_exception();
Top::clear_pending_exception();
@@ -5575,6 +5566,17 @@
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(String, name, 1);
+ // Make sure to set the current context to the context before the debugger was
+ // entered (if the debugger is entered). The reason for switching context here
+ // is that for some property lookups (accessors and interceptors) callbacks
+  // into the embedding application can occur, and the embedding application
+ // could have the assumption that its own global context is the current
+ // context and not some internal debugger context.
+ SaveContext save;
+ if (Debug::InDebugger()) {
+ Top::set_context(*Debug::debugger_entry()->GetContext());
+ }
+
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (obj->IsJSGlobalProxy()) {
@@ -5609,15 +5611,29 @@
}
if (result.IsProperty()) {
+ // LookupResult is not GC safe as all its members are raw object pointers.
+ // When calling DebugLookupResultValue GC can happen as this might invoke
+ // callbacks. After the call to DebugLookupResultValue the callback object
+ // in the LookupResult might still be needed. Put it into a handle for later
+ // use.
+ PropertyType result_type = result.type();
+ Handle<Object> result_callback_obj;
+ if (result_type == CALLBACKS) {
+ result_callback_obj = Handle<Object>(result.GetCallbackObject());
+ }
+
+  // Find the actual value. Don't use result after this call as its content
+ // can be invalid.
bool caught_exception = false;
Object* value = DebugLookupResultValue(*obj, *name, &result,
&caught_exception);
if (value->IsFailure()) return value;
Handle<Object> value_handle(value);
+
// If the callback object is a fixed array then it contains JavaScript
// getter and/or setter.
- bool hasJavaScriptAccessors = result.type() == CALLBACKS &&
- result.GetCallbackObject()->IsFixedArray();
+ bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+ result_callback_obj->IsFixedArray();
Handle<FixedArray> details =
Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
details->set(0, *value_handle);
diff --git a/src/spaces.cc b/src/spaces.cc
index e61c6ad..72b028c 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1332,6 +1332,13 @@
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(size_in_bytes);
+ // We don't use the freelists in compacting mode. This makes it more like a
+ // GC that only has mark-sweep-compact and doesn't have a mark-sweep
+ // collector.
+ if (FLAG_always_compact) {
+ return size_in_bytes;
+ }
+
// Early return to drop too-small blocks on the floor (one or two word
// blocks cannot hold a map pointer, a size field, and a pointer to the
// next block in the free list).
@@ -1363,6 +1370,7 @@
if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
available_ -= size_in_bytes;
*wasted_bytes = 0;
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
return node;
}
// Search the size list for the best fit.
@@ -1374,6 +1382,7 @@
*wasted_bytes = 0;
return Failure::RetryAfterGC(size_in_bytes, owner_);
}
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
int rem = cur - index;
int rem_bytes = rem << kPointerSizeLog2;
FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
@@ -1454,6 +1463,7 @@
Memory::Address_at(start + i) = kZapValue;
}
#endif
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(Map::kSize);
node->set_next(head_);
@@ -1467,6 +1477,7 @@
return Failure::RetryAfterGC(Map::kSize, owner_);
}
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
head_ = node->next();
available_ -= Map::kSize;
diff --git a/src/string.js b/src/string.js
index 0bcabc9..df1f393 100644
--- a/src/string.js
+++ b/src/string.js
@@ -120,20 +120,26 @@
// ECMA-262 section 15.5.4.8
function StringLastIndexOf(searchString /* position */) { // length == 1
var sub = ToString(this);
+ var subLength = sub.length;
var pat = ToString(searchString);
- var index = (%_ArgumentsLength() > 1)
- ? ToNumber(%_Arguments(1) /* position */)
- : $NaN;
- var firstIndex;
- if ($isNaN(index)) {
- firstIndex = sub.length - pat.length;
- } else {
- firstIndex = TO_INTEGER(index);
- if (firstIndex + pat.length > sub.length) {
- firstIndex = sub.length - pat.length;
+ var patLength = pat.length;
+ var index = subLength - patLength;
+ if (%_ArgumentsLength() > 1) {
+ var position = ToNumber(%_Arguments(1));
+ if (!$isNaN(position)) {
+ position = TO_INTEGER(position);
+ if (position < 0) {
+ position = 0;
+ }
+ if (position + patLength < subLength) {
+ index = position
+ }
}
}
- return %StringLastIndexOf(sub, pat, firstIndex);
+ if (index < 0) {
+ return -1;
+ }
+ return %StringLastIndexOf(sub, pat, index);
}
diff --git a/src/utils.h b/src/utils.h
index 36a929c..137e2c4 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -379,6 +379,9 @@
// Factory method for creating empty vectors.
static Vector<T> empty() { return Vector<T>(NULL, 0); }
+ protected:
+ void set_start(T* start) { start_ = start; }
+
private:
T* start_;
int length_;
@@ -406,6 +409,22 @@
class EmbeddedVector : public Vector<T> {
public:
EmbeddedVector() : Vector<T>(buffer_, kSize) { }
+
+  // When copying, make the underlying Vector reference our own buffer.
+ EmbeddedVector(const EmbeddedVector& rhs)
+ : Vector<T>(rhs) {
+ memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ set_start(buffer_);
+ }
+
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+ if (this == &rhs) return *this;
+ Vector<T>::operator=(rhs);
+ memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ set_start(buffer_);
+ return *this;
+ }
+
private:
T buffer_[kSize];
};
diff --git a/src/version.cc b/src/version.cc
index 65d8fe0..3d521bf 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 2
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 2
+#define BUILD_NUMBER 6
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
index f92f4a2..39dbf17 100644
--- a/src/virtual-frame.cc
+++ b/src/virtual-frame.cc
@@ -38,7 +38,7 @@
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
- : elements_(original->elements_.length()),
+ : elements_(original->element_count()),
stack_pointer_(original->stack_pointer_) {
elements_.AddAll(original->elements_);
// Copy register locations from original.
@@ -50,7 +50,7 @@
FrameElement VirtualFrame::CopyElementAt(int index) {
ASSERT(index >= 0);
- ASSERT(index < elements_.length());
+ ASSERT(index < element_count());
FrameElement target = elements_[index];
FrameElement result;
@@ -96,7 +96,7 @@
// pushing an exception handler). No code is emitted.
void VirtualFrame::Adjust(int count) {
ASSERT(count >= 0);
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
for (int i = 0; i < count; i++) {
elements_.Add(FrameElement::MemoryElement());
@@ -107,7 +107,7 @@
void VirtualFrame::ForgetElements(int count) {
ASSERT(count >= 0);
- ASSERT(elements_.length() >= count);
+ ASSERT(element_count() >= count);
for (int i = 0; i < count; i++) {
FrameElement last = elements_.RemoveLast();
@@ -118,7 +118,7 @@
if (cgen()->frame() == this) {
Unuse(last.reg());
} else {
- register_locations_[last.reg().code()] = kIllegalIndex;
+ set_register_location(last.reg(), kIllegalIndex);
}
}
}
@@ -127,14 +127,13 @@
// If there are any registers referenced only by the frame, spill one.
Register VirtualFrame::SpillAnyRegister() {
- // Find the leftmost (ordered by register code) register whose only
+ // Find the leftmost (ordered by register number) register whose only
// reference is in the frame.
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i) && cgen()->allocator()->count(i) == 1) {
- Register result = { i };
- Spill(result);
- ASSERT(!cgen()->allocator()->is_used(result));
- return result;
+ SpillElementAt(register_location(i));
+ ASSERT(!cgen()->allocator()->is_used(i));
+ return RegisterAllocator::ToRegister(i);
}
}
return no_reg;
@@ -173,7 +172,7 @@
// Make the type of all elements be MEMORY.
void VirtualFrame::SpillAll() {
- for (int i = 0; i < elements_.length(); i++) {
+ for (int i = 0; i < element_count(); i++) {
SpillElementAt(i);
}
}
@@ -183,7 +182,7 @@
// Perform state changes on this frame that will make merge to the
// expected frame simpler or else increase the likelihood that his
// frame will match another.
- for (int i = 0; i < elements_.length(); i++) {
+ for (int i = 0; i < element_count(); i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
@@ -200,7 +199,7 @@
if (cgen()->frame() == this) {
Unuse(source.reg());
} else {
- register_locations_[source.reg().code()] = kIllegalIndex;
+ set_register_location(source.reg(), kIllegalIndex);
}
}
elements_[i] = target;
@@ -224,16 +223,16 @@
ASSERT(height() >= spilled_args);
ASSERT(dropped_args <= spilled_args);
- SyncRange(0, elements_.length() - 1);
+ SyncRange(0, element_count() - 1);
// Spill registers.
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i)) {
- SpillElementAt(register_locations_[i]);
+ SpillElementAt(register_location(i));
}
}
// Spill the arguments.
- for (int i = elements_.length() - spilled_args; i < elements_.length(); i++) {
+ for (int i = element_count() - spilled_args; i < element_count(); i++) {
if (!elements_[i].is_memory()) {
SpillElementAt(i);
}
@@ -257,9 +256,9 @@
void VirtualFrame::SetElementAt(int index, Result* value) {
- int frame_index = elements_.length() - index - 1;
+ int frame_index = element_count() - index - 1;
ASSERT(frame_index >= 0);
- ASSERT(frame_index < elements_.length());
+ ASSERT(frame_index < element_count());
ASSERT(value->is_valid());
FrameElement original = elements_[frame_index];
@@ -283,7 +282,7 @@
// The register already appears on the frame. Either the existing
// register element, or the new element at frame_index, must be made
// a copy.
- int i = register_index(value->reg());
+ int i = register_location(value->reg());
ASSERT(value->static_type() == elements_[i].static_type());
if (i < frame_index) {
@@ -299,8 +298,8 @@
elements_[i].set_sync();
}
elements_[frame_index].clear_sync();
- register_locations_[value->reg().code()] = frame_index;
- for (int j = i + 1; j < elements_.length(); j++) {
+ set_register_location(value->reg(), frame_index);
+ for (int j = i + 1; j < element_count(); j++) {
if (elements_[j].is_copy() && elements_[j].index() == i) {
elements_[j].set_index(frame_index);
}
@@ -331,12 +330,12 @@
void VirtualFrame::Push(Register reg, StaticType static_type) {
if (is_used(reg)) {
- int index = register_index(reg);
+ int index = register_location(reg);
FrameElement element = CopyElementAt(index);
ASSERT(static_type.merge(element.static_type()) == element.static_type());
elements_.Add(element);
} else {
- Use(reg, elements_.length());
+ Use(reg, element_count());
FrameElement element =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED,
@@ -366,15 +365,15 @@
bool VirtualFrame::Equals(VirtualFrame* other) {
#ifdef DEBUG
- for (int i = 0; i < kNumRegisters; i++) {
- if (register_locations_[i] != other->register_locations_[i]) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (register_location(i) != other->register_location(i)) {
return false;
}
}
- if (elements_.length() != other->elements_.length()) return false;
+ if (element_count() != other->element_count()) return false;
#endif
if (stack_pointer_ != other->stack_pointer_) return false;
- for (int i = 0; i < elements_.length(); i++) {
+ for (int i = 0; i < element_count(); i++) {
if (!elements_[i].Equals(other->elements_[i])) return false;
}
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index ff37f05..6d3ed15 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -41,17 +41,65 @@
// -----------------------------------------------------------------------------
// Implementation of Assembler
-#define EMIT(x) \
- *pc_++ = (x)
-void Assembler::emit_rex_64(Register reg, Register rm_reg) {
- EMIT(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+void Assembler::emitl(uint32_t x) {
+ Memory::uint32_at(pc_) = x;
+ pc_ += sizeof(uint32_t);
}
+void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+ Memory::uint64_at(pc_) = x;
+ RecordRelocInfo(rmode, x);
+}
+
+
+// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+// REX.W is set. REX.X is cleared.
+void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+// The high bit of reg is used for REX.R, the high bit of op's base
+// register is used for REX.B, and the high bit of op's index register
+// is used for REX.X. REX.W is set.
void Assembler::emit_rex_64(Register reg, const Operand& op) {
- EMIT(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+ emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+// REX.W is set. REX.X is cleared.
+void Assembler::emit_rex_32(Register reg, Register rm_reg) {
+ emit(0x40 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+// The high bit of reg is used for REX.R, the high bit of op's base
+// register is used for REX.B, and the high bit of op's index register
+// is used for REX.X. REX.W is cleared.
+void Assembler::emit_rex_32(Register reg, const Operand& op) {
+ emit(0x40 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+// REX.W and REX.X are cleared. If no REX bits are set, no byte is emitted.
+void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3;
+ if (rex_bits) emit(0x40 | rex_bits);
+}
+
+
+// The high bit of reg is used for REX.R, the high bit of op's base
+// register is used for REX.B, and the high bit of op's index register
+// is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
+// is emitted.
+void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
+ if (rex_bits) emit(0x40 | rex_bits);
}
@@ -65,8 +113,6 @@
return NULL;
}
-#undef EMIT
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -169,13 +215,32 @@
return reinterpret_cast<Object**>(pc_ + 1);
}
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp) {
+ len_ = 1;
+ if (base.is(rsp) || base.is(r12)) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
+
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+}
void Operand::set_modrm(int mod, Register rm) {
ASSERT((mod & -4) == 0);
buf_[0] = mod << 6 | (rm.code() & 0x7);
// Set REX.B to the high bit of rm.code().
rex_ |= (rm.code() >> 3);
- len_ = 1;
}
@@ -189,8 +254,15 @@
len_ = 2;
}
+void Operand::set_disp8(int disp) {
+ ASSERT(is_int8(disp));
+ ASSERT(len_ == 1 || len_ == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int8_t);
+}
-void Operand::set_disp32(int32_t disp) {
+void Operand::set_disp32(int disp) {
ASSERT(len_ == 1 || len_ == 2);
int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
*p = disp;
@@ -198,11 +270,6 @@
}
-Operand::Operand(Register reg) {
- // reg
- set_modrm(3, reg);
-}
-
} } // namespace v8::internal
#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d1b4d46..acea713 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -28,15 +28,49 @@
#include "v8.h"
#include "macro-assembler.h"
+#include "serialize.h"
namespace v8 {
namespace internal {
-Register no_reg = { -1 };
+// -----------------------------------------------------------------------------
+// Implementation of Register
+
Register rax = { 0 };
Register rcx = { 1 };
-Register rsi = { 7 };
+Register rdx = { 2 };
+Register rbx = { 3 };
+Register rsp = { 4 };
+Register rbp = { 5 };
+Register rsi = { 6 };
+Register rdi = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register r11 = { 11 };
+Register r12 = { 12 };
+Register r13 = { 13 };
+Register r14 = { 14 };
+Register r15 = { 15 };
+Register no_reg = { -1 };
+
+XMMRegister xmm0 = { 0 };
+XMMRegister xmm1 = { 1 };
+XMMRegister xmm2 = { 2 };
+XMMRegister xmm3 = { 3 };
+XMMRegister xmm4 = { 4 };
+XMMRegister xmm5 = { 5 };
+XMMRegister xmm6 = { 6 };
+XMMRegister xmm7 = { 7 };
+XMMRegister xmm8 = { 8 };
+XMMRegister xmm9 = { 9 };
+XMMRegister xmm10 = { 10 };
+XMMRegister xmm11 = { 11 };
+XMMRegister xmm12 = { 12 };
+XMMRegister xmm13 = { 13 };
+XMMRegister xmm14 = { 14 };
+XMMRegister xmm15 = { 15 };
// Safe default is no features.
uint64_t CpuFeatures::supported_ = 0;
@@ -49,10 +83,6 @@
// -----------------------------------------------------------------------------
// Implementation of Assembler
-// Emit a single byte. Must always be inlined.
-#define EMIT(x) \
- *pc_++ = (x)
-
#ifdef GENERATED_CODE_COVERAGE
static void InitCoverageLog();
#endif
@@ -144,21 +174,33 @@
}
}
-void Assembler::RecordComment(char const* a) {
- UNIMPLEMENTED();
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(!L->is_bound()); // Label may only be bound once.
+ last_pc_ = NULL;
+ ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
+ if (L->is_linked()) {
+ int current = L->pos();
+ int next = long_at(current);
+ while (next != current) {
+ // relative address, relative to point after address
+ int imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, imm32);
+ current = next;
+ next = long_at(next);
+ }
+ // Fix up last fixup on linked list.
+ int last_imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, last_imm32);
+ }
+ L->bind_to(pos);
}
-void Assembler::RecordPosition(int a) {
- UNIMPLEMENTED();
+
+void Assembler::bind(Label* L) {
+ bind_to(L, pc_offset());
}
-void Assembler::RecordStatementPosition(int a) {
- UNIMPLEMENTED();
-}
-
-void Assembler::bind(Label* a) {
- UNIMPLEMENTED();
-}
void Assembler::GrowBuffer() {
ASSERT(overflow()); // should not call this otherwise
@@ -242,114 +284,297 @@
}
-void Assembler::add(Register dst, const Operand& src) {
+// Assembler Instruction implementations
+
+void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst, src);
- EMIT(0x03);
- emit_operand(dst, src);
+ emit_rex_64(reg, op);
+ emit(opcode);
+ emit_operand(reg, op);
}
-void Assembler::add(Register dst, Register src) {
+void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst, src);
- EMIT(0x03);
- EMIT(0xC0 | (src.code() & 0x7) << 3 | (dst.code() & 0x7));
+ emit(opcode);
+ emit(0xC0 | (dst.code() & 0x7) << 3 | (src.code() & 0x7));
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit(0xC0 | (subcode << 3) | (dst.code() & 0x7));
+ emit(src.value_);
+ } else if (dst.is(rax)) {
+ emit(0x05 | (subcode << 3));
+ emitl(src.value_);
+ } else {
+ emit(0x81);
+ emit(0xC0 | (subcode << 3) | (dst.code() & 0x7));
+ emitl(src.value_);
+ }
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+ const Operand& dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_operand(Register::toRegister(subcode), dst);
+ emit(src.value_);
+ } else {
+ emit(0x81);
+ emit_operand(Register::toRegister(subcode), dst);
+ emitl(src.value_);
+ }
+}
+
+
+void Assembler::call(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp
+ emit(0xE8);
+ if (L->is_bound()) {
+ int offset = L->pos() - pc_offset() - sizeof(int32_t);
+ ASSERT(offset <= 0);
+ emitl(offset);
+ } else if (L->is_linked()) {
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(L->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
}
void Assembler::dec(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(rcx, dst);
- EMIT(0xFF);
- EMIT(0xC8 | (dst.code() & 0x7));
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit(0xC8 | (dst.code() & 0x7));
}
void Assembler::dec(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(rax, dst);
- EMIT(0xFF);
- emit_operand(rcx, dst);
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_operand(1, dst);
}
void Assembler::hlt() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0xF4);
+ emit(0xF4);
}
void Assembler::inc(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(rax, dst);
- EMIT(0xFF);
- EMIT(0xC0 | (dst.code() & 0x7));
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit(0xC0 | (dst.code() & 0x7));
}
void Assembler::inc(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(rax, dst);
- EMIT(0xFF);
- emit_operand(rax, dst);
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_operand(0, dst);
}
void Assembler::int3() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0xCC);
+ emit(0xCC);
}
-void Assembler::mov(Register dst, const Operand& src) {
+void Assembler::j(Condition cc, Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(0 <= cc && cc < 16);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ emit(0x70 | cc);
+ emit((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emitl(offs - long_size);
+ }
+ } else if (L->is_linked()) {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(L->is_unused());
+ emit(0x0F);
+ emit(0x80 | cc);
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::jmp(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ int offs = L->pos() - pc_offset() - 1;
+ ASSERT(offs <= 0);
+ if (is_int8(offs - sizeof(int8_t))) {
+ // 1110 1011 #8-bit disp
+ emit(0xEB);
+ emit((offs - sizeof(int8_t)) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emitl(offs - sizeof(int32_t));
+ }
+ } else if (L->is_linked()) {
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ // 1110 1001 #32-bit disp
+ ASSERT(L->is_unused());
+ emit(0xE9);
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::movq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst, src);
- EMIT(0x8B);
+ emit(0x8B);
emit_operand(dst, src);
}
-void Assembler::mov(Register dst, Register src) {
+void Assembler::movq(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst, src);
- EMIT(0x89);
- EMIT(0xC0 | (src.code() & 0x7) << 3 | (dst.code() & 0x7));
+ emit(0x8B);
+ emit(0xC0 | (dst.code() & 0x7) << 3 | (src.code() & 0x7));
+}
+
+
+void Assembler::movq(Register dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xC7);
+ emit(0xC0 | (dst.code() & 0x7));
+ emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7));
+ emitq(value, rmode);
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit(0xC0 | (0x3 << 3) | (dst.code() & 0x7));
+}
+
+
+void Assembler::neg(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_operand(3, dst);
}
void Assembler::nop() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0x90);
+ emit(0x90);
}
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit(0xC0 | (0x2 << 3) | (dst.code() & 0x7));
+}
+
+
+void Assembler::not_(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_operand(2, dst);
+}
+
+
void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (dst.code() & 0x8) {
- emit_rex_64(rax, dst);
+ emit_rex_64(dst);
}
- EMIT(0x58 | (dst.code() & 0x7));
+ emit(0x58 | (dst.code() & 0x7));
}
void Assembler::pop(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(rax, dst); // Could be omitted in some cases.
- EMIT(0x8F);
- emit_operand(rax, dst);
+ emit_rex_64(dst); // Could be omitted in some cases.
+ emit(0x8F);
+ emit_operand(0, dst);
}
@@ -357,18 +582,18 @@
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (src.code() & 0x8) {
- emit_rex_64(rax, src);
+ emit_rex_64(src);
}
- EMIT(0x50 | (src.code() & 0x7));
+ emit(0x50 | (src.code() & 0x7));
}
void Assembler::push(const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(rsi, src); // Could be omitted in some cases.
- EMIT(0xFF);
- emit_operand(rsi, src);
+ emit_rex_64(src); // Could be omitted in some cases.
+ emit(0xFF);
+ emit_operand(6, src);
}
@@ -377,14 +602,135 @@
last_pc_ = pc_;
ASSERT(is_uint16(imm16));
if (imm16 == 0) {
- EMIT(0xC3);
+ emit(0xC3);
} else {
- EMIT(0xC2);
- EMIT(imm16 & 0xFF);
- EMIT((imm16 >> 8) & 0xFF);
+ emit(0xC2);
+ emit(imm16 & 0xFF);
+ emit((imm16 >> 8) & 0xFF);
}
}
+
+void Assembler::testb(Register reg, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.is(rax)) {
+ emit(0xA8);
+ emit(mask);
+ } else {
+ if (reg.code() & 0x8) {
+ emit_rex_32(rax, reg);
+ }
+ emit(0xF6);
+ emit(0xC0 | (reg.code() & 0x3));
+ emit(mask.value_); // Low byte emitted.
+ }
+}
+
+
+void Assembler::testb(const Operand& op, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(rax, op);
+ emit(0xF6);
+ emit_operand(rax, op); // Operation code 0
+ emit(mask.value_); // Low byte emitted.
+}
+
+
+void Assembler::testl(Register reg, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.is(rax)) {
+ emit(0xA9);
+ emit(mask);
+ } else {
+ emit_optional_rex_32(rax, reg);
+ emit(0xF7);
+ emit(0xC0 | (reg.code() & 0x3));
+ emit(mask);
+ }
+}
+
+
+void Assembler::testl(const Operand& op, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(rax, op);
+ emit(0xF7);
+ emit_operand(rax, op); // Operation code 0
+ emit(mask);
+}
+
+
+// Relocation information implementations
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ ASSERT(rmode != RelocInfo::NONE);
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !Serializer::enabled() &&
+ !FLAG_debug_code) {
+ return;
+ }
+ RelocInfo rinfo(pc_, rmode, data);
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
} } // namespace v8::internal
@@ -535,18 +881,6 @@
return NULL;
}
-bool RegisterAllocator::IsReserved(int a) {
- UNIMPLEMENTED();
- return false;
-}
-
-RegisterFile RegisterAllocator::Reserved() {
- UNIMPLEMENTED();
- return RegisterFile();
-}
-
-const int RelocInfo::kApplyMask = -1;
-
StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
UNIMPLEMENTED();
return NONE;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index ba40eb3..06a7c40 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -61,10 +61,13 @@
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
//
-const int kNumRegisters = 16;
struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ static Register toRegister(int code) {
+ Register r = {code};
+ return r;
+ }
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(Register reg) const { return code_ == reg.code_; }
// The byte-register distinction of ai32 has dissapeared.
bool is_byte_register() const { return false; }
@@ -99,7 +102,6 @@
extern Register r15;
extern Register no_reg;
-
struct XMMRegister {
bool is_valid() const { return 0 <= code_ && code_ < 2; }
int code() const {
@@ -118,6 +120,14 @@
extern XMMRegister xmm5;
extern XMMRegister xmm6;
extern XMMRegister xmm7;
+extern XMMRegister xmm8;
+extern XMMRegister xmm9;
+extern XMMRegister xmm10;
+extern XMMRegister xmm11;
+extern XMMRegister xmm12;
+extern XMMRegister xmm13;
+extern XMMRegister xmm14;
+extern XMMRegister xmm15;
enum Condition {
// any value < 0 is considered no_condition
@@ -201,34 +211,11 @@
class Immediate BASE_EMBEDDED {
public:
- inline explicit Immediate(int64_t x);
- inline explicit Immediate(const char* s);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
+ explicit Immediate(int32_t value) : value_(value) {}
inline explicit Immediate(Smi* value);
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
- }
- bool is_int32() const {
- return V8_INT64_C(-2147483648) <= x_
- && x_ < V8_INT64_C(2147483648)
- && rmode_ == RelocInfo::NONE;
- }
-
private:
- inline explicit Immediate(Label* value) { UNIMPLEMENTED(); }
-
- int64_t x_;
- RelocInfo::Mode rmode_;
+ int32_t value_;
friend class Assembler;
};
@@ -247,142 +234,43 @@
class Operand BASE_EMBEDDED {
public:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // MemoryOperand
- INLINE(explicit Operand()) { UNIMPLEMENTED(); }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- // These constructors have been moved to MemOperand, and should
- // be removed from Operand as soon as all their uses use MemOperands instead.
- // [disp/r]
- INLINE(explicit Operand(intptr_t disp, RelocInfo::Mode rmode)) {
- UNIMPLEMENTED();
- }
- // disp only must always be relocated
-
// [base + disp/r]
- explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ INLINE(Operand(Register base, int32_t disp));
// [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp);
// [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // End of constructors and methods that have been moved to MemOperand.
+ Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp);
private:
byte rex_;
byte buf_[10];
// The number of bytes in buf_.
unsigned int len_;
- // Only valid if len_ > 4.
RelocInfo::Mode rmode_;
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
+ // set_modrm can be called before or after set_sib and set_disp*.
inline void set_modrm(int mod, Register rm);
+ // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_disp32(int32_t disp);
+
+ // Adds operand displacement fields (offsets added to the memory address).
+ // Needs to be called after set_sib, not before it.
+ inline void set_disp8(int disp);
+ inline void set_disp32(int disp);
friend class Assembler;
};
-class MemOperand : public Operand {
- public:
- // [disp/r]
- INLINE(explicit MemOperand(int32_t disp, RelocInfo::Mode rmode)) :
- Operand() {
- UNIMPLEMENTED();
- }
- // disp only must always be relocated
-
- // [base + disp/r]
- explicit MemOperand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // [base + index*scale + disp/r]
- explicit MemOperand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // [index*scale + disp/r]
- explicit MemOperand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-};
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-// [ next | type |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
-
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
-};
-
-
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
@@ -515,21 +403,27 @@
void leave();
// Moves
- void mov_b(Register dst, const Operand& src);
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, Register src);
+ void movb(Register dst, const Operand& src);
+ void movb(const Operand& dst, int8_t imm8);
+ void movb(const Operand& dst, Register src);
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
+ void movq(Register dst, int32_t imm32);
+ void movq(Register dst, Immediate x);
+ void movq(Register dst, const Operand& src);
+ void movq(Register dst, Register src);
+ void movq(const Operand& dst, const Immediate& x);
+ void movq(const Operand& dst, Register src);
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
+ // New x64 instructions to load a 64-bit immediate into a register.
+ // All 64-bit immediates must have a relocation mode.
+ void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
+ void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+ void movq(Register dst, const char* s, RelocInfo::Mode rmode);
+ void movq(Register dst, const ExternalReference& ext, RelocInfo::Mode rmode);
+ void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+
+ // New x64 instruction to load from an immediate 64-bit pointer into RAX.
+ void load_rax(void* ptr, RelocInfo::Mode rmode);
void movsx_b(Register dst, const Operand& src);
@@ -548,26 +442,71 @@
void xchg(Register dst, Register src);
// Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
+ void add(Register dst, Register src) {
+ arithmetic_op(0x03, dst, src);
+ }
- void add(Register dst, Register src);
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, const Immediate& x);
+ void add(Register dst, const Operand& src) {
+ arithmetic_op(0x03, dst, src);
+ }
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Operand& src);
- void and_(const Operand& src, Register dst);
- void and_(const Operand& dst, const Immediate& x);
+
+ void add(const Operand& dst, Register src) {
+ arithmetic_op(0x01, src, dst);
+ }
+
+ void add(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x0, dst, src);
+ }
+
+ void add(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x0, dst, src);
+ }
+
+ void cmp(Register dst, Register src) {
+ arithmetic_op(0x3B, dst, src);
+ }
+
+ void cmp(Register dst, const Operand& src) {
+ arithmetic_op(0x3B, dst, src);
+ }
+
+ void cmp(const Operand& dst, Register src) {
+ arithmetic_op(0x39, src, dst);
+ }
+
+ void cmp(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x7, dst, src);
+ }
+
+ void cmp(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x7, dst, src);
+ }
+
+ void and_(Register dst, Register src) {
+ arithmetic_op(0x23, dst, src);
+ }
+
+ void and_(Register dst, const Operand& src) {
+ arithmetic_op(0x23, dst, src);
+ }
+
+ void and_(const Operand& dst, Register src) {
+ arithmetic_op(0x21, src, dst);
+ }
+
+ void and_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x4, dst, src);
+ }
+
+ void and_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x4, dst, src);
+ }
void cmpb(const Operand& op, int8_t imm8);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg, const Operand& op);
- void cmp(const Operand& op, const Immediate& imm);
void dec_b(Register dst);
@@ -589,13 +528,31 @@
void mul(Register src);
void neg(Register dst);
+ void neg(const Operand& dst);
void not_(Register dst);
+ void not_(const Operand& dst);
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(const Operand& dst, const Immediate& x);
+ void or_(Register dst, Register src) {
+ arithmetic_op(0x0B, dst, src);
+ }
+
+ void or_(Register dst, const Operand& src) {
+ arithmetic_op(0x0B, dst, src);
+ }
+
+ void or_(const Operand& dst, Register src) {
+ arithmetic_op(0x09, src, dst);
+ }
+
+ void or_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x1, dst, src);
+ }
+
+ void or_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x1, dst, src);
+ }
+
void rcl(Register dst, uint8_t imm8);
@@ -615,18 +572,51 @@
void shr(Register dst);
void shr_cl(Register dst);
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
+ void sub(Register dst, Register src) {
+ arithmetic_op(0x2B, dst, src);
+ }
- void test(Register reg, const Immediate& imm);
- void test(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
+ void sub(Register dst, const Operand& src) {
+ arithmetic_op(0x2B, dst, src);
+ }
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& src, Register dst);
- void xor_(const Operand& dst, const Immediate& x);
+ void sub(const Operand& dst, Register src) {
+ arithmetic_op(0x29, src, dst);
+ }
+
+ void sub(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x5, dst, src);
+ }
+
+ void sub(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x5, dst, src);
+ }
+
+ void testb(Register reg, Immediate mask);
+ void testb(const Operand& op, Immediate mask);
+ void testl(Register reg, Immediate mask);
+ void testl(const Operand& op, Immediate mask);
+
+ void xor_(Register dst, Register src) {
+ arithmetic_op(0x33, dst, src);
+ }
+
+ void xor_(Register dst, const Operand& src) {
+ arithmetic_op(0x33, dst, src);
+ }
+
+ void xor_(const Operand& dst, Register src) {
+ arithmetic_op(0x31, src, dst);
+ }
+
+ void xor_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x6, dst, src);
+ }
+
+ void xor_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x6, dst, src);
+ }
+
// Bit operations.
void bt(const Operand& dst, Register src);
@@ -669,9 +659,9 @@
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
// Conditional jumps
- void j(Condition cc, Label* L, Hint hint = no_hint);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
- void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+ void j(Condition cc, Label* L);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
// Floating-point operations
void fld(int i);
@@ -764,9 +754,13 @@
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- // Writes a single word of data in the code stream.
+ // Writes a doubleword of data in the code stream.
// Used for inline tables, e.g., jump-tables.
- void dd(uint32_t data, RelocInfo::Mode reloc_info);
+ void dd(uint32_t data);
+
+ // Writes a quadword of data in the code stream.
+ // Used for inline tables, e.g., jump-tables.
+ void dd(uint64_t data, RelocInfo::Mode reloc_info);
// Writes the absolute address of a bound label at the given position in
// the generated code. That positions should have the relocation mode
@@ -809,33 +803,67 @@
// code emission
void GrowBuffer();
- inline void emit(uint32_t x);
+
+ void emit(byte x) { *pc_++ = x; }
+ inline void emitl(uint32_t x);
inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x, RelocInfo::Mode rmode);
- inline void emit(const Immediate& x);
- inline void emit_w(const Immediate& x);
+ inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+ void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is set.
inline void emit_rex_64(Register reg, Register rm_reg);
+ void emit_rex_64(Register rm_reg) { emit_rex_64(rax, rm_reg); }
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the destination, index, and base register codes.
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is set.
inline void emit_rex_64(Register reg, const Operand& op);
+ void emit_rex_64(const Operand& op) { emit_rex_64(rax, op); }
+
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is set.
+ inline void emit_rex_32(Register reg, Register rm_reg);
+
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is cleared.
+ inline void emit_rex_32(Register reg, const Operand& op);
+
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is cleared. If no REX bits are set, no byte is emitted.
+ inline void emit_optional_rex_32(Register reg, Register rm_reg);
+
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
+ // is emitted.
+ inline void emit_optional_rex_32(Register reg, const Operand& op);
+
+ // Emit the Mod/RM byte, and optionally the SIB byte and
+ // 1- or 4-byte offset for a memory operand. Also encodes
+ // the second operand of the operation, a register or operation
+ // subcode, into the Mod/RM byte.
+ void emit_operand(Register reg, const Operand& adr);
+ void emit_operand(int op_subcode, const Operand& adr) {
+ emit_operand(Register::toRegister(op_subcode), adr);
+ }
// Emit the code-object-relative offset of the label's position
inline void emit_code_relative_offset(Label* label);
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
-
- void emit_operand(Register reg, const Operand& adr);
+ // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
+ // AND, OR, XOR, or CMP. The encodings of these operations are all
+ // similar, differing just in the opcode or in the reg field of the
+ // Mod/RM byte.
+ void arithmetic_op(byte opcode, Register dst, Register src);
+ void arithmetic_op(byte opcode, Register reg, const Operand& op);
+ void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
+ void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
void emit_farith(int b1, int b2, int i);
@@ -844,11 +872,6 @@
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
-
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 374cf83..5f5daa4 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -333,8 +333,7 @@
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -578,14 +577,14 @@
void CodeForSourcePosition(int pos);
#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should be
- // no frame-external references to eax, ebx, ecx, edx, or edi.
+ // True if the registers are valid for entry to a block. There should
+ // be no frame-external references to (non-reserved) registers.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
- List<DeferredCode*> deferred_;
+ ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
new file mode 100644
index 0000000..f369d7d
--- /dev/null
+++ b/src/x64/register-allocator-x64-inl.h
@@ -0,0 +1,69 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ // All registers are reserved for now.
+ return true;
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ UNIMPLEMENTED();
+ return -1;
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ UNIMPLEMENTED();
+ return no_reg;
+}
+
+
+void RegisterAllocator::Initialize() {
+ UNIMPLEMENTED();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
new file mode 100644
index 0000000..bc08112
--- /dev/null
+++ b/src/x64/register-allocator-x64.h
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ // Register allocation is not yet implemented on x64, but C++
+ // forbids 0-length arrays so we use 1 as the number of registers.
+ static const int kNumRegisters = 1;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index f02502b..deba7aa 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -83,21 +83,35 @@
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
int height() {
- return elements_.length() - expression_base_index();
+ return element_count() - expression_base_index();
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
+ }
+
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -112,7 +126,7 @@
// handler). No code will be emitted.
void Forget(int count) {
ASSERT(count >= 0);
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
ForgetElements(count);
}
@@ -127,7 +141,7 @@
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_index(reg));
+ if (is_used(reg)) SpillElementAt(register_location(reg));
}
// Spill all occurrences of an arbitrary register if possible. Return the
@@ -135,6 +149,9 @@
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
// Make this frame so that an arbitrary frame of the same height can
// be merged to it. Copies and constants are removed from the
// topmost mergable_elements elements of the frame. A
@@ -158,11 +175,8 @@
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i)) {
- Register temp = { i };
- cgen_allocator->Unuse(temp);
- }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
}
}
@@ -172,11 +186,8 @@
// binding a label.
void AttachToCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < kNumRegisters; i++) {
- if (is_used(i)) {
- Register temp = { i };
- cgen_allocator->Use(temp);
- }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
}
}
@@ -211,11 +222,11 @@
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
void StoreToElementAt(int index) {
- StoreToFrameSlotAt(elements_.length() - index - 1);
+ StoreToFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
@@ -352,7 +363,7 @@
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -407,7 +418,7 @@
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
@@ -420,7 +431,7 @@
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() const { return 1; }
+ int param0_index() { return 1; }
// The index of the context slot in the frame. It is immediately
// above the frame pointer.
@@ -440,8 +451,8 @@
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) {
- ASSERT(index < elements_.length());
- ASSERT(frame_pointer() < elements_.length()); // FP is on the frame.
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
return (frame_pointer() - index) * kPointerSize;
}
@@ -450,7 +461,7 @@
// of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
ASSERT(!is_used(reg));
- register_locations_[reg.code()] = index;
+ set_register_location(reg, index);
cgen()->allocator()->Use(reg);
}
@@ -458,8 +469,8 @@
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
void Unuse(Register reg) {
- ASSERT(register_locations_[reg.code()] != kIllegalIndex);
- register_locations_[reg.code()] = kIllegalIndex;
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
cgen()->allocator()->Unuse(reg);
}
@@ -473,9 +484,6 @@
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
- void SyncRange(int begin, int end);
-
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);